repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (1 class) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars)
---|---|---|---|---|---|---|---|---
slashdd/sos | sos/cleaner/mappings/username_map.py | Python | gpl-2.0 | 1,265 | 0 |
# Copyright 2020 Red Hat, Inc. Jake Hunsaker <jhunsake@redhat.com>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.cleaner.mappings import SoSMap
class SoSUsernameMap(SoSMap):
"""Mapping to store usernames ma
|
tched from ``lastlog`` output.
Usernames are obfuscated as ``obfuscateduserX`` where ``X`` is a counter
that gets incremented for every new username found.
Note that this specifically obfuscates user_names_ and not UIDs.
"""
name_count = 0
def load_names_from_options(self, opt_names):
for name in opt_names:
if name not in self.dataset.keys():
self.add(name)
def sanitize_item(self, username):
"""Obfuscate a new username not currently found in the map
"""
ob_name = "obfuscateduser%s" % self.name_count
self.name_count += 1
if ob_name in self.dataset.values():
return self.sanitize_item(username)
return ob_name
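# Illustrative sketch (not part of the original sos module): a toy standalone
# class that mimics the counter-based scheme documented above, so the
# obfuscateduserX naming can be seen without the real SoSMap machinery.
# The class name and behaviour below are assumptions for demonstration only.
class _ToyUsernameMap:
    def __init__(self):
        self.dataset = {}
        self.name_count = 0
    def add(self, name):
        # Each new username gets "obfuscateduser<counter>", like sanitize_item().
        ob_name = "obfuscateduser%s" % self.name_count
        self.name_count += 1
        self.dataset[name] = ob_name
        return ob_name
_toy = _ToyUsernameMap()
print(_toy.add("alice"))  # obfuscateduser0
print(_toy.add("bob"))    # obfuscateduser1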
LLNL/spack | var/spack/repos/builtin/packages/py-ilmbase/package.py | Python | lgpl-2.1 | 1,026 | 0.004873 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyIlmbase(AutotoolsPackage):
"""The PyIlmBase libraries provides python bindings for the IlmBase librar
|
ies."""
homepage = "https://github.com/AcademySoftwareFoundation/openexr/tree/v2.3.0/PyIlmBase"
url = "https://github.com/AcademySoftwareFoundation/openexr/releases/download/v2.3.0/pyilmbase-2.3.0.tar.gz"
version('2.3.0', sha256='9c898bb16e7bc916c82bebdf32c343c0f2878fc3eacbafa49937e78f2079a425')
depends_on('ilmbase')
depends_on('boost+python')
# https://github.com/AcademySoftwareFoundation/openexr/issues/336
parallel = False
def configure_args(self):
spec = self.spec
args = [
'--with-boost-python-libname=boost_python{0}'.format(
spec['python'].version.up_to(2).joined)
]
return args
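# Illustrative note (not part of the original Spack package): for a
# hypothetical python@3.7.4 in the spec, version.up_to(2).joined yields "37",
# so configure_args() returns "--with-boost-python-libname=boost_python37".
# Plain-Python sketch of that string formatting:
_py_version = "3.7.4"  # assumed Python version, for illustration only
_joined = "".join(_py_version.split(".")[:2])
print('--with-boost-python-libname=boost_python{0}'.format(_joined))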
ponyorm/pony | pony/orm/tests/test_prop_sum_orderby.py | Python | apache-2.0 | 5,191 | 0.000385 |
from __future__ import absolute_import, print_function, division
import unittest
from pony.orm.core import *
from pony.orm.tests.testutils import *
from pony.orm.tests import setup_database, teardown_database
db = Database()
class Product(db.Entity):
id = PrimaryKey(int)
name = Required(str)
comments = Set('Comment')
@property
def sum_01(self):
return coalesce(select(c.points for c in self.comments).sum(), 0)
@property
def sum_02(self):
return coalesce(select(c.points for c in self.comments).sum(), 0.0)
@property
def sum_03(self):
return coalesce(select(sum(c.points) for c in self.comments), 0)
@property
def sum_04(self):
return coalesce(select(sum(c.points) for c in self.comments), 0.0)
@property
def sum_05(self):
return sum(c.points for c in self.comments)
@property
def sum_06(self):
return coalesce(sum(c.points for c in self.comments), 0)
@property
def sum_07(self):
return coalesce(sum(c.points for c in self.comments), 0.0)
@property
def sum_08(self):
return select(sum(c.points) for c in self.comments)
@property
def sum_09(self):
return select(coalesce(sum(c.points), 0) for c in self.comments)
@property
def sum_10(self):
return select(coalesce(sum(c.points), 0.0) for c in self.comments)
@property
def sum_11(self):
return select(sum(c.points) for c in self.comments)
@property
def sum_12(self):
return sum(self.comments.points)
@property
def sum_13(self):
return coalesce(sum(self.comments.points), 0)
@property
def sum_14(self):
return coalesce(sum(self.comments.points), 0.0)
class Comment(db.Entity):
id = PrimaryKey(int)
points = Required(int)
product = Optional('Product')
class TestQuerySetMonad(unittest.TestCase):
@classmethod
def setUpClass(cls):
setup_database(db)
with db_session:
p1 = Product(id=1, name='P1')
p2 = Product(id=2, name='P1', comments=[
Comment(id=201, points=5)
])
p3 = Product(id=3, name='P1', comments=[
Comment(id=301, points=1), Comment(id=302, points=2)
])
p4 = Product(id=4, name='P1', comments=[
Comment(id=401, points=1), Comment(id=402, points=5), Comment(id=403, points=1)
])
@classmethod
def tearDownClass(cls):
teardown_database(db)
def setUp(self):
rollback()
db_session.__enter__()
def tearDown(self):
rollback()
db_session.__exit__()
def test_sum_01(self):
q = list(Product.select().sort_by(lambda p: p.sum_01))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_02(self):
q = list(Product.select().sort_by(lambda p: p.sum_02))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_03(self):
q = list(Product.select().sort_by(lambda p: p.sum_03))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_04(self):
q = list(Product.select().sort_by(lambda p: p.sum_04))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_05(self):
q = list(Product.select().sort_by(lambda p: p.sum_05))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_06(self):
q = list(Product.select().sort_by(lambda p: p.sum_06))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_07(self):
q = list(Product.select().sort_by(lambda p: p.sum_07))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_08(self):
q = list(Product.select().sort_by(lambda p: p.sum_08))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_09(self):
q = list(Product.select().sort_by(lambda p: p.sum_09))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_10(self):
q = list(Product.select().sort_by(lambda p: p.sum_10))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_11(self):
q = list(Product.select().sort_by(lambda p: p.sum_11))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_12(self):
q = list(Product.select().sort_by(lambda p: p.sum_12))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_13(self):
q = list(Product.select().sort_by(lambda p: p.sum_13))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
def test_sum_14(self):
q = list(Product.select().sort_by(lambda p: p.sum_14))
result = [p.id for p in q]
self.assertEqual(result, [1, 3, 2, 4])
if __name__ == "__main__":
unittest.main()
jbasko/pytest-random-order | tests/test_cli.py | Python | mit | 503 | 0.001988 |
def test_help_message(testdir):
result = testdir.runpytest(
'--help',
)
result.stdout.fnmatch_lines([
'pytest-random-order options:',
'*--random-order-bucket={global,package,module,class,parent,grandparent,none}*',
'*--random-order-seed=*',
])
def test_markers_message(testdir):
result = testdir.runpytest(
'--markers',
)
result.stdout.fnmatch_lines(
[
'*@pytest.mark.random_order(disabled=True): disable reordering*',
])
gfxprim/gfxprim | tests/pylib/test_gfx.py | Python | lgpl-2.1 | 3,030 | 0.032013 |
"core.Pixmap tests"
from unittest import SkipTest
from testutils import *
from gfxprim.core import Pixmap
from gfxprim import gfx, core
def test_gfx_submodule_loads():
"gfx is present in a Pixmap"
c = Pixmap(1, 1, core.C.PIXEL_RGB888)
assert c.gfx
def test_gfx_submodule_has_C():
"gfx contains C"
c = Pixmap(1, 1, core.C.PIXEL_RGB888)
assert c.gfx.C
assert gfx.C
# These set the param types of the functions in GFX
gfx_params = {
'arc_segment': 'IIIIIFFP',
'circle': 'IIIP',
'ellipse': 'IIIIP',
'fill': 'P',
'fill_circle': 'IIIP',
'fill_ellipse': 'IIIIP',
'fill_polygon': ([(0,0),(1,1),(1,0)], 0, {}),
'fill_rect': 'IIIIP',
'fill_ring': 'IIIIP',
'fill_tetragon': 'IIIIIIIIP',
'fill_triangle': 'IIIIIIP',
'hline': 'IIIP',
'hline_aa': 'IIIP', # Fixpoint, originally 'FFFP'
'line': 'IIIIP',
'line_aa': 'IIIIP', # Fixpoint, originally 'FFFFP'
'polygon': ([(0,0),(1,1),(1,0)], 0, {}),
'putpixel_aa': 'IIP', # Fixpoint, originally 'FFP'
'rect': 'IIIIP',
'ring': 'IIIIP',
'tetragon': 'IIIIIIIIP',
'triangle': 'IIIIIIP',
'vline': 'IIIP',
'vline_aa': 'IIIP', # Fixpoint, originally 'FFFP'
}
def test_all_methods_are_known():
"All methods of gfx submodule have known param types in this test"
c = Pixmap(1, 1, core.C.PIXEL_RGB888)
for name in dir(c.gfx):
if name[0] != '_' and name not in ['C', 'ctx']:
assert name in gfx_params
def gen_dummy_args(params):
"""
Generate dummy parameter tuple according to characters in the given string.
0 - 0
S - String ("")
I - Int (1)
F - Float (0.5)
P - Pixel (0)
"""
args = []
for t in params:
if t == '0':
args.append(0)
elif t == 'I':
args.append(1)
elif t == 'P':
args.append(0)
elif t == 'F':
args.append(0.5)
elif t == 'S':
args.append("")
else:
assert False
return tuple(args)
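# Illustrative note (not part of the original test file): with the mapping
# above, a parameter-spec string such as 'IIIP' expands to the dummy argument
# tuple (1, 1, 1, 0). Standalone sketch of that expansion:
_dummy = {'0': 0, 'I': 1, 'P': 0, 'F': 0.5, 'S': ""}
print(tuple(_dummy[t] for t in 'IIIP'))  # (1, 1, 1, 0)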
@for_each_case(gfx_params)
def test_method_call(n, params):
"Calling with dummy parameters:"
c = PixmapRand(10, 10, core.C.PIXEL_RGB888)
if isinstance(params, str):
c.gfx.__getattribute__(n)(*gen_dummy_args(params))
else:
assert isinstance(params, tuple) and isinstance(params[-1], dict)
c.gfx.__getattribute__(n)(*params[:-1], **params[-1])
def test_Polygon():
"Polygon() works"
c0 = PixmapRand(13, 12, core.C.PIXEL_RGB888, seed=42)
c1 = PixmapRand(13, 12, core.C.PIXEL_RGB888, seed=42)
c2 = PixmapRand(13, 12, core.C.PIXEL_RGB888, seed=42)
assert c1 == c0
c1.gfx.polygon([1,2,0,4,7,9,5,4,3,2], 43)
c2.gfx.polygon([(1,2),(0,4),(7,9),(5,4),(3,2)], 43)
assert c1 == c2
assert c1 != c0
def test_FillPolygon():
"FillPolygon() works"
c0 = PixmapRand(13, 9, core.C.PIXEL_RGB888, seed=41)
c1 = PixmapRand(13, 9, core.C.PIXEL_RGB888, seed=41)
c2 = PixmapRand(13, 9, core.C.PIXEL_RGB888, seed=41)
assert c1 == c0
c1.gfx.fill_polygon([1,2,0,4,7,9,5,4,3,2], 0)
c2.gfx.fill_polygon([(1,2),(0,4),(7,9),(5,4),(3,2)], 0)
assert c1 == c2
assert c1 != c0
pudo/aleph | aleph/migrate/versions/2979a1322381_cleanup.py | Python | mit | 712 | 0.001404 |
"""Clean up notifications schema, some other parts.
Revision ID: 2979a1322381
Revises: 2b478162b2b7
Create Date: 2020-03-03 07:32:54.113550
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = "2979a1322381"
down_revision = "2b478162b2b7"
def upgrade():
op.drop_index("ix_notification_channels", table_name="notification")
op.drop_table("notification")
op.drop_column("diagram", "data")
op.drop_constraint("document_parent_id_fkey", "document", type_="foreignkey")
# op.alter_column('role', 'is_muted',
# existing_type=sa.BOOLEAN(),
# nullable=False)
op.drop_column("role", "notified_at")
def downgrade():
pass
JFriel/honours_project | venv/lib/python2.7/site-packages/nltk/corpus/reader/panlex_lite.py | Python | gpl-3.0 | 5,153 | 0.004463 |
# Natural Language Toolkit: PanLex Corpus Reader
#
# Copyright (C) 2001-2016 NLTK Project
# Author: David Kamholz <kamholz@panlex.org>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
CorpusReader for PanLex Lite, a stripped down version of PanLex distributed
as an SQLite database. See the README.txt in the panlex_lite corpus directory
for more information on PanLex Lite.
"""
import os
import sqlite3
from nltk.corpus.reader.api import CorpusReader
class PanLexLiteCorpusReader(CorpusReader):
MEANING_Q = """
SELECT dnx2.mn, dnx2.uq, dnx2.ap, dnx2.ui, ex2.tt, ex2.lv
FROM dnx
JOIN ex ON (ex.ex = dnx.ex)
JOIN dnx dnx2 ON (dnx2.mn = dnx.mn)
JOIN ex ex2 ON (ex2.ex = dnx2.ex)
WHERE dnx.ex != dnx2.ex AND ex.tt = ? AND ex.lv = ?
ORDER BY dnx2.uq DESC
"""
TRANSLATION_Q = """
SELECT s.tt, sum(s.uq) AS trq FROM (
SELECT ex2.tt, max(dnx.uq) AS uq
FROM dnx
JOIN ex ON (ex.ex = dnx.ex)
JOIN dnx dnx2 ON (dnx2.mn = dnx.mn)
JOIN ex ex2 ON (ex2.ex = dnx2.ex)
WHERE dnx.ex != dnx2.ex AND ex.lv = ? AND ex.tt = ? AND ex2.lv = ?
GROUP BY ex2.tt, dnx.ui
) s
GROUP BY s.tt
ORDER BY trq DESC, s.tt
"""
def __init__(self, root):
self._c = sqlite3.connect(os.path.join(root, 'db.sqlite')).cursor()
self._uid_lv = {}
self._lv_uid = {}
for row in self._c.execute('SELECT uid, lv FROM lv'):
self._uid_lv[row[0]] = row[1]
self._lv_uid[row[1]] = row[0]
def language_varieties(self, lc=None):
"""
Return a list of PanLex language varieties.
:param lc: ISO 639 alpha-3 code. If specified, filters returned varieties
by this code. If unspecified, all varieties are returned.
:return: the specified language varieties as a list of tuples. The first
element is the language variety's seven-character uniform identifier,
and the second element is its default name.
:rtype: list(tuple)
"""
if lc == None:
return self._c.execute('SELECT uid, tt FROM lv ORDER BY uid').fetchall()
else:
return self._c.execute('SELECT uid, tt FROM lv WHERE lc = ? ORDER BY uid', (lc,)).fetchall()
def meanings(self, expr_uid, expr_tt):
"""
Return a list of meanings for an expression.
:param expr_uid: the expression's language variety, as a seven-character
uniform identifier.
:param expr_tt: the expression's text.
:return: a list of Meaning objects.
:rtype: list(Meaning)
"""
expr_lv = self._uid_lv[expr_uid]
mn_info = {}
for i in self._c.execute(self.MEANING_Q, (expr_tt, expr_lv)):
mn = i[0]
uid = self._lv_uid[i[5]]
if not mn in mn_info:
mn_info[mn] = { 'uq': i[1], 'ap': i[2], 'ui': i[3], 'ex': { expr_uid: [expr_tt] } }
if not uid in mn_info[mn]['ex']:
mn_info[mn]['ex'][uid] = []
mn_info[mn]['ex'][uid].append(i[4])
return [ Meaning(mn, mn_info[mn]) for mn in mn_info ]
def translations(self, from_uid, from_tt, to_uid):
"""
Return a list of translations for an expression into a single language
variety.
:param from_uid: the source expression's language variety, as a
seven-character uniform identifier.
:param from_tt: the source expression's text.
:param to_uid: the target language variety, as a seven-character
uniform identifier.
:return a list of translation tuples. The first element is the expression
text and the second element is the translation quality.
:rtype: list(tuple)
"""
from_lv = self._uid_lv[from_uid]
to_lv = self._uid_lv[to_uid]
return self._c.execute(self.TRANSLATION_Q, (from_lv, from_tt, to_lv)).fetchall()
class Meaning(dict):
"""
Represents a single PanLex meaning. A meaning is a translation set derived
from a single source.
"""
def __init__(self, mn, attr):
super(Meaning, self).__init__(**attr)
self['mn'] = mn
def id(self):
"""
:return: the meaning's id.
:rtype: int
"""
return self['mn']
def quality(self):
"""
:return: the meaning's source's quality (0=worst, 9=best).
:rtype: int
"""
return self['uq']
def source(self):
"""
:return: the meaning's source id.
:rtype: int
"""
return self['ap']
def source_group(self):
"""
:return: the meaning's source group id.
:rtype: int
"""
return self['ui']
def expressions(self):
"""
:return: the meaning's expressions as a dictionary whose keys are language
variety uniform identifiers and whose values are lists of expression
texts.
:rtype: dict
"""
return self['ex']
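# Illustrative usage sketch (not part of the original module). The corpus root
# below is an assumed default download location; the example only runs if the
# panlex_lite corpus has been fetched, e.g. via nltk.download('panlex_lite').
if __name__ == '__main__':
    _root = os.path.expanduser('~/nltk_data/corpora/panlex_lite')
    if os.path.isfile(os.path.join(_root, 'db.sqlite')):
        _plx = PanLexLiteCorpusReader(_root)
        _eng = _plx.language_varieties('eng')[0][0]  # a uid such as 'eng-000'
        _spa = _plx.language_varieties('spa')[0][0]
        for _text, _quality in _plx.translations(_eng, 'dog', _spa)[:5]:
            print('%s %s' % (_text, _quality))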
iohannez/gnuradio | gr-filter/python/filter/qa_pfb_arb_resampler.py | Python | gpl-3.0 | 8,395 | 0.004169 |
#!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import division
from gnuradio import gr, gr_unittest, filter, blocks
import math
def sig_source_c(samp_rate, freq, amp, N):
t = [float(x) / samp_rate for x in range(N)]
y = [math.cos(2.*math.pi*freq*x) + \
1j*math.sin(2.*math.pi*freq*x) for x in t]
return y
def sig_source_f(samp_rate, freq, amp, N):
t = [float(x) / samp_rate for x in range(N)]
y = [math.sin(2.*math.pi*freq*x) for x in t]
return y
class test_pfb_arb_resampler(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_fff_000(self):
N = 500 # number of samples to use
fs = 5000.0 # baseband sampling rate
rrate = 2.3421 # resampling rate
nfilts = 32
taps = filter.firdes.low_pass_2(nfilts, nfilts*fs, fs / 2, fs / 10,
attenuation_dB=80,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
freq = 121.213
data = sig_source_f(fs, freq, 1, N)
signal = blocks.vector_source_f(data)
pfb = filter.pfb_arb_resampler_fff(rrate, taps, nfilts)
snk = blocks.vector_sink_f()
self.tb.connect(signal, pfb, snk)
self.tb.run()
Ntest = 50
L = len(snk.data())
# Get group delay and estimate of phase offset from the filter itself.
delay = pfb.group_delay()
phase = pfb.phase_offset(freq, fs)
# Create a timeline offset by the filter's group delay
t = [float(x) / (fs*rrate) for x in range(-delay, L-delay)]
# Data of the sinusoid at frequency freq with the delay and phase offset.
expected_data = [math.sin(2.*math.pi*freq*x+phase) for x in t]
dst_data = snk.data()
self.assertFloatTuplesAlmostEqual(expected_data[-Ntest:], dst_data[-Ntest:], 2)
def test_ccf_000(self):
N = 5000 # number of samples to use
fs = 5000.0 # baseband sampling rate
rrate = 2.4321 # resampling rate
nfilts = 32
taps = filter.firdes.low_pass_2(nfilts, nfilts*fs, fs / 2, fs / 10,
attenuation_dB=80,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
freq = 211.123
data = sig_source_c(fs, freq, 1, N)
signal = blocks.vector_source_c(data)
pfb = filter.pfb_arb_resampler_ccf(rrate, taps, nfilts)
snk = blocks.vector_sink_c()
self.tb.connect(signal, pfb, snk)
self.tb.run()
Ntest = 50
L = len(snk.data())
# Get group delay and estimate of phase offset from the filter itself.
delay = pfb.group_delay()
phase = pfb.phase_offset(freq, fs)
# Create a timeline offset by the filter's group delay
t = [float(x) / (fs*rrate) for x in range(-delay, L-delay)]
# Data of the sinusoid at frequency freq with the delay and phase offset.
expected_data = [math.cos(2.*math.pi*freq*x+phase) + \
1j*math.sin(2.*math.pi*freq*x+phase) for x in t]
dst_data = snk.data()
self.assertComplexTuplesAlmostEqual(expected_data[-Ntest:], dst_data[-Ntest:], 2)
def test_ccf_001(self):
N = 50000 # number of samples to use
fs = 5000.0 # baseband sampling rate
rrate = 0.75 # resampling rate
nfilts = 32
taps = filter.firdes.low_pass_2(nfilts, nfilts*fs, fs / 4, fs / 10,
attenuation_dB=80,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
freq = 211.123
data = sig_source_c(fs, freq, 1, N)
signal = blocks.vector_source_c(data)
pfb = filter.pfb_arb_resampler_ccf(rrate, taps, nfilts)
snk = blocks.vector_sink_c()
self.tb.connect(signal, pfb, snk)
self.tb.run()
Ntest = 50
L = len(snk.data())
# Get group delay and estimate of phase offset from the filter itself.
delay = pfb.group_delay()
phase = pfb.phase_offset(freq, fs)
# Create a timeline offset by the filter's group delay
t = [float(x) / (fs*rrate) for x in range(-delay, L-delay)]
# Data of the sinusoid at frequency freq with the delay and phase offset.
expected_data = [math.cos(2.*math.pi*freq*x+phase) + \
1j*math.sin(2.*math.pi*freq*x+phase) for x in t]
dst_data = snk.data()
self.assertComplexTuplesAlmostEqual(expected_data[-Ntest:], dst_data[-Ntest:], 2)
def test_ccc_000(self):
N = 5000 # number of samples to use
fs = 5000.0 # baseband sampling rate
rrate = 3.4321 # resampling rate
nfilts = 32
taps = filter.firdes.complex_band_pass_2(nfilts, nfilts*fs, 50, 400, fs / 10,
attenuation_dB=80,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
freq = 211.123
data = sig_source_c(fs, freq, 1, N)
signal = blocks.vector_source_c(data)
pfb = filter.pfb_arb_resampler_ccc(rrate, taps, nfilts)
snk = blocks.vector_sink_c()
self.tb.connect(signal, pfb, snk)
self.tb.run()
Ntest = 50
L = len(snk.data())
# Get group delay and estimate of phase offset from the filter itself.
delay = pfb.group_delay()
phase = pfb.phase_offset(freq, fs)
# Create a timeline offset by the filter's group delay
t = [float(x) / (fs*rrate) for x in range(-delay, L-delay)]
# Data of the sinusoid at frequency freq with the delay and phase offset.
expected_data = [math.cos(2.*math.pi*freq*x+phase) + \
1j*math.sin(2.*math.pi*freq*x+phase) for x in t]
dst_data = snk.data()
self.assertComplexTuplesAlmostEqual(expected_data[-Ntest:], dst_data[-Ntest:], 2)
def test_ccc_001(self):
N = 50000 # number of samples to use
fs = 5000.0 # baseband sampling rate
rrate = 0.715 # resampling rate
nfilts = 32
taps = filter.firdes.complex_band_pass_2(nfilts, nfilts*fs, 50, 400, fs / 10,
attenuation_dB=80,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
freq = 211.123
data = sig_source_c(fs, freq, 1, N)
signal = blocks.vector_source_c(data)
pfb = filter.pfb_arb_resampler_ccc(rrate, taps, nfilts)
snk = blocks.vector_sink_c()
self.tb.connect(signal, pfb, snk)
self.tb.run()
Ntest = 50
L = len(snk.data())
# Get group delay and estimate of phase offset from the filter itself.
delay = pfb.group_delay()
phase = pfb.phase_offset(freq, fs)
# Create a timeline offset by the filter's group delay
t = [float(x) / (fs*rrate) for x in range(-delay, L-delay)]
# Data of the sinusoid at frequency freq with the delay and phase offset.
expected_data = [math.cos(2.*math.pi*freq*x+phase) + \
1j*math.sin(2.*math.pi*freq*x+phase) for x in t]
joalcava/space_invaders | heart.py | Python | gpl-3.0 | 626 | 0.004808 |
#! /usr/bin/python3
import pygame
from colors import Colors
class Heart(pygame.sprite.Sprite):
def __init__(self, pos):
pygame.sprite.Sprite.__init__(self)
self.image, self.rect = self.load_image("heart.png")
self.rect.x = pos[0]
self.rect.y = pos[1]
def load_image(self, name, colorkey=None):
image = pygame.image.load(name)
image = image.convert_alpha()
if colorkey is not None:
if colorkey == -1:
colorkey = image.get_at((0,0))
image.set_colorkey(colorkey, pygame.RLEACCEL)
return image, image.get_rect()
BigFatNoob-NCSU/x9115george2 | hw/code/2/4_3.py | Python | mit | 1,206 | 0.024876 |
__author__ = 'panzer'
from swampy.TurtleWorld import *
from math import radians, sin
def initTurtle(delay = 0.01):
"""
Initializes a turtle object
:param delay: Delay before each action of the turtle. Lower it is the faster the turtle moves.
:return: turtle object
"""
TurtleWorld()
t = Turtle()
t.delay = delay
return t
def isosceles(t, eq_side, ineq_side, angle):
"""
Draws an isosceles triangle
:param t: Turtle object
:param eq_side: Equal Side
:param ineq_side: Inequal Side
:param angle: Angle by the inequal side
:return: draws isosceles triangle
"""
fd(t, eq_side)
lt(t, angle)
fd(t, ineq_side)
lt(t, angle)
fd(t, eq_side)
def pie(t, n, length):
"""
Draws a pie
:param t: Turtle object
:param n: number of sides
:param length: length of each side
:return: Draws a Pie(Spiked polygon)
"""
angle = float(360.0/n)
eq_side = length/2.0/sin(radians(angle/2.0))
for _ in range(n):
isosceles(t, eq_side, length, 180 - (180 - angle)/2.0)
lt(t, 180)
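# Illustrative check of the geometry above (not in the original script): for
# n = 6 and length = 100 the central angle is 360/6 = 60 degrees, so
# eq_side = 100 / 2.0 / sin(radians(30)) = 100.0.
print(100 / 2.0 / sin(radians(60.0 / 2.0)))  # ~100.0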
if __name__ == '__main__':
# Figure 4.2 a
pie(initTurtle(), 5, 100)
# Figure 4.2 a
pie(initTurtle(), 6, 100)
# Figure 4.2 a
pie(initTurtle(), 7, 100)
wait_for_user()
nschloe/pyfvm | src/pyfvm/__about__.py | Python | gpl-3.0 | 22 | 0 |
__version__ = "0.3.9"
dave-the-scientist/brugia_project | get_knockout_info.py | Python | gpl-3.0 | 20,007 | 0.006998 |
"""
Notes:
- Brugia protein sequences: https://www.ncbi.nlm.nih.gov/bioproject/PRJNA10729
- wBm protein sequences: https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?id=292805
- BLASTP against Reference proteins (refseq protein) from Human, using BLOSUM45 matrix.
- BLASTP against nr proteins from O. volvulus and wOv, using BLOSUM45 matrix.
- Caution about the Oncho results; I'm not sure how many protein sequences have been annotated.
- The ChEMBL search results were performed under the "Target Search" tab on their website. Downloaded as a tab-deliminited file.
"""
import os, cPickle, pandas, re
from molecbio import sequ
from cobra.flux_analysis import single_reaction_deletion, double_reaction_deletion
from model_tools import load_model, id_bottleneck_metabolites
import xml.etree.ElementTree as ET
def get_rxns_to_delete(model):
rxn_to_genes = {}
for rxn in model.reactions:
if not rxn.gene_names or not rxn.id.startswith(('R', 'ACYLCOA', 'N00001')):
continue
rxn_to_genes[rxn.id] = [g.strip() for g in rxn.gene_names.split(';')]
return rxn_to_genes
def do_deletions(rxn_data, model, rxn_to_genes, do_double_ko=False, obj_fraction=0.0):
fraction_epsilon = 0.0001
orig_f = float(model.optimize().f)
s_rates, s_stats = single_reaction_deletion(model, list(rxn_to_genes.keys()))
print('Original objective %.1f; %i reactions knocked out.' % (orig_f, len(s_stats)))
print('Calculating model deficiencies for each knockout...')
for r_id, new_f in s_rates.items():
if abs(new_f) < fraction_epsilon:
new_f = 0.0
stat = s_stats[r_id]
if new_f/orig_f <= obj_fraction+fraction_epsilon:
if stat == 'optimal':
deficiencies = find_model_deficiencies(model, orig_f, new_f, r_id)
else:
deficiencies = 'infeasible'
rxn_data[r_id] = {'objective':round(new_f/orig_f*100, 1), 'deficiencies':deficiencies, 'genes':rxn_to_genes[r_id]}
if do_double_ko:
double_rxn_ids = [r for r in list(rxn_to_genes.keys()) if r not in rxn_data]
print('Performing double knockouts on %i candidates...' % len(double_rxn_ids))
double_ko_data = double_reaction_deletion(model, double_rxn_ids[:5], number_of_processes=3)
d_r1, d_r2, d_rates = double_ko_data['y'], double_ko_data['x'], double_ko_data['data']
def find_model_deficiencies(model, orig_f, new_f, r_id):
deficiencies = []
ob = model.reactions.get_by_id(r_id).bounds
model.reactions.get_by_id(r_id).bounds = (0,0)
diffs = id_bottleneck_metabolites(model, new_f, 'BIOMASS', threshold=1.0)
for recovered_f, mtb_id in diffs:
def_str = '%s (%.1f)' % (mtb_id, recovered_f/orig_f*100)
sub_defs = []
for sub_f, sub_mtb_id in id_bottleneck_metabolites(model, new_f, mtb_id.upper(), threshold=1.0):
sub_defs.append('%s(%.1f)' % (sub_mtb_id, sub_f/orig_f*100))
if sub_defs:
def_str += ' [%s]' % ', '.join(sub_defs)
deficiencies.append(def_str)
model.reactions.get_by_id(r_id).bounds = ob
if not deficiencies:
return 'unrecoverable'
else:
return ', '.join(deficiencies)
def process_gene_data(rxn_data):
gene_data = {}
for r_id, data in rxn_data.items():
for gene in set(data['genes']):
g_entry = generate_gene_entry(data, r_id, gene)
gene_data.setdefault(gene, []).append(g_entry)
for gene, entries in gene_data.items():
rs_per_g = len(entries)
if rs_per_g > 1:
for e in entries:
e['num_reactions'] = rs_per_g
return gene_data
def generate_gene_entry(r_data, r_id, gene):
g_data = {}
if len(set(r_data['genes'])) == 1:
g_data['other_genes'] = ''
else:
g_data['other_genes'] = ','.join(sorted(list(set(r_data['genes']) - set([gene]))))
g_data['reaction'] = r_id
g_data['objective'] = r_data['objective']
g_data['deficiencies'] = r_data['deficiencies']
g_data['num_reactions'] = 1
return g_data
# # # Save/load functions
def save_data_object(data_obj, file_path):
with open(file_path, 'wb') as f:
cPickle.dump(data_obj, f, protocol=0)
print('Saved data to %s' % file_path)
def load_data_object(file_path):
with open(file_path, 'rb') as f:
data_obj = cPickle.load(f)
print('Loaded data from %s' % file_path)
return data_obj
def save_data_to_excel(gene_data, gene_data_out_file, expression_headings):
min_column_width = 10
header_bg = '#DEEDED'
sheet_name = 'Single knockouts'
gene_header = 'Gene ID'
headers_atts = [('# Reactions','num_reactions'), ('Reaction','reaction'), ('Associated genes','other_genes'), ('Objective %','objective'), ('Biomass deficiencies','deficiencies')]
ortho_headers = ['Human homologs\n(#|% identity|% coverage)', 'O. volvulus homologs\n(#|% identity|% coverage)']
chembl_headers = ['# ChEMBL hits', 'ChEMBL hits\n(% identity|species)']
data = {h[0]:[] for h in headers_atts+expression_headings}
for h in [gene_header] + ortho_headers + chembl_headers:
data[h] = []
gene_order = sorted(list(gene_data.keys()))
gene_order.sort(key=lambda g:gene_data[g][0]['deficiencies'])
for gene in gene_order:
for g_data in gene_data[gene]:
data[gene_header].append(gene)
for h, att in headers_atts:
data[h].append(g_data.get(att, 'NOT FOUND'))
human_hlogs = '%i | %.1f | %.1f' % (g_data['num_human_prots'], g_data['human_prot_identity'],g_data['human_prot_coverage']) if g_data['num_human_prots'] else ' '
data[ortho_headers[0]].append(human_hlogs)
oncho_hlogs = '%i | %.1f | %.1f' % (g_data['num_oncho_prots'], g_data['oncho_prot_identity'],g_data['oncho_prot_coverage']) if g_data['num_oncho_prots'] else ' '
data[ortho_headers[1]].append(oncho_hlogs)
data[chembl_headers[0]].append(g_data.get('num_chembl_hits', 0))
data[chembl_headers[1]].append(g_data.get('chembl_hits', ''))
if '_max_observed_expression' in g_data['expression_levels']:
max_expression = round(g_data['expression_levels']['_max_observed_expression'], 1)
else:
max_expression = " "
data[expression_headings[0][0]].append(max_expression)
for h, ls in expression_headings[1:]:
exp_levels = [g_data['expression_levels'].get(l) for l in ls]
data[h].append(' | '.join(exp_levels))
col_headers = [gene_header] + [h[0] for h in headers_atts] + [i for i in ortho_headers+chembl_headers] + [j[0] for j in expression_headings]
writer = pandas.ExcelWriter(gene_data_out_file, engine='xlsxwriter')
df = pandas.DataFrame(data)[col_headers] # The [] specifies the order of the columns.
df.to_excel(writer, sheet_name=sheet_name, index=False, startrow=1, header=False)
worksheet = writer.sheets[sheet_name]
header_format = writer.book.add_format({'bold': True, 'text_wrap': True, 'align': 'center', 'valign': 'top', 'bg_color': header_bg, 'border': 1})
for i, h in enumerate(col_headers):
col_w = max(len(line.strip()) for line in h.splitlines())
col_width = max(col_w+1, min_column_width)
if i in (0, 2, 3, 5, 9):
col_format = writer.book.add_format({'align': 'left'})
elif i == 10:
col_format = writer.book.add_format({'align': 'center'})
else:
col_format = writer.book.add_format({'align': 'center'})
worksheet.set_column(i, i, col_width, col_format)
worksheet.write(0, i, h, header_format) # Header added manually.
worksheet.freeze_panes(1, 0) # Freezes header row.
writer.save()
print('Data saved to %s' % gene_data_out_file)
# # # Getting protein names and sequences
def save_prot_names_list(gene_data):
prot_list_file = 'utility/b_mal_4.5-wip_single_ko_prot_names.txt'
prot_list = sorted(gene_data.keys())
with open(prot_list_file, 'w') as f:
f.write('\n'.join(prot_list))
print('Saved protein list to %s' % prot_list_file)
def get_p
WladimirSidorenko/SentiLex | scripts/rao.py | Python | mit | 8,962 | 0 |
#!/usr/bin/env python2.7
# -*- mode: python; coding: utf-8; -*-
"""Module for generating lexicon using Rao and Ravichandran's method (2009).
"""
##################################################################
# Imports
from __future__ import unicode_literals, print_function
from blair_goldensohn import build_mtx, seeds2seedpos
from common import POSITIVE, NEGATIVE, NEUTRAL
from graph import Graph
from itertools import chain
from scipy import sparse
import numpy as np
import sys
##################################################################
# Constants
POS_IDX = 0
NEG_IDX = 1
NEUT_IDX = 2
POL_IDX = 1
SCORE_IDX = 2
MAX_I = 300
IDX2CLS = {POS_IDX: POSITIVE, NEG_IDX: NEGATIVE, NEUT_IDX: NEUTRAL}
##################################################################
# Methods
def _eq_sparse(a_M1, a_M2):
"""Compare two sparse matrices.
@param a_M1 - first sparse matrix to compare
@param a_M2 - second sparse matrix to compare
@return True if both matrices are equal, non-False otherwise
"""
if type(a_M1) != type(a_M2):
return False
if not np.allclose(a_M1.get_shape(), a_M1.get_shape()):
return False
X, Y = a_M1.nonzero()
IDX1 = set([(x, y) for x, y in zip(X, Y)])
X, Y = a_M2.nonzero()
IDX2 = [(x, y) for x, y in zip(X, Y) if (x, y) not in IDX1]
IDX = list(IDX1)
IDX.extend(IDX2)
IDX.sort()
for x_i, y_i in IDX:
# print("a_M1[{:d}, {:d}] = {:f}".format(x_i, y_i, a_M1[x_i, y_i]))
# print("a_M2[{:d}, {:d}] = {:f}".format(x_i, y_i, a_M2[x_i, y_i]))
# print("is_close", np.isclose(a_M1[x_i, y_i], a_M2[x_i, y_i]))
if not np.isclose(a_M1[x_i, y_i], a_M2[x_i, y_i]):
return False
return True
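# Illustrative check of _eq_sparse (not part of the original script): two CSR
# matrices with identical values compare equal, while a scaled copy does not.
def _demo_eq_sparse():
    A = sparse.csr_matrix([[0., 1.], [2., 0.]])
    assert _eq_sparse(A, A.copy())
    assert not _eq_sparse(A, 2 * A)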
def _mtx2tlist(a_Y, a_term2idx):
"""Convert matrix to a list of polar terms.
@param a_Y - matrix of polar terms
@param a_terms2idx - mapping from terms to their matrix indices
@return list of 3-tuples (word, polarity, score)
"""
ret = []
iscore = 0.
irow = None
lex2lidx = {}
ipol = lidx = 0
for (iword, ipos), idx in a_term2idx.iteritems():
# obtain matrix row for that term
irow = a_Y.getrow(idx).toarray()
# print("irow =", repr(irow))
ipol = irow.argmax(axis=1)[0]
iscore = irow[0, ipol]
# print("ipol =", repr(ipol))
# print("iscore =", repr(iscore))
if ipol != NEUT_IDX:
ipol = IDX2CLS[ipol]
if iword in lex2lidx:
lidx = lex2lidx[iword]
if abs(iscore) > abs(ret[lidx][SCORE_IDX]):
ret[lidx][POL_IDX] = ipol
ret[lidx][SCORE_IDX] = iscore
else:
lex2lidx[iword] = len(ret)
ret.append([iword, ipol, iscore])
return ret
def _sign_normalize(a_Y, a_terms2idx, a_pos, a_neg, a_neut,
a_set_dflt=None):
"""Fix seed values and row-normalize the class matrix.
@param a_Y - class matrix to be changed
@param a_terms2idx - mapping from terms to their matrix indices
@param a_pos - set of lexemes with positive polarity
@param a_neg - set of lexemes with negative polarity
@param a_neut - set of lexemes with neutral polarity
@param a_set_dflt - function to set the default value of an unkown term
@return void
@note modifies the input matrix in place
"""
seed_found = False
for iterm, i in a_terms2idx.iteritems():
if iterm in a_pos:
seed_found = True
a_Y[i, :] = 0.
a_Y[i, POS_IDX] = 1.
elif iterm in a_neg:
seed_found = True
a_Y[i, :] = 0.
a_Y[i, NEG_IDX] = 1.
elif iterm in a_neut:
seed_found = True
a_Y[i, :] = 0.
a_Y[i, NEUT_IDX] = 1.
elif a_set_dflt is not None:
a_set_dflt(a_Y, i)
assert seed_found, "No seed term found in matrix."
# normalize class scores
Z = a_Y.sum(1)
x, y = a_Y.nonzero()
for i, j in zip(x, y):
# print("a_Y[{:d}, {:d}] =".format(i, j), repr(a_Y[i, j]))
# print("Z[{:d}, 0] =".format(i), repr(Z[i, 0]))
a_Y[i, j] /= float(Z[i, 0]) or 1.
# print("*a_Y[{:d}, {:d}] =".format(i, j), repr(a_Y[i, j]))
def prune_normalize(a_M):
"""Make each of the adjacency matrix sum up to one.
Args:
a_M (scipy.sparse.csr): matrix to be normalized
Returns:
void:
Note:
modifies the input matrix in place
"""
# remove negative transitions
nonzero_xy = a_M.nonzero()
for i, j in zip(*nonzero_xy):
if a_M[i, j] < 0.:
a_M[i, j] = 0.
a_M.prune()
# normalize all outgoing transitions
Z = a_M.sum(0)
nonzero_xy = a_M.nonzero()
for i, j in zip(*nonzero_xy):
a_M[i, j] /= float(Z[0, j]) or 1.
def rao_min_cut(a_germanet, a_pos, a_neg, a_neut, a_seed_pos,
a_ext_syn_rels):
"""Extend sentiment lexicons using the min-cut method of Rao (2009).
@param a_germanet - GermaNet instance
@param a_pos - set of lexemes with positive polarity
@param a_neg - set of lexemes with negative polarity
@param a_neut - set of lexemes with neutral polarity
@param a_seed_pos - part-of-speech class of seed synsets ("none" for no
restriction)
@param a_ext_syn_rels - use extended set of synonymous relations
@return list of polar terms, their polarities, and scores
"""
sgraph = Graph(a_germanet, a_ext_syn_rels)
# partition the graph into subjective and objective terms
mcs, cut_edges, _, _ = sgraph.min_cut(a_pos | a_neg, a_neut, a_seed_pos)
print("min_cut_score (subj. vs. obj.) = {:d}".format(mcs),
file=sys.stderr)
# remove edges belonging to the min cut (i.e., cut the graph)
for isrc, itrg in cut_edges:
if isrc in sgraph.nodes:
sgraph.nodes[isrc].pop(itrg, None)
# separate the graph into positive and negative terms
mcs, _, pos, neg = sgraph.min_cut(a_pos, a_neg, a_seed_pos)
print("min_cut_score (pos. vs. neg.) = {:d}".format(mcs),
file=sys.stderr)
ret = [(inode[0], POSITIVE, 1.) for inode in pos]
ret.extend((inode[0], NEGATIVE, -1.) for inode in neg)
return ret
def rao_lbl_prop(a_germanet, a_pos, a_neg, a_neut, a_seed_pos,
a_ext_syn_rels):
"""Extend sentiment lexicons using the lbl-prop method of Rao (2009).
@param a_germanet - GermaNet instance
@param a_pos - set of lexemes with positive polarity
@param a_neg - set of lexemes with negative polarity
@param a_neut - set of lexemes with neutral polarity
@param a_seed_pos - part-of-speech class of seed synsets ("none" for no
restriction)
@param a_ext_syn_rels - use extended set of synonymous relations
@return list of polar terms, their polarities, and scores
"""
if a_seed_pos is None:
a_seed_pos = ["adj", "nomen", "verben"]
else:
a_seed_pos = [a_seed_pos]
a_pos = seeds2seedpos(a_pos, a_seed_pos)
a_neg = seeds2seedpos(a_neg, a_seed_pos)
a_neut = seeds2seedpos(a_neut, a_seed_pos)
# obtain and row-normalize the adjacency matrix
terms = set((ilex, ipos)
for isynid, ipos in a_germanet.synid2pos.iteritems()
for ilexid in a_germanet.synid2lexids[isynid]
for ilex in a_germanet.lexid2lex[ilexid]
)
terms2idx = {iterm: i for i, iterm in enumerate(terms)}
M = build_mtx(a_germanet, terms2idx, set(),
a_ext_syn_rels, len(terms))
prune_normalize(M)
# no need to transpose M[i, j] is the link going from node j to the node i;
# and, in Y, the Y[j, k] cell is the polarity score of the class k for the
# term j
# M = M.transpose()
# check that the matrix is column normalized
assert np.all(i == 0 or np.isclose([i], [1.])
for i in M.sum(0)[0, :])
# initialize label matrix
Y = sparse.lil_matrix((len(terms), len(IDX2CLS)), dtype=np.float32)
def _set_neut_one(X, i):
X[i, NEUT_IDX] = 1.
_sign_normalize(Y, terms2idx, a_pos, a_neg, a_neut,
JamesHyunKim/myhdl | hdlmake/tools/aldec/aldec.py | Python | gpl-3.0 | 3,541 | 0.003106 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013, 2014 CERN
# Author: Pawel Szostek (pawel.szostek@cern.ch)
# Multi-tool support by Javier D. Garcia-Lasheras (javier@garcialasheras.com)
#
# This file is part of Hdlmake.
#
# Hdlmake is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hdlmake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hdlmake. If not, see <http://www.gnu.org/licenses/>.
#
import string
from string import Template
import fetch
from makefile_writer import MakefileWriter
import logging
class ToolControls(MakefileWriter):
def detect_version(self, path):
pass
def get_keys(self):
tool_info = {
'name': 'Aldec Active-HDL',
'id': 'aldec',
'windows_bin': 'vsimsa',
'linux_bin': None
}
return tool_info
def get_standard_libraries(self):
ALDEC_STANDARD_LIBS = ['ieee', 'std']
return ALDEC_STANDARD_LIBS
def generate_simulation_makefile(self, fileset, top_module):
# TODO: ??
from srcfile import VHDLFile, VerilogFile, SVFile
makefile_tmplt_1 = string.Template("""TOP_MODULE := ${top_module}
ALDEC_CRAP := \
run.command \
library.cfg
#target for performing local simulation
sim: sim_pre_cmd
""")
makefile_text_1 = makefile_tmplt_1.substitute(
top_module=top_module.top_module
)
self.write(makefile_text_1)
self.writeln("\t\techo \"# Active-HDL command file, generated by HDLMake\" > run.command")
self.writeln()
self.writeln("\t\techo \"# Create library and set as default target\" >> run.command")
self.writeln("\t\techo \"alib work\" >> run.command")
self.writeln("\t\techo \"set worklib work\" >> run.command")
self.writeln()
self.writeln("\t\techo \"# Compiling HDL source files\" >> run.command")
for vl in fileset.filter(VerilogFile):
self.writeln("\t\techo \"alog " + vl.rel_path() + "\" >> run.command")
for sv in fileset.filter(SVFile):
self.writeln("\t\techo \"alog " + sv.rel_path() + "\" >> run.command")
for vhdl in fileset.filter(VHDLFile):
self.writeln("\t\techo \"acom " + vhdl.rel_path() + "\" >> run.command")
self.writeln()
makefile_tmplt_2 = string.Template("""
\t\tvsimsa -do run.command
sim_pre_cmd:
\t\t${sim_pre_cmd}
sim_post_cmd: sim
\t\t${sim_post_cmd}
#target for cleaning all intermediate stuff
clean:
\t\trm -rf $$(ALDEC_CRAP) work
#target for cleaning final files
mrproper: clean
\t\trm -f *.vcd *.asdb
.PHONY: mrproper clean sim sim_pre_cmd sim_post_cmd
""")
if top_module.sim_pre_cmd:
sim_pre_cmd = top_module.sim_pre_cmd
else:
sim_pre_cmd = ''
if top_module.sim_post_cmd:
sim_post_cmd = top_module.sim_post_cmd
else:
sim_post_cmd = ''
makefile_text_2 = makefile_tmplt_2.substitute(
sim_pre_cmd=sim_pre_cmd,
sim_post_cmd=sim_post_cmd,
)
self.write(makefile_text_2)
benschmaus/catapult | telemetry/telemetry/internal/actions/loop_unittest.py | Python | bsd-3-clause | 2,356 | 0.002971 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import decorators
from telemetry.internal.actions import loop
from telemetry.testing import tab_test_case
import py_utils
AUDIO_1_LOOP_CHECK = 'window.__hasEventCompleted("#audio_1", "loop");'
VIDEO_1_LOOP_CHECK = 'window.__hasEventCompleted("#video_1", "loop");'
class LoopActionTest(tab_test_case.TabTestCase):
def setUp(self):
tab_test_case.TabTestCase.setUp(self)
self.Navigate('video_test.html')
@decorators.Disabled('android', 'linux') # crbug.com/418577
def testLoopWithNoSelector(self):
"""Tests that with no selector Loop action loops first media element."""
action = loop.LoopAction(loop_count=2, selector='#video_1',
timeout_in_seconds=10)
action.WillRunAction(self._tab)
action.RunAction(self._tab)
# Assert only first video has played.
self.assertTrue(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
self.assertFalse(self._tab.EvaluateJavaScript(AUDIO_1_LOOP_CHECK))
@decorators.Disabled('android', 'linux') # crbug.com/418577
def testLoopWithAllSelector(self):
"""Tests that Loop action loops all video elements with selector='all'."""
action = loop.LoopAction(loop_count=2, selector='all',
timeout_in_seconds=10)
action.WillRunAction(self._tab)
# Both videos not playing before running action.
self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
self.assertFalse(self._tab.EvaluateJavaScript(AUDIO_1_LOOP_CHECK))
action.RunAction(self._tab)
# Assert all media elements played.
self.assertTrue(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
self.assertTrue(self._tab.EvaluateJavaScript(AUDIO_1_LOOP_CHECK))
@decorators.Disabled('android', 'linux') # crbug.com/418577
def testLoopWaitForLoopTimeout(self):
"""Tests that wait_for_loop timeout_in_secondss if video does not loop."""
action = loop.LoopAction(loop_count=2, selector='#video_1',
timeout_in_seconds=1)
action.WillRunAction(self._tab)
self.assertFalse(self._tab.EvaluateJavaScript(VIDEO_1_LOOP_CHECK))
self.assertRaises(py_utils.TimeoutException, action.RunAction, self._tab)
plotly/python-api | packages/python/plotly/plotly/validators/sankey/link/hoverlabel/_font.py | Python | mit | 1,877 | 0.000533 |
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="font", parent_name="sankey.link.hoverlabel", **kwargs
):
super(FontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Font"),
data_docs=kwargs.pop(
"data_docs",
"""
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
""",
),
**kwargs
)
earl/beanstalkc | beanstalkc.py | Python | apache-2.0 | 10,921 | 0.000458 |
#!/usr/bin/env python
"""beanstalkc - A beanstalkd Client Library for Python"""
import logging
import socket
import sys
__license__ = '''
Copyright (C) 2008-2016 Andreas Bolka
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
__version__ = '0.4.0'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 11300
DEFAULT_PRIORITY = 2 ** 31
DEFAULT_TTR = 120
DEFAULT_TUBE_NAME = 'default'
class BeanstalkcException(Exception): pass
class UnexpectedResponse(BeanstalkcException): pass
class CommandFailed(BeanstalkcException): pass
class DeadlineSoon(BeanstalkcException): pass
class SocketError(BeanstalkcException):
@staticmethod
def wrap(wrapped_function, *args, **kwargs):
try:
return wrapped_function(*args, **kwargs)
except socket.error:
err = sys.exc_info()[1]
raise SocketError(err)
class Connection(object):
def __init__(self, host=DEFAULT_HOST, port=DEFAULT_PORT, parse_yaml=True,
connect_timeout=socket.getdefaulttimeout()):
if parse_yaml is True:
try:
parse_yaml = __import__('yaml').load
except ImportError:
logging.error('Failed to load PyYAML, will not parse YAML')
parse_yaml = False
self._connect_timeout = connect_timeout
self._parse_yaml = parse_yaml or (lambda x: x)
self.host = host
self.port = port
self.connect()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def connect(self):
"""Connect to beanstalkd server."""
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.settimeout(self._connect_timeout)
SocketError.wrap(self._socket.connect, (self.host, self.port))
self._socket.settimeout(None)
self._socket_file = self._socket.makefile('rb')
def close(self):
"""Close connection to server."""
try:
self._socket.sendall('quit\r\n')
except socket.error:
pass
try:
self._socket.close()
except socket.error:
pass
def reconnect(self):
"""Re-connect to server."""
self.close()
self.connect()
def _interact(self, command, expected_ok, expected_err=[]):
SocketError.wrap(self._socket.sendall, command)
status, results = self._read_response()
if status in expected_ok:
return results
elif status in expected_err:
raise CommandFailed(command.split()[0], status, results)
else:
raise UnexpectedResponse(command.split()[0], status, results)
def _read_response(self):
line = SocketError.wrap(self._socket_file.readline)
if not line:
raise SocketError()
response = line.split()
return response[0], response[1:]
def _read_body(self, size):
body = SocketError.wrap(self._socket_file.read, size)
SocketError.wrap(self._socket_file.read, 2) # trailing crlf
if size > 0 and not body:
raise SocketError()
return body
def _interact_value(self, command, expected_ok, expected_err=[]):
return self._interact(command, expected_ok, expected_err)[0]
def _interact_job(self, command, expected_ok, expected_err, reserved=True):
jid, size = self._interact(command, expected_ok, expected_err)
body = self._read_body(int(size))
return Job(self, int(jid), body, reserved)
def _interact_yaml(self, command, expected_ok, expected_err=[]):
size, = self._interact(command, expected_ok, expected_err)
body = self._read_body(int(size))
return self._parse_yaml(body)
def _interact_peek(self, command):
try:
return self._interact_job(command, ['FOUND'], ['NOT_FOUND'], False)
except CommandFailed:
return None
# -- public interface --
def put(self, body, priority=DEFAULT_PRIORITY, delay=0, ttr=DEFAULT_TTR):
"""Put a job into the current tube. Returns job id."""
assert isinstance(body, str), 'Job body must be a str instance'
jid = self._interact_value('put %d %d %d %d\r\n%s\r\n' % (
priority, delay, ttr, len(body), body),
['INSERTED'],
['JOB_TOO_BIG', 'BURIED', 'DRAINING'])
return int(jid)
def reserve(self, timeout=None):
"""Reserve a job from one of the watched tubes, with optional timeout
in seconds. Returns a Job object, or None if the request times out."""
if timeout is not None:
command = 'reserve-with-timeout %d\r\n' % timeout
else:
command = 'reserve\r\n'
try:
return self._interact_job(command,
['RESERVED'],
['DEADLINE_SOON', 'TIMED_OUT'])
except CommandFailed:
exc = sys.exc_info()[1]
_, status, results = exc.args
if status == 'TIMED_OUT':
return None
elif status == 'DEADLINE_SOON':
raise DeadlineSoon(results)
def kick(self, bound=1):
"""Kick at most bound jobs into the ready queue."""
return int(self._interact_value('kick %d\r\n' % bound, ['KICKED']))
def kick_job(self, jid):
"""Kick a specific job into the ready queue."""
self._interact('kick-job %d\r\n' % jid, ['KICKED'], ['NOT_FOUND'])
def peek(self, jid):
"""Peek at a job. Returns a Job, or None."""
return self._interact_peek('peek %d\r\n' % jid)
def peek_ready(self):
"""Peek at next ready job. Returns a Job, or None."""
return self._interact_peek('peek-ready\r\n')
def peek_delayed(self):
"""Peek at next delayed job. Returns a Job, or None."""
return self._interact_peek('peek-delayed\r\n')
def peek_buried(self):
"""Peek at next buried job. Returns a Job, or None."""
return self._interact_peek('peek-buried\r\n')
def tubes(self):
"""Return a list of all existing tubes."""
return self._interact_yaml('list-tubes\r\n', ['OK'])
def using(self):
"""Return the tube currently being used."""
return self._interact_value('list-tube-used\r\n', ['USING'])
def use(self, name):
"""Use a given tube."""
return self._interact_value('use %s\r\n' % name, ['USING'])
def watching(self):
"""Return a list of all tubes being watched."""
return self._interact_yaml('list-tubes-watched\r\n', ['OK'])
def watch(self, name):
"""Watch a given tube."""
return int(self._interact_value('watch %s\r\n' % name, ['WATCHING']))
def ignore(self, name):
"""Stop watching a given tube."""
try:
return int(self._interact_value('ignore %s\r\n' % name,
['WATCHING'],
['NOT_IGNORED']))
except CommandFailed:
# Tried to ignore the only tube in the watchlist, which failed.
return 0
def stats(self):
"""Return a dict of beanstalkd statistics."""
return self._interact_yaml('stats\r\n', ['OK'])
def stats_tube(self, name):
"""Return a dict of stats about a given tube."""
return self._interact_yaml('stats-tube %s\r\n' % name,
['OK'],
['NOT_FOUND'])
def pause_tube(self, name, delay):
"""Pau
bowenliu16/deepchem | deepchem/dock/__init__.py | Python | gpl-3.0 | 637 | 0.00157 |
"""
Imports all submodules
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from deepchem.dock.pose_generation import PoseGenerator
from deepchem.dock.pose_generation import VinaPoseGenerator
from deepchem.dock.pose_scoring import PoseScorer
from deepchem.dock.pose_scoring import GridPoseScorer
from deepchem.dock.docking import Docker
from deepchem.dock.docking import VinaGridRFDocker
from deepchem.dock.docking import VinaGridDNNDocker
from deepchem.dock.binding_pocket import ConvexHullPocketFinder
from deepchem.dock.binding_pocket import RFConvexHullPocketFinder
mapattacker/cheatsheets | python/pyspark.py | Python | mit | 10,361 | 0.02326 |
import findspark #pyspark can't be detected if file is at other folders than where it is installed
findspark.init('/home/jake/spark/spark-2.2.0-bin-hadoop2.7')
## 1) SPARK DATAFRAME
#--------------------------------------------------------
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
spark = SparkSession.builder.appName("Basics").getOrCreate() #appName can be anything
## READING
#--------------------------------------------------------
df = spark.read.json('people.json') #json
df = spark.read.csv('appl_stock.csv', inferSchema=True, header=True) #csv
df = spark.read.csv(r'/home/jake/Desktop/test3.txt') #text
df.show()
df.show(20, False) #show non-truncated results
df.head() #shows a list of row objects
# [Row(age=None, name='Michael'), Row(age=30, name='Andy')]
## WRITING
#--------------------------------------------------------
# csv
df.toPandas().to_csv("sample.csv", header=True)
# will auto write to hdfs
#best to check & define column data types first
df.write.option('path','jake/foldername/operator_lookup.parquet').partitionBy("datestring").format("parquet").saveAsTable("operator_lookup")
## BASICS
#--------------------------------------------------------
df[:10].collect() #collect the result instead of showing
row.asDict() #produce as dictionary
df.show() #print the results
df.count() # print row count
len(df.columns) #print column count
df.printSchema() #print schema, datatypes, nullable
## SCHEMA & DATATYPES
#--------------------------------------------------------
#changing the schema
from pyspark.sql.types import StructField,StringType,IntegerType,StructType
# true = nullable, false = non-nullable
schema = StructType([StructField("age", IntegerType(), True),
StructField("name", StringType(), True)])
df = spark.read.json('people.json', schema)
df.printSchema()
# FORMAT DECIMAL PLACES
sales_std.select(format_number('std',2)).show()
# CONVERT DATATYPE
df = df.withColumn("Acct-Session-Time", df["Acct-Session-Time"].cast("integer"))
## CREATE DATAFRAME
#--------------------------------------------------------
# value, column name
df = sqlContext.createDataFrame([('cat \n\n elephant rat \n rat cat', )], ['word'])
# empty dataframe
from pyspark.sql.types import StructType, StructField, IntegerType, StringType
schema = StructType([StructField("k", StringType(), True), StructField("v", IntegerType(), False)])
# or df = sc.parallelize([]).toDF(schema)
df = spark.createDataFrame([], schema)
# from pandas
df = pd.DataFrame([("foo", 1), ("bar", 2)], columns=("k", "v"))
sqlCtx = SQLContext(sc)
sqlCtx.createDataFrame(df).show()
# from dictionary
from pyspark.sql import SparkSession
from pyspark.sql import Row
from pyspark import SparkContext
dict = {1: 'test', 2: 'test2'}
sc = SparkContext()
spark = SparkSession(sc)
rdd = sc.parallelize([dict])
#toDF() needs a SparkSession, must be a row type before conversion to df
rdd.map(lambda x: Row(**x)).toDF().show() # **x transposes the row
# OR using createDataFrame
rdd = rdd.map(lambda x: Row(**x))
spark.createDataFrame(rdd).show()
## APPENDING NEW DATA
#--------------------------------------------------------
firstDF = spark.range(3).toDF("myCol")
newRow = spark.createDataFrame([[20]])
appended = firstDF.union(newRow)
display(appended)
## EXPLORATORY
#--------------------------------------------------------
df.describe() #show datatypes
df.describe().show() #show max, min, stdev
## COLUMNS
#--------------------------------------------------------
df.columns #show column names
df.select('age').show() #have to use select to choose entire column
df.select(['age','name']).show() #multiple columns
# NEW COLUMNS
# Adding a new column with a simple copy
df.withColumn('newage',df['age']).show()
df.withColumn('add_one_age',df['age']+1).show() #with calculation
# RENAME COLUMN
df = df.withColumnRenamed('age','supernewage')
# DROP COLUMNS
df.drop('columnName')
## SQL
#--------------------------------------------------------
# Register the DataFrame as a SQL temporary view
df.createOrReplaceTempView("people")
spark.sql("SELECT * FROM people WHERE age=30").show()
# ORDER BY
df.orderBy("Sales").show() #ascending
df.orderBy(df["Sales"].desc()).show() #descending
# REPLACE
from pyspark.sql.functions import *
df = df.withColumn('address', regexp_replace('address', 'lane', 'ln')) #in column address, replace lane with ln
## UDF (User-Defined Function)
#--------------------------------------------------------
from pyspark.sql.functions import udf
# using normal function
def CountryCode(input_value):
    # ...derive the country code from input_value here...
    return derived_country_code
udf_CountryCode = udf(CountryCode)
df = df.select("*", udf_CountryCode(df['target_column']).alias('new_column'))
# using udf lambda
udf_UserName = udf(lambda x: x.split('@')[0])
df = df.select("*", df('target_column').alias('new_column'))
## NULL VALUES
#--------------------------------------------------------
# DROP NAN
# Drop any row that contains missing data
df.na.drop().show()
# Has to have at least 2 NON-null values in a row
df.na.drop(thresh=2).show()
# rows in Sales that have null
df.na.drop(subset=["Sales"]).show()
# rows that have any nulls
df.na.drop(how='any').show()
# rows that have all nulls
df.na.drop(how='all').show()
# FILL NAN
# Spark is actually smart enough to match up & fill the data types.
# only fill in strings
df.na.fill('NEW VALUE').show()
# only fill in numeric
df.na.fill(0).show()
# fill in specific column
df.na.fill('No Name',subset=['Name']).show()
# fill in values with mean
df.na.fill(df.select(mean(df['Sales'])).collect()[0][0],['Sales']).show()
## FILTERING
#--------------------------------------------------------
df.filter("Close < 500").show() #SQL synatx
df.filter(df["Close"] < 500).show() #Python synatx
df.filter("Close<500").select(['Open','Close']).show()
#Multiple conditions
df.filter( (df["Close"] < 200) & (df['Open'] > 200) ).show() #AND &
df.filter( (df["Close"] < 200) | (df['Open'] > 200) ).show() #OR |
df.filter( (df["Close"] < 200) & ~(df['Open'] < 200) ).show() #NOT ~
df.filter(df["Low"] == 197.16).show()
## AGGREGATE
#--------------------------------------------------------
df.groupBy("Company").mean().show() #Mean
df.groupBy("Company").count().show() #Count
df.groupBy("Company").max().show() #Max
df.groupBy("Company").min().show() #Min
df.groupBy("Company").sum().show() #Sum
df.agg({'Sales':'max'}).show() #aggregate across all rows to get one result
from pyspark.sql.functions import countDistinct, avg, stddev
df.select(countDistinct("Sales")).show() #count distinct
df.select(countDistinct("Sales").alias("Distinct Sales")).show() #change alias name
df.select(avg('Sales')).show() #average
df.select(stddev("Sales")).show() #stdev
## DATETIME
#--------------------------------------------------------
from pyspark.sql.functions import (format_number, dayofmonth, hour,
dayofyear, month, year,
weekofyear, date_format)
df.select(dayofmonth(df['Date'])).show() #date of month
df.select(hour(df['Date'])).show() #hour
df.select(dayofyear(df['Date'])).show() #day of year
df.select(month(df['Date'])).show() #month
df.select(year(df['Date'])).show() #year
## 2) USING RDD (Resilient Distributed Dataset)
# spark is slowly transitioning to spark dataframes, but it's still good to learn the original parsing in RDD
# especially when the data is not in dataframe form
#--------------------------------------------------------
from pyspark import SparkConf, SparkContext
# set configuration & spark context object
conf = SparkConf().setMaster("local").setAppName("MinTemperatures")
|
songrun/VectorSkinning
|
src/bezier_constraint_odd_solver.py
|
Python
|
apache-2.0
| 9,730 | 0.063412 |
from generate_chain_system import *
class BezierConstraintSolverOdd( BezierConstraintSolver ):
'''
Free direction, magnitude fixed (for G1 or A).
'''
def update_system_with_result_of_previous_iteration( self, solution ):
### Iterate only over the parts of the matrix that will change,
### such as the lagrange multipliers across G1 or A edges and the right-hand-side.
solution = asarray(solution)
num = len(self.bundles)
assert solution.shape == (num, 4, 2)
for i in range(num):
dir1 = dir_allow_zero( solution[i][1]-solution[i][0] )
dir2 = dir_allow_zero( solution[i][2]-solution[i][3] )
self.bundles[i].directions[0] = dir1
self.bundles[i].directions[1] = dir2
mag1 = mag( solution[i][1]-solution[i][0] )
mag2 = mag( solution[i][2]-solution[i][3] )
self.bundles[i].magnitudes[0] = mag1
self.bundles[i].magnitudes[1] = mag2
## The lagrange multipliers changed, but not the locations of the zeros.
self._update_bundles( lagrange_only = True )
self.system_factored = None
## UPDATE: Actually, if fixed angles are parallel or perpendicular,
## then the lagrange multiplier systems may gain
## or lose zeros. So, reset the symbolic factorization.
## UPDATE 2: If we could update_bundles once with all fixed angles
## not parallel or perpendicular, and then compute the symbolic
## factorization, we could keep it.
## UPDATE 3: Let's try it assuming that the first time through there are no zeros.
## UPDATE 4: I tried it and it makes no difference to performance at all
## up to alec's alligator. So, we'll reset the symbolic factorization
## in case the initial configuration has zeros.
self.system_symbolic_factored = None
def solve( self ):
dim = 2
num = len(self.bundles)
#print 'rhs:'
#print self.rhs.tolist()
if self.system_symbolic_factored is None:
#print 'odd symbolic factoring'
system = self.to_system_solve_t( self.system )
self.system_symbolic_factored = self.compute_symbolic_factorization( system )
self.system_factored = self.system_symbolic_factored( system )
elif self.system_factored is None:
#print 'odd numeric factoring'
system = self.to_system_solve_t( self.system )
self.system_factored = self.system_symbolic_factored( system )
#print 'odd solve'
x = self.system_factored( self.rhs )
# x = linalg.solve( self.system, self.rhs )
# x = scipy.sparse.linalg.spsolve( self.system, self.rhs )
### Return a nicely formatted chain of bezier curves.
x = array( x[:self.total_dofs] ).reshape(-1,4).T
solution = []
for i in range(num):
P = x[:, i*dim:(i+1)*dim ]
solution.append( P )
if parameters.kClampOn == True: solution = clamp_solution( self.bundles, solution )
return solution
def lagrange_equations_for_fixed_opening( self, bundle, is_head ):
## handle the case of open end path.
dofs = self.compute_dofs_per_curve(bundle)
dim = 2
R = zeros( ( sum(dofs), dim ) )
rhs = zeros(R.shape[1])
if is_head:
# assert bundle.constraints[0][1] == True
fixed_positions = bundle.control_points[0][:2]
fixed_positions = asarray(fixed_positions)
'''
Boundary Conditions are as follows:
lambda1 * ( P1x' - constraint_X' ) = 0
lambda2 * ( P1y' - constraint_Y' ) = 0
'''
for i in range( dim ):
R[i*4, i] = 1
rhs = fixed_positions
else:
# assert bundle.constraints[-1][1] == True
fixed_positions = bundle.control_points[-1][:2]
fixed_positions = asarray(fixed_positions)
'''
Boundary Conditions are as follows:
lambda1 * ( P4x' - constraint_X' ) = 0
lambda2 * ( P4y' - constraint_Y' ) = 0
'''
for i in range( dim ):
R[i*4+3, i] = 1
rhs = fixed_positions
return R.T, rhs
def lagrange_equations_for_curve_constraints( self, bundle0, bundle1, angle ):
mag0, mag1 = bundle0.magnitudes[1], bundle1.magnitudes[0]
cos_theta = angle[0]
sin_theta = angle[1]
dim = 2
dofs0 = self.compute_dofs_per_curve(bundle0)
dofs1 = self.compute_dofs_per_curve(bundle1)
dofs = sum(dofs0) + sum(dofs1)
smoothness = bundle0.constraints[1][0]
if smoothness == 'C0': ## C0
'''
Boundary Conditions are as follows:
lambda1 * ( P4x' - Q1x' ) = 0
lambda2 * ( P4y' - Q1y' ) = 0
'''
R = zeros( ( dofs, dim ) )
for i in range( dim ):
R[i*4+3, i] = 1
R[sum(dofs0) + i*4, i] = -1
elif smoothness == 'A': ## fixed angle
'''
Boundary Conditions are as follows:
lambda1 * ( P4x - Q1x ) = 0
lambda2 * ( P4y - Q1y ) = 0
lambda3 * ( mag1(P4x-P3x) + mag0[cos_theta(Q2x-Q1x)-sin_theta(Q2y-Q1y)] ) = 0
lambda4 * ( mag1(P4y-P3y) + mag0[sin_theta(Q2x-Q1x)+cos_theta(Q2y-Q1y)] ) = 0
'''
R = zeros( ( dofs, 2*dim ) )
for i in range( dim ):
R[i*4+3, i] = 1
R[sum(dofs0)+i*4, i] = -1
R[i*4+3, i+dim] = 1
R[i*4+2, i+dim] = -1
R[sum(dofs0):sum(dofs0)+dim, dim:] = asarray([[-cos_theta, sin_theta], [cos_theta, -sin_theta]])
R[-dim*2:-dim, dim:] = asarray([[-sin_theta, -cos_theta], [sin_theta, cos_theta]])
## add weights to lambda
R[ :sum(dofs0), dim: ] *= mag1
R[ sum(dofs0):, dim: ] *= mag0
elif smoothness == 'C1': ## C1
'''
Boundary Conditions are as follows:
lambda1 * ( P4x' - Q1x' ) = 0
lambda2 * ( P4y' - Q1y' ) = 0
lambda3 * ( w_q(P4x' - P3x') + w_p(Q1x' - Q2x')) = 0
lambda4 * ( w_q(P4y' - P3y') + w_p(Q1y' - Q2y')) = 0
'''
R = zeros( ( dofs, 2*dim ) )
for i in range( dim ):
R[i*4+3, i] = 1
R[sum(dofs0)+i*4, i] = -1
R[i*4+3, i+dim] = 1
R[i*4+2, i+dim] = -1
R[sum(dofs0)+i*4+1, i+dim] = -1
R[sum(dofs0)+i*4, i+dim] = 1
## add weights to lambda
R[ :sum(dofs0), dim: ] *= mag1
R[ sum(dofs0):, dim: ] *= mag0
elif smoothness == 'G1': ## G1
R = zeros( ( dofs, 2*dim ) )
for i in range( dim ):
R[i*4+3, i] = 1
R[sum(dofs0)+i*4, i] = -1
R[i*4+3, i+dim] = 1
R[i*4+2, i+dim] = -1
R[sum(dofs0)+i*4+1, i+dim] = -1
R[sum(dofs0)+i*4, i+dim] = 1
## add weights to lambda
R[ :sum(dofs0), dim: ] *= mag1
R[ sum(dofs0):, dim: ] *= mag0
else:
R = zeros( ( dofs, 0 ) )
rhs = zeros(R.shape[1])
fixed_positions = bundle0.control_points[-1][:2]
is_fixed = bundle0.constraints[1][1]
assert type( is_fixed ) == bool
if is_fixed:
fixed_positions = asarray(fixed_positions)
'''
Boundary Conditions are as follows:
lambda1 * ( P4x' - constraint_X' ) = 0
lambda2 * ( P4y' - constraint_Y' ) = 0
'''
R2 = zeros( ( dofs, dim ) )
for i in range( dim ):
R2[i*4+3, i] = 1
R = concatenate((R, R2), axis=1)
rhs = concatenate((rhs, fixed_positions))
return R.T, rhs
def system_for_curve( self, bundle ):
'''
## A is computed using Sage, integral of (tbar.T * tbar) with respect to t.
# A = asarray( [[ 1./7, 1./6, 1./5, 1./4], [ 1./6, 1./5, 1./4, 1./3],
# [ 1./5, 1./4, 1./3, 1./2], [1./4, 1./3, 1./2, 1.]] )
## MAM is computed using Sage. MAM = M * A * M
'''
length = bundle.length
MAM = asarray( self.MAM )
dim = 2
Left = zeros((8, 8))
for i in range(dim):
Left[ i*4:(i+1)*4, i*4:(i+1)*4 ] = MAM[:,:]
return Left*length
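    # A hedged, standalone sketch (not part of this class) of where the MAM constant
    # comes from: A[i][j] = integral over t in [0,1] of t**(3-i) * t**(3-j) = 1/(7-i-j),
    # and MAM = M.T * A * M for the cubic Bezier basis matrix M (which is symmetric,
    # so this equals M*A*M as noted above). Reproducible separately with numpy:
    #   from numpy import asarray, dot
    #   M = asarray([[-1.,  3., -3., 1.],
    #                [ 3., -6.,  3., 0.],
    #                [-3.,  3.,  0., 0.],
    #                [ 1.,  0.,  0., 0.]])
    #   A = asarray([[1./(7-i-j) for j in range(4)] for i in range(4)])
    #   MAM = dot(M.T, dot(A, M))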
    def system_for_curve_with_arc_length( self, bundle ):
'''
        ## Solve the same integral as system_for_curve, only with dt replaced by ds
'''
length = bundle.length
ts = bundle.ts
dts = bundle.dts
dim = 2
Left = zeros( ( 8, 8 ) )
tbar = ones( ( 4, 1 ) )
MAM = zeros( ( 4, 4 ) )
for i in range(len(dts)):
t = (ts[i] + ts[i+1])/2
ds = dts[i]
tbar[0] = t*t*t
tbar[1] = t*t
tbar[2] = t
Mtbar = dot( M.T, tbar )
MAM += dot( Mtbar, Mtbar.T )*ds
for i in range( dim ):
Left[ i*4:( i+1 )*4, i*4:( i+1 )*4 ] = MAM[:,:]
return Left*length
def compute_dofs_per_curve( self, bundle ):
dofs = zeros( 2, dtype = int )
'''
assume open end points can only emerge at the endpoints
'''
for i, (smoothness, is_fixed) in enumerate(bundle.constraints):
if smoothness == 'C0': dofs[i] += 4 ## C0
elif smoothness == 'A': dofs[i] += 4 ##
|
jwren/intellij-community
|
python/testData/completion/dictLiteralCompletion/EmptyLiteralsInCallExpressionsWithQuotes/main.py
|
Python
|
apache-2.0
| 142 | 0 |
from typing import TypedDict
class Point(TypedDict):
x: int
    y: int
def is_even(x: Point) -> bool:
pass
is_even({'<caret>'})
|
lixiangning888/whole_project
|
modules/signatures_merge_tmp/static_authenticode.py
|
Python
|
lgpl-3.0
| 876 | 0.002326 |
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Accuvant, Inc. (bspengler@accuvant.com)
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from lib.cuckoo.common.abstracts import Signature
class Authenticode(Signature):
name = "static_authenticode"
description = "提供一个Authenticode数字签名"
severity = 1
weight = -1
confidence = 30
categories = ["static"]
authors = ["Accuvant"]
minimum = "1.2"
def run(self):
found_sig = False
if "static" in se
|
lf.results:
if "digital_signers" in self.results["static"] and self.results["static"]["digital_
|
signers"]:
for sign in self.results["static"]["digital_signers"]:
self.data.append(sign)
found_sig = True
return found_sig
|
kalbermattenm/historic_cadastre
|
setup.py
|
Python
|
gpl-2.0
| 1,011 | 0.000989 |
# -*- coding: utf-8 -*-
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name='historic_cadastre',
version='0.1',
description='SITN, a sitn project',
author='sitn',
author_email='sitn@ne.ch',
url='http://www.ne.ch/sitn',
install_requires=[
'pyramid',
'SQLAlchemy',
'transaction',
'pyramid_tm',
'pyramid_debugtoolbar',
'pyramid-mako',
'zope.sqlalchemy',
'waitress',
'sqlahelper',
'JSTools',
'httplib2',
'simplejson'
],
packages=find_packages(exclude=['ez_setup']),
include_package_data=True,
zip_safe=False,
entry_points={
'paste.app_factory': [
'main = historic_cadastre:main',
],
'console_scripts': [
'print_tpl = historic_cadastre.scripts.print_tpl:main',
],
},
)
|
plumgrid/plumgrid-nova
|
nova/api/openstack/compute/contrib/os_tenant_networks.py
|
Python
|
apache-2.0
| 7,948 | 0.000881 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import netaddr.core as netexc
from oslo.config import cfg
from webob import exc
from nova.api.openstack import extensions
from nova import context as nova_context
from nova import exception
import nova.network
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova import quota
CONF = cfg.CONF
try:
os_network_opts = [
cfg.BoolOpt("enable_network_quota",
default=False,
help="Enables or disables quotaing of tenant networks"),
cfg.StrOpt('use_neutron_default_nets',
default="False",
deprecated_name='use_quantum_default_nets',
help=('Control for checking for default networks')),
cfg.StrOpt('neutron_default_tenant_id',
default="default",
deprecated_name='quantum_default_tenant_id',
help=('Default tenant id when creating neutron '
'networks'))
]
CONF.register_opts(os_network_opts)
except cfg.DuplicateOptError:
# NOTE(jkoelker) These options are verbatim elsewhere this is here
# to make sure they are registered for our use.
pass
if CONF.enable_network_quota:
opts = [
cfg.IntOpt('quota_networks',
default=3,
help='number of private networks allowed per project'),
]
CONF.register_opts(opts)
QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'os-tenant-networks')
def network_dict(network):
return {"id": network.get("uuid") or network.get("id"),
"cidr": network.get("cidr"),
"label": network.get("la
|
bel")}
class NetworkController(object):
def __init__(self, network_api=None):
self.network_api = nova.network.API()
self._default_networks = []
def _refresh_default_networks(self):
self._default_networks = []
if CONF.use_neutron_default_nets == "True":
try:
self._default_networks = self._get_default_networks()
except Exception:
LOG.exception("Failed to get default netwo
|
rks")
def _get_default_networks(self):
project_id = CONF.neutron_default_tenant_id
ctx = nova_context.RequestContext(user_id=None,
project_id=project_id)
networks = {}
for n in self.network_api.get_all(ctx):
networks[n['id']] = n['label']
return [{'id': k, 'label': v} for k, v in networks.iteritems()]
def index(self, req):
context = req.environ['nova.context']
authorize(context)
networks = self.network_api.get_all(context)
if not self._default_networks:
self._refresh_default_networks()
networks.extend(self._default_networks)
return {'networks': [network_dict(n) for n in networks]}
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
LOG.debug(_("Showing network with id %s") % id)
try:
network = self.network_api.get(context, id)
except exception.NetworkNotFound:
raise exc.HTTPNotFound(_("Network not found"))
return {'network': network_dict(network)}
def delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
if CONF.enable_network_quota:
reservation = QUOTAS.reserve(context, networks=-1)
except Exception:
reservation = None
LOG.exception(_("Failed to update usages deallocating "
"network."))
LOG.info(_("Deleting network with id %s") % id)
try:
self.network_api.delete(context, id)
if CONF.enable_network_quota and reservation:
QUOTAS.commit(context, reservation)
response = exc.HTTPAccepted()
except exception.NetworkNotFound:
response = exc.HTTPNotFound(_("Network not found"))
return response
def create(self, req, body):
if not body:
raise exc.HTTPUnprocessableEntity()
context = req.environ["nova.context"]
authorize(context)
network = body["network"]
keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size",
"num_networks"]
kwargs = dict((k, network.get(k)) for k in keys)
label = network["label"]
if not (kwargs["cidr"] or kwargs["cidr_v6"]):
msg = _("No CIDR requested")
raise exc.HTTPBadRequest(explanation=msg)
if kwargs["cidr"]:
try:
net = netaddr.IPNetwork(kwargs["cidr"])
if net.size < 4:
msg = _("Requested network does not contain "
"enough (2+) usable hosts")
raise exc.HTTPBadRequest(explanation=msg)
except netexc.AddrFormatError:
msg = _("CIDR is malformed.")
raise exc.HTTPBadRequest(explanation=msg)
except netexc.AddrConversionError:
msg = _("Address could not be converted.")
raise exc.HTTPBadRequest(explanation=msg)
networks = []
try:
if CONF.enable_network_quota:
reservation = QUOTAS.reserve(context, networks=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many networks.")
raise exc.HTTPBadRequest(explanation=msg)
try:
networks = self.network_api.create(context,
label=label, **kwargs)
if CONF.enable_network_quota:
QUOTAS.commit(context, reservation)
except Exception:
if CONF.enable_network_quota:
QUOTAS.rollback(context, reservation)
msg = _("Create networks failed")
LOG.exception(msg, extra=network)
raise exc.HTTPServiceUnavailable(explanation=msg)
return {"network": network_dict(networks[0])}
class Os_tenant_networks(extensions.ExtensionDescriptor):
"""Tenant-based Network Management Extension."""
name = "OSTenantNetworks"
alias = "os-tenant-networks"
namespace = ("http://docs.openstack.org/compute/"
"ext/os-tenant-networks/api/v2")
updated = "2012-03-07T09:46:43-05:00"
def get_resources(self):
ext = extensions.ResourceExtension('os-tenant-networks',
NetworkController())
return [ext]
def _sync_networks(context, project_id, session):
ctx = nova_context.RequestContext(user_id=None, project_id=project_id)
ctx = ctx.elevated()
networks = nova.network.api.API().get_all(ctx)
return dict(networks=len(networks))
if CONF.enable_network_quota:
QUOTAS.register_resource(quota.ReservableResource('networks',
_sync_networks,
'quota_networks'))
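# A minimal, self-contained sketch (not Nova code) of the reserve/commit/rollback
# quota pattern used by create() and delete() above; SimpleQuota and
# create_with_quota are hypothetical names used only for illustration.
class SimpleQuota(object):
    def __init__(self, limit):
        self.limit = limit
        self.used = 0

    def reserve(self, count):
        if self.used + count > self.limit:
            raise ValueError("quota exceeded")
        return count                      # the "reservation" handle

    def commit(self, reservation):
        self.used += reservation          # make the reservation permanent

    def rollback(self, reservation):
        pass                              # nothing committed, nothing to undo


def create_with_quota(quota, do_create):
    reservation = quota.reserve(1)        # reserve before doing the work
    try:
        result = do_create()
        quota.commit(reservation)         # commit only after success
        return result
    except Exception:
        quota.rollback(reservation)       # release the reservation on failure
        raise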
|
postlund/home-assistant
|
tests/components/axis/test_switch.py
|
Python
|
apache-2.0
| 2,374 | 0.000842 |
"""Axis switch platform tests."""
from unittest.mock import Mock, call as mock_call
from homeassistant.components import axis
import homeassistant.components.switch as switch
from homeassistant.setup import async_setup_component
from .test_device import NAME, setup_axis_integration
EVENTS = [
{
"operation": "Initialized",
"topic": "tns1:Device/Trigger/Relay",
"source": "RelayToken",
"source_idx": "0",
"type": "LogicalState",
"value": "inactive",
},
{
"operation": "Initialized",
"topic": "tns1:Device/Trigger/Relay",
"source": "RelayToken",
"source_idx": "1",
"type": "LogicalState",
"value": "active",
},
]
async def test_platform_manually_configured(hass):
"""Test that nothing happens when platform is manually configured."""
assert await async_setup_component(
hass, switch.DOMAIN, {"switch": {"platform": axis.DOMAIN}}
)
assert axis.DOMAIN not in hass.data
async def test_no_switches(hass):
"""Test that no output events in Axis results in no switch entities."""
await setup_axis_integration(hass)
assert not hass.states.async_entity_ids("switch")
async def test_switches(hass):
"""Test that switches are loaded properly."""
device = await setup_axis_integration(hass)
device.api.vapix.ports = {"0": Mock(), "1": Mock()}
device.api.vapix.ports["0"].name = "Doorbell"
device.api.vapix.ports["1"].name = ""
for event in EVENTS:
device.api.stream.event.manage_event(event)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids("switch")) == 2
relay_0 = hass.states.get(f"switch.{NAME}_doorbell")
assert relay_0.state == "off"
assert relay_0.name == f"{NAME} Doorbell"
relay_1 = hass.states.get(f"switch.{NAME}_relay_1")
assert relay_1.state == "on"
assert relay_1.name == f"{NAME} Relay 1"
device.api.vapix.ports["0"].action
|
= Mock()
await hass.services.async_call(
"switch", "turn_on", {"entity_id": f"switch.{NAME}_doorbell"}, blocking=True
)
await hass.services.async_call(
"switch", "turn_off", {"entity_id": f"switch.{NAME}_doorbell"}, blocking=True
)
    assert device.api.vapix.ports["0"].action.call_args_list == [
mock_call("/"),
mock_call("\\"),
]
|
ecometrica/gdal2mbtiles
|
tests/test_spatial_reference.py
|
Python
|
apache-2.0
| 1,178 | 0 |
# -*- coding: utf-8 -*-
import pytest
from numpy import array
from numpy.testing import assert_array_almost_equal
from gdal2mbtiles.constants import (EPSG_WEB_MERCATOR,
EPSG3857_EXTENTS)
from gdal2mbtiles.gdal import SpatialReference
@pytest.fixture
def epsg_3857_from_proj4():
"""
Return a gdal spatial reference object with
3857 crs using the ImportFromProj4 method.
"""
spatial_ref = SpatialReference()
spatial_ref.ImportFromProj4('+init=epsg:3857')
return spatial_ref
@pytest.fixture
def epsg_3857_from_epsg():
"""
Return a gdal spatial reference object with
3857 crs using the FromEPSG method.
"""
spatial_ref = SpatialReference.FromEPSG(EPSG_WEB_MERCATOR)
    return spatial_ref
def test_epsg_3857_proj4(epsg_3857_from_proj4):
extents = epsg_3857_from_proj4.GetWorldExtents()
extents = array(extents)
assert_array_almost_equal(extents, EPSG3857_EXTENTS, decimal=3)
def test_epsg_3857_from_epsg(epsg_3857_from_epsg):
extents = epsg_3857_from_epsg.GetWorldExtents()
extents = array(extents)
    assert_array_almost_equal(extents, EPSG3857_EXTENTS, decimal=3)
|
F5Networks/f5-openstack-agent
|
f5_openstack_agent/lbaasv2/drivers/bigip/icontrol_driver.py
|
Python
|
apache-2.0
| 112,266 | 0 |
# coding=utf-8#
# Copyright (c) 2014-2018, F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import hashlib
import json
import logging as std_logging
import os
import urllib
from eventlet import greenthread
from time import strftime
from time import time
from requests import HTTPError
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from oslo_utils import importutils
from f5.bigip import ManagementRoot
from f5_openstack_agent.lbaasv2.drivers.bigip.cluster_manager import \
ClusterManager
from f5_openstack_agent.lbaasv2.drivers.bigip import constants_v2 as f5const
from f5_openstack_agent.lbaasv2.drivers.bigip.esd_filehandler import \
EsdTagProcessor
from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5ex
from f5_openstack_agent.lbaasv2.drivers.bigip.lbaas_builder import \
LBaaSBuilder
from f5_openstack_agent.lbaasv2.drivers.bigip.lbaas_driver import \
LBaaSBaseDriver
from f5_openstack_agent.lbaasv2.drivers.bigip import network_helper
from f5_openstack_agent.lbaasv2.drivers.bigip.network_service import \
NetworkServiceBuilder
from f5_openstack_agent.lbaasv2.drivers.bigip import resource_helper
from f5_openstack_agent.lbaasv2.drivers.bigip.service_adapter import \
ServiceModelAdapter
from f5_openstack_agent.lbaasv2.drivers.bigip import ssl_profile
from f5_openstack_agent.lbaasv2.drivers.bigip import stat_helper
from f5_openstack_agent.lbaasv2.drivers.bigip.system_helper import \
SystemHelper
from f5_openstack_agent.lbaasv2.drivers.bigip.tenants import \
BigipTenantManager
from f5_openstack_agent.lbaasv2.drivers.bigip.utils import serialized
from f5_openstack_agent.lbaasv2.drivers.bigip.virtual_address import \
VirtualAddress
LOG = logging.getLogger(__name__)
NS_PREFIX = 'qlbaas-'
__VERSION__ = '0.1.1'
# configuration objects specific to iControl driver
# XXX see /etc/neutron/services/f5/f5-openstack-agent.ini
OPTS = [ # XXX maybe we should make this a dictionary
cfg.StrOpt(
'bigiq_hostname',
help='The hostname (name or IP address) to use for the BIG-IQ host'
),
cfg.StrOpt(
'bigiq_admin_username',
default='admin',
help='The admin username to use for BIG-IQ authentication',
),
cfg.StrOpt(
'bigiq_admin_password',
default='[Provide password in config file]',
secret=True,
help='The admin password to use for BIG-IQ authentication'
),
cfg.StrOpt(
'openstack_keystone_uri',
default='http://192.0.2.248:5000/',
help='The admin password to use for BIG-IQ authentication'
),
cfg.StrOpt(
'openstack_admin_username',
default='admin',
help='The admin username to use for authentication '
'with the Keystone service'
),
cfg.StrOpt(
'openstack_admin_password',
default='[Provide password in config file]',
secret=True,
help='The admin password to use for authentication'
' with the Keystone service'
),
cfg.StrOpt(
'bigip_management_username',
default='admin',
help='The admin username that the BIG-IQ will use to manage '
'discovered BIG-IPs'
),
cfg.StrOpt(
'bigip_management_password',
default='[Provide password in config file]',
secret=True,
help='The admin password that the BIG-IQ will use to manage '
'discovered BIG-IPs'
),
cfg.StrOpt(
'f5_device_type', default='external',
help='What type of device onboarding'
),
cfg.StrOpt(
'f5_ha_type', default='pair',
help='Are we standalone, pair(active/standby), or scalen'
),
cfg.ListOpt(
'f5_external_physical_mappings', default=['default:1.1:True'],
help='Mapping between Neutron physical_network to interfaces'
),
cfg.StrOpt(
'f5_vtep_folder', default='Common',
help='Folder for the VTEP SelfIP'
),
cfg.StrOpt(
'f5_vtep_selfip_name', default=None,
help='Name of the VTEP SelfIP'
),
cfg.ListOpt(
'advertised_tunnel_types', default=['vxlan'],
help='tunnel types which are advertised to other VTEPs'
),
cfg.BoolOpt(
'f5_populate_static_arp', default=False,
help='create static arp entries based on service entries'
),
cfg.StrOpt(
'vlan_binding_driver',
default=None,
help='driver class for binding vlans to device ports'
),
cfg.StrOpt(
'interface_port_static_mappings',
default=None,
help='JSON encoded static mapping of'
'devices to list of '
'interface and port_id'
),
cfg.StrOpt(
'l3_binding_driver',
default=None,
help='driver class for binding l3 address to l2 ports'
),
cfg.StrOpt(
'l3_binding_static_mappings', default=None,
help='JSON encoded static mapping of'
'subnet_id to list of '
'port_id, device_id list.'
),
cfg.BoolOpt(
'f5_route_domain_strictness', default=False,
help='Strict route domain isolation'
),
cfg.BoolOpt(
'f5_common_networks', default=False,
help='All networks defined under Common partition'
),
cfg.BoolOpt(
'f5_common_external_networks', default=True,
help='Treat external networks as common'
),
cfg.BoolOpt(
'external_gateway_mode', default=False,
help='All subnets have an external l3 route on gateway'
),
cfg.StrOpt(
'icontrol_vcmp_hostname',
help='The hostname (name or IP address) to use for vCMP Host '
'iControl access'
),
cfg.StrOpt(
'icontrol_hostname',
default="10.190.5.7",
help='The hostname (name or IP address) to use for iControl access'
),
cfg.StrOpt(
'icontrol_username', default='admin',
help='The username to use for iControl access'
),
cfg.StrOpt(
'icontrol_password', default='admin', secret=True,
help='The password to use for iControl access'
),
cfg.IntOpt(
'icontrol_connection_timeout', default=30,
help='How many seconds to timeout a connection to BIG-IP'
),
cfg.IntOpt(
'icontrol_connection_retry_interval', default=10,
help='How many seconds to wait between retry connection attempts'
),
cfg.DictOpt(
'common_network_ids', default={},
help='network uuid to existing Common networks mapping'
),
cfg.StrOpt(
'icontrol_config_mode', default='objects',
help='Whether to use iapp or objects for bigip configuration'
),
cfg.IntOpt(
'max_namespaces_per_tenant', default=1,
help='How many routing tables the BIG-IP will allocate per tenant'
' in order to accommodate overlapping IP subnets'
),
cfg.StrOpt(
'cert_manager',
default=None,
help='Class name of the certificate mangager used for retrieving '
'certificates and keys.'
),
cfg.StrOpt(
'auth_version',
default=None,
help='Keystone authentication version (v2 or v3) for Barbican client.'
),
cfg.StrOpt(
'os_project_id',
default='service',
help='OpenStack project ID.'
),
cfg.StrOpt(
'os_auth_url',
default=None,
help='OpenStack authentication URL.'
),
cfg.StrOpt(
'os_username',
default=None,
help='OpenStack user name for Keystone authentication.'
),
cfg.StrOpt(
|
HomeRad/TorCleaner
|
test.py
|
Python
|
gpl-2.0
| 32,033 | 0.00103 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# SchoolTool - common information systems platform for school administration
# Copyright (c) 2003 Shuttleworth Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
SchoolTool test runner.
Syntax: test.py [options] [pathname-regexp [test-regexp]]
Test cases are located in the directory tree starting at the location of this
script, in subdirectories named 'tests' and in Python modules named
'test*.py'. They are then filtered according to pathname and test regexes.
Alternatively, packages may just have 'tests.py' instead of a subpackage
'tests'.
A leading "!" in a regexp is stripped and negates the regexp. Pathname
regexp is applied to the whole path (package/package/module.py). Test regexp
is applied to a full test id (package.package.module.class.test_method).
Options:
-h, --help print this help message
-v verbose (print dots for each test run)
-vv very verbose (print test names)
-q quiet (do not print anything on success)
-c colorize output
-d invoke pdb when an exception occurs
-1 report only the first failure in doctests
-p show progress bar (can be combined with -v or -vv)
--level n select only tests at level n or lower
--all-levels select all tests
--list-files list all selected test files
--list-tests list all selected test cases
--coverage create code coverage reports
--profile profile the unit tests
--search-in dir limit directory tree walk to dir (optimisation)
--immediate-errors show errors as soon as they happen (default)
--delayed-errors show errors after all tests were run
--resource name enable given resource
"""
#
# This script borrows ideas from Zope 3's test runner heavily. It is smaller
# and cleaner though, at the expense of more limited functionality.
#
import re
import os
import sys
import time
import types
import getopt
import unittest
import traceback
import linecache
import pdb
__metaclass__ = type
RCS_IGNORE = [
"SCCS",
"BitKeeper",
"CVS",
".pc",
".hg",
".svn",
".git",
]
class Options:
"""Configurable properties of the test runner."""
# test location
basedir = '.' # base directory for tests (defaults to
# basedir of argv[0]), must be absolute
search_in = () # list of subdirs to traverse (defaults to
# basedir)
follow_symlinks = True # should symlinks to subdirectories be
# followed? (hardcoded, may cause loops)
# available resources
resources = []
# test filtering
level = 1 # run only tests at this or lower level
# (if None, runs all tests)
pathname_regex = '' # regexp for filtering filenames
test_regex = '' # regexp for filtering test cases
# actions to take
list_files = False # --list-files
list_tests = False # --list-tests
run_tests = True # run tests (disabled by --list-foo)
postmortem = False # invoke pdb when an exception occurs
profile = False
# output verbosity
verbosity = 0 # verbosity level (-v)
quiet = 0 # do not print anything on success (-q)
first_doctest_failure = False # report first doctest failure (-1)
    print_import_time = True    # print time taken to import test modules
# (currently hardcoded)
progress = False # show running progress (-p)
colorize = False # colorize output (-c)
coverage = False # produce coverage reports (--coverage)
coverdir = 'coverage' # where to put them (currently hardcoded)
immediate_errors = True # show tracebacks twice (--immediate-errors,
# --delayed-errors)
screen_width = 80 # screen width (autodetected)
def compile_matcher(regex):
"""Return a function that takes one argument and returns True or False.
Regex is a regular expression. Empty regex matches everything. There
    is one exception: if the regex starts with "!", the meaning of it is
reversed.
"""
if not regex:
return lambda x: True
elif regex == '!':
return lambda x: False
elif regex.startswith('!'):
rx = re.compile(regex[1:])
return lambda x: rx.search(x) is None
else:
rx = re.compile(regex)
return lambda x: rx.search(x) is not None
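# Doctest-style illustration of the matcher semantics described above (the sample
# patterns and paths are made up):
#   compile_matcher('')('anything/at/all.py')        -> True   (empty regex matches everything)
#   compile_matcher('tests')('pkg/tests/test_a.py')  -> True
#   compile_matcher('!tests')('pkg/tests/test_a.py') -> False  (leading "!" negates the match)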
def walk_with_symlinks(top, func, arg):
"""Like os.path.walk, but follows symlinks on POSIX systems.
If the symlinks create a loop, this function will never finish.
"""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
exceptions = ('.', '..')
for name in names:
if name not in exceptions:
name = os.path.join(top, name)
if os.path.isdir(name):
walk_with_symlinks(name, func, arg)
def has_path_component(path, name):
_drive, path = os.path.splitdrive(path)
head, tail = os.path.split(path)
while head and tail:
if tail == name:
return True
head, tail = os.path.split(head)
return False
def get_test_files(cfg):
"""Return a list of test module filenames."""
matcher = compile_matcher(cfg.pathname_regex)
allresults = []
test_names = ['tests']
baselen = len(cfg.basedir) + 1
def visit(ignored, dir, files):
# Ignore files starting with a dot.
        # Do not descend into subdirs containing a dot.
# Ignore versioning system files
remove = []
for idx, file in enumerate(files):
if file.startswith('.'):
remove.append(idx)
elif '.' in file and os.path.isdir(os.path.join(dir, file)):
remove.append(idx)
elif file in RCS_IGNORE:
remove.append(idx)
remove.reverse()
for idx in remove:
del files[idx]
# Skip non-test directories, but look for tests.py
if not has_path_component(dir, test_name):
if test_name + '.py' in files:
path = os.path.join(dir, test_name + '.py')
if matcher(path[baselen:]):
results.append(path)
return
test_files = [f for f in files if \
f.startswith('test') and f.endswith(".py")]
if '__init__.py' not in files:
if test_files:
# Python test files found, but no __init__.py
print >> sys.stderr, "%s is not a package" % dir
return
for file in test_files:
path = os.path.join(dir, file)
if matcher(path[baselen:]):
results.append(path)
if cfg.follow_symlinks:
walker = walk_with_symlinks
else:
walker = os.path.walk
for test_name in test_names:
results = []
for dir in cfg.search_in:
walker(dir, visit, None)
results.sort()
allresults += results
return allresults
def import_module(filename, cfg, tracer=None):
"""Import and return a module."""
filename = os.path.spli
|
greggy/python-ipgeobase
|
cidr_create.py
|
Python
|
bsd-3-clause
| 1,230 | 0.010906 |
# -*- coding: utf-8 -*-
import cPickle as pickle
GEO_FILES = './geo_files'
def gen_db():
    u'''Function that generates the pickle database from the ipgeobase.ru files
'''
res = []
tmp_list = []
cities_dict = {}
# cidr_optim.txt
for line in open('%s/cidr_optim.txt' % GEO_FILES, 'r'):
a = line.split('\t')
a[4] = a[4].strip()
if a[4] == '-':
a[4] = None
else:
a[4] = int(a[4])
tmp_list.append(a[0])
res.append((int(a[0]), int(a[1]), a[3], a[4]))
res = sorted(res, key=lambda i: i[0])
    # check for duplicates
c = 0
for item in res:
if c > 0:
            if item[0] == res[c-1][0]:
res.remove(item)
c += 1
# cities.txt
cities_file = open('%s/cities.txt' % GEO_FILES, 'r').read()
lines = cities_file.decode('CP1251').split('\n')
for line in lines:
a = line.split('\t')
if len(a) > 3:
cities_dict.update({int(a[0]): (a[1], a[2])})
f = open('%s/cidr_pickle.db' % GEO_FILES, 'w')
pickle.dump((res, cities_dict), f)
f.close()
if __name__ == '__main__':
gen_db()
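# A hedged usage sketch (not part of the original script): one way the generated
# cidr_pickle.db could be consumed for an IP lookup. The lookup_ip helper below is
# an assumption about intended usage, not code shipped with ipgeobase.
def lookup_ip(ip):
    u'''Return (country_code, (city, region)) for a dotted-quad IP, or None.'''
    from bisect import bisect_right
    from socket import inet_aton
    from struct import unpack
    with open('%s/cidr_pickle.db' % GEO_FILES, 'rb') as f:
        ranges, cities = pickle.load(f)
    ip_int = unpack('!I', inet_aton(ip))[0]
    i = bisect_right([r[0] for r in ranges], ip_int) - 1  # ranges are sorted by start address
    if i >= 0 and ranges[i][1] >= ip_int:
        start, end, country, city_id = ranges[i]
        return country, cities.get(city_id)
    return None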
|
OpenConextApps/OpenConextApps-MediaWiki
|
includes/zhtable/Makefile.py
|
Python
|
gpl-2.0
| 13,107 | 0.036927 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @author Philip
import tarfile as tf
import zipfile as zf
import os, re, shutil, sys, platform
pyversion = platform.python_version()
islinux = platform.system().lower() == 'linux'
if pyversion[:3] in ['2.6', '2.7']:
import urllib as urllib_request
import codecs
open = codecs.open
_unichr = unichr
if sys.maxunicode < 0x10000:
def unichr(i):
if i < 0x10000:
return _unichr(i)
else:
return _unichr( 0xD7C0 + ( i>>10 ) ) + _unichr( 0xDC00 + ( i & 0x3FF ) )
elif pyversion[:2] == '3.':
import urllib.request as urllib_request
unichr = chr
def unichr2( *args ):
return [unichr( int( i.split('<')[0][2:], 16 ) ) for i in args]
def unichr3( *args ):
return [unichr( int( i[2:7], 16 ) ) for i in args if i[2:7]]
# DEFINE
SF_MIRROR = 'easynews'
SCIM_TABLES_VER = '0.5.9'
SCIM_PINYIN_VER = '0.5.91'
LIBTABE_VER = '0.2.3'
# END OF DEFINE
def download( url, dest ):
if os.path.isfile( dest ):
print( 'File %s up to date.' % dest )
return
global islinux
if islinux:
# we use wget instead urlretrieve under Linux,
# because wget could display details like download progress
os.system('wget %s' % url)
else:
print( 'Downloading from [%s] ...' % url )
urllib_request.urlretrieve( url, dest )
print( 'Download complete.\n' )
return
def uncompress( fp, member, encoding = 'U8' ):
name = member.rsplit( '/', 1 )[-1]
print( 'Extracting %s ...' % name )
fp.extract( member )
shutil.move( member, name )
if '/' in member:
shutil.rmtree( member.split( '/', 1 )[0] )
return open( name, 'rb', encoding, 'ignore' )
unzip = lambda path, member, encoding = 'U8': \
uncompress( zf.ZipFile( path ), member, encoding )
untargz = lambda path, member, encoding = 'U8': \
uncompress( tf.open( path, 'r:gz' ), member, encoding )
def parserCore( fp, pos, beginmark = None, endmark = None ):
if beginmark and endmark:
start = False
else: start = True
mlist = set()
for line in fp:
if beginmark and line.startswith( beginmark ):
start = True
continue
elif endmark and line.startswith( endmark ):
break
if start and not line.startswith( '#' ):
elems = line.split()
if len( elems ) < 2:
continue
elif len( elems[0] ) > 1:
mlist.add( elems[pos] )
return mlist
def tablesParser( path, name ):
""" Read file from scim-tables and parse it. """
global SCIM_TABLES_VER
src = 'scim-tables-%s/tables/zh/%s' % ( SCIM_TABLES_VER, name )
fp = untargz( path, src, 'U8' )
return parserCore( fp, 1, 'BEGIN_TABLE', 'END_TABLE' )
ezbigParser = lambda path: tablesParser( path, 'EZ-Big.txt.in' )
wubiParser = lambda path: tablesParser( path, 'Wubi.txt.in' )
zrmParser = lambda path: tablesParser( path, 'Ziranma.txt.in' )
def phraseParser( path ):
""" Read phrase_lib.txt and parse it. """
global SCIM_PINYIN_VER
src = 'scim-pinyin-%s/data/phrase_lib.txt' % SCIM_PINYIN_VER
dst = 'phrase_lib.txt'
fp = untargz( path, src, 'U8' )
return parserCore( fp, 0 )
def tsiParser( path ):
""" Read tsi.src and parse it. """
src = 'libtabe/tsi-src/tsi.src'
dst = 'tsi.src'
fp = untargz( path, src, 'big5hkscs' )
return parserCore( fp, 0 )
def unihanParser( path ):
""" Read Unihan_Variants.txt and parse it. """
fp = unzip( path, 'Unihan_Variants.txt', 'U8' )
t2s = dict()
s2t = dict()
for line in fp:
if line.startswith( '#' ):
continue
else:
elems = line.split()
if len( elems ) < 3:
continue
type = elems.pop( 1 )
elems = unichr2( *elems )
if type == 'kTraditionalVariant':
s2t[elems[0]] = elems[1:]
elif type == 'kSimplifiedVariant':
t2s[elems[0]] = elems[1:]
fp.close()
return ( t2s, s2t )
def applyExcludes( mlist, path ):
""" Apply exclude rules from path to mlist. """
excludes = open( path, 'rb', 'U8' ).read().split()
excludes = [word.split( '#' )[0].strip() for word in excludes]
excludes = '|'.join( excludes )
excptn = re.compile( '.*(?:%s).*' % excludes )
diff = [mword for mword in mlist if excptn.search( mword )]
mlist.difference_update( diff )
return mlist
def charManualTable( path ):
fp = open( path, 'rb', 'U8' )
ret = {}
for line in fp:
elems = line.split( '#' )[0].split( '|' )
elems = unichr3( *elems )
if len( elems ) > 1:
ret[elems[0]] = elems[1:]
return ret
def toManyRules( src_table ):
tomany = set()
for ( f, t ) in src_table.iteritems():
for i in range( 1, len( t ) ):
tomany.add( t[i] )
return tomany
def removeRules( path, table ):
fp = open( path, 'rb', 'U8' )
texc = list()
for line in fp:
elems = line.split( '=>' )
f = t = elems[0].strip()
if len( elems ) == 2:
t = elems[1].strip()
f = f.strip('"').strip("'")
t = t.strip('"').strip("'")
if f:
try:
table.pop( f )
except:
pass
if t:
texc.append( t )
texcptn = re.compile( '^(?:%s)$' % '|'.join( texc ) )
for (tmp_f, tmp_t) in table.copy().iteritems():
if texcptn.match( tmp_t ):
table.pop( tmp_f )
return table
def customRules( path ):
fp = open( path, 'rb', 'U8' )
ret = dict()
for line in fp:
elems = line.split( '#' )[0].split()
if len( elems ) > 1:
ret[elems[0]] = elems[1]
return ret
def dictToSortedList( src_table, pos ):
return sorted( src_table.items(), key = lambda m: m[pos] )
def translate( text, conv_table ):
i = 0
while i < len( text ):
        for j in range( len( text ) - i, 0, -1 ):
f = text[i:][:j]
t = conv_table.get( f )
if t:
text = text[:i] + t + text[i:][j:]
i += len(t) - 1
break
i += 1
return text
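# Tiny illustration of the greedy longest-match behaviour of translate(); the
# two-entry table below is made up for demonstration only:
#   translate( u'abc', { u'ab': u'X', u'a': u'Y' } ) == u'Xc'   (the longer key wins at each position)
#   translate( u'cab', { u'ab': u'X', u'a': u'Y' } ) == u'cX'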
def manualWordsTable( path, conv_table, reconv_table ):
fp = open( path, 'rb', 'U8' )
reconv_table = {}
wordlist = [line.split( '#' )[0].strip() for line in fp]
wordlist = list( set( wordlist ) )
wordlist.sort( key = len, reverse = True )
while wordlist:
word = wordlist.pop()
new_word = translate( word, conv_table )
rcv_word = translate( word, reconv_table )
if word != rcv_word:
reconv_table[word] = word
reconv_table[new_word] = word
return reconv_table
def defaultWordsTable( src_wordlist, src_tomany, char_conv_table, char_reconv_table ):
wordlist = list( src_wordlist )
wordlist.sort( key = len, reverse = True )
word_conv_table = {}
word_reconv_table = {}
conv_table = char_conv_table.copy()
reconv_table = char_reconv_table.copy()
tomanyptn = re.compile( '(?:%s)' % '|'.join( src_tomany ) )
while wordlist:
conv_table.update( word_conv_table )
reconv_table.update( word_reconv_table )
word = wordlist.pop()
new_word_len = word_len = len( word )
while new_word_len == word_len:
add = False
test_word = translate( word, reconv_table )
new_word = translate( word, conv_table )
if not reconv_table.get( new_word ) \
and ( test_word != word \
or ( tomanyptn.search( word ) \
and word != translate( new_word, reconv_table ) ) ):
word_conv_table[word] = new_word
word_reconv_table[new_word] = word
try:
word = wordlist.pop()
except IndexError:
break
new_word_len = len(word)
return word_reconv_table
def PHPArray( table ):
lines = ['\'%s\' => \'%s\',' % (f, t) for (f, t) in table if f and t]
return '\n'.join(lines)
def
|
stuarteberg/lazyflow
|
tests/testRESTfulBlockwiseFileset.py
|
Python
|
lgpl-3.0
| 7,468 | 0.014596 |
###############################################################################
# lazyflow: data flow based lazy parallel computation framework
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
import os
import sys
import shutil
import tempfile
import numpy
import h5py
import nose
import platform
from lazyflow.roi import sliceToRoi
import logging
logger = logging.getLogger(__name__)
logger.addHandler( logging.StreamHandler( sys.stdout ) )
logger.setLevel(logging.INFO)
logger.setLevel(logging.DEBUG)
from lazyflow.utility.io_util.blockwiseFileset import BlockwiseFileset
from lazyflow.utility.io_util.RESTfulBlockwiseFileset import RESTfulBlockwiseFileset
class TestRESTFullBlockwiseFilset(object):
@classmethod
def setupClass(cls):
# The openconnectome site appears to be down at the moment.
# This test fails when that happens...
raise nose.SkipTest
if platform.system() == 'Windows':
# On windows, there are errors, and we make no attempt to solve them (at the moment).
raise nose.SkipTest
try:
BlockwiseFileset._prepare_system()
except ValueError:
# If the system isn't configured to allow lots of open files, we can't run this test.
raise nose.SkipTest
cls.tempDir = tempfile.mkdtemp()
logger.debug("Working in {}".format( cls.tempDir ))
# Create the two sub-descriptions
Bock11VolumeDescription = """
{
"_schema_name" : "RESTful-volume-description",
"_schema_version" : 1.0,
"name" : "Bock11-level0",
"format" : "hdf5",
"axes" : "zyx",
"##NOTE":"The first z-slice of the bock dataset is 2917, so the origin_offset must be at least 2917",
"origin_offset" : [2917, 50000, 50000],
"bounds" : [4156, 135424, 119808],
"dtype" : "numpy.uint8",
"url_format" : "http://openconnecto.me/ocp/ca/bock11/hdf5/0/{x_start},{x_stop}/{y_start},{y_stop}/{z_start},{z_stop}/",
"hdf5_dataset" : "CUTOUT"
}
"""
blockwiseFilesetDescription = \
"""
{
"_schema_name" : "blockwise-fileset-description",
"_schema_version" : 1.0,
"name" : "bock11-blocks",
"format" : "hdf5",
"axes" : "zyx",
"shape" : [40,40,40],
"dtype" : "numpy.uint8",
"block_shape" : [20, 20, 20],
"block_file_name_format" : "block-{roiString}.h5/CUTOUT",
"dataset_root_dir" : "blocks"
}
"""
# Combine them into the composite description (see RESTfulBlockwiseFileset.DescriptionFields)
compositeDescription = \
"""
{{
"_schema_name" : "RESTful-blockwise-fileset-description",
"_schema_version" : 1.0,
"remote_description" : {remote_description},
"local_description" : {local_description}
}}
""".format( remote_description=Bock11VolumeDescription, local_description=blockwiseFilesetDescription )
# Create the description file
cls.descriptionFilePath = os.path.join(cls.tempDir, "description.json")
with open(cls.descriptionFilePath, 'w') as f:
f.write(compositeDescription)
# Create a new fileset that views the same data and stores it the
# same way locally, but this time we'll use an offset 'view'
# Start with a copy of the non-offset description
offsetDescription = RESTfulBlockwiseFileset.readDescription(cls.descriptionFilePath)
offsetDescription.local_description.view_origin = numpy.array([0,20,0])
offsetDescription.local_description.dataset_root_dir = "offset_blocks"
cls.descriptionFilePath_offset = os.path.join(cls.tempDir, "description_offset.json")
RESTfulBlockwiseFileset.writeDescription(cls.descriptionFilePath_offset, offsetDescription)
@classmethod
def teardownClass(cls):
# If the user is debugging, don't clear the files we're testing with.
if logger.level > logging.DEBUG:
shutil.rmtree(cls.tempDir)
def test_1_SingleDownload(self):
volume = RESTfulBlockwiseFileset( self.descriptionFilePath )
slicing = numpy.s_[0:20, 0:20, 0:20]
roi = sliceToRoi(slicing, volume.description.shape)
data = volume.readData( roi )
assert data.shape == (20,20,20)
assert volume.getBlockStatus( ([0,0,0]) ) == BlockwiseFileset.BLOCK_AVAILABLE
def test_2_MultiDownload(self):
volume = RESTfulBlockwiseFileset( self.descriptionFilePath )
slicing = numpy.s_[0:25, 10:30, 0:20]
        roi = sliceToRoi(slicing, volume.description.shape)
data = volume.readData( roi )
assert data.shape == (25,20,20)
assert volume.getBlockStatus( ([0,0,0]) ) == BlockwiseFileset.BLOCK_AVAILABLE
        assert volume.getBlockStatus( ([20,0,0]) ) == BlockwiseFileset.BLOCK_AVAILABLE
assert volume.getBlockStatus( ([20,20,0]) ) == BlockwiseFileset.BLOCK_AVAILABLE
assert volume.getBlockStatus( ([0,20,0]) ) == BlockwiseFileset.BLOCK_AVAILABLE
def test_4_OffsetDownload(self):
volume = RESTfulBlockwiseFileset( self.descriptionFilePath )
slicing = numpy.s_[20:40, 20:40, 20:40]
roi = sliceToRoi(slicing, volume.description.shape)
data = volume.readData( roi )
assert data.shape == (20,20,20)
assert volume.getBlockStatus( ([20,20,20]) ) == BlockwiseFileset.BLOCK_AVAILABLE
offsetVolume = RESTfulBlockwiseFileset( self.descriptionFilePath_offset )
offsetSlicing = numpy.s_[20:40, 0:20, 20:40] # Note middle slice is offset (see view_origin in setupClass)
offsetRoi = sliceToRoi(offsetSlicing, offsetVolume.description.shape)
offsetData = offsetVolume.readData( offsetRoi )
assert offsetData.shape == (20,20,20)
assert offsetVolume.getBlockStatus( ([20,0,20]) ) == BlockwiseFileset.BLOCK_AVAILABLE
# Data should be the same
assert (offsetData == data).all()
if __name__ == "__main__":
import sys
import nose
sys.argv.append("--nocapture") # Don't steal stdout. Show it on the console as usual.
sys.argv.append("--nologcapture") # Don't set the logging level to DEBUG. Leave it alone.
ret = nose.run(defaultTest=__file__)
if not ret: sys.exit(1)
|
jjgomera/pychemqt
|
lib/elemental.py
|
Python
|
gpl-3.0
| 7,567 | 0.000264 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''Pychemqt, Chemical Engineering Process simulator
Copyright (C) 2009-2017, Juan José Gómez Romera <jjgomera@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
This library implement a chemical element with several properties
* id: atomic number
* name
* altname
* symbol
* serie
* group
* period
* block
* density_Solid
* density_Liq
* density_Gas
* appearance
* date
* country
* discover
* etymology
* atomic_mass
* atomic_volume
* atomic_radius
* covalent_radius
* vanderWaals_radius
* ionic_radii
* lattice_type
* space_group
* lattice_edges
* lattice_angles
* electron_configuration
* oxidation
* electronegativity
* electron_affinity
* first_ionization
* Tf
* Tb
* Heat_f
* Heat_b
* Cp
* k
* T_debye
* color
* notes
'''
import os
import sqlite3
from numpy import linspace, logspace, log
from PyQt5.QtCore import QLocale
from lib.utilities import colors
# Connection to database with element data
connection = sqlite3.connect(os.path.join(
os.environ["pychemqt"], "dat", "elemental.db"))
databank = connection.cursor()
# Load system locale to implement a custon translation system (non qt)
locale = QLocale.system().name().upper()
if "_" in locale:
locale = locale.split("_")[0]
databank.execute("PRAGMA table_info(TRANSLATION)")
translation = []
for i, name, type_, other, other2, primary_key in databank:
if "name_" in name:
translation.append(name.split("_")[-1])
if locale in translation:
tr_available = True
else:
tr_available = False
def cleanFloat(flo):
if flo:
try:
value = float(flo)
except ValueError:
value = float(flo.split("(")[1].split(",")[0])
else:
value = 0
return value
color_serie = ["#DDDDDD", "#79
|
5681", "#B92D2D", "#B8873A", "#D7C848",
"#94738F", "#6186AC", "#88AE62", "#94
|
9692", "#BF924E",
"#C44343"]
color_phase = ["#DDDDDD", "#BB8F4A", "#7BB245", "#5D82A8"]
NUMERIC_VALUES = ["density_Solid", "density_Liq", "density_Gas", "date",
"atomic_mass", "atomic_volume", "atomic_radius",
"covalent_radius", "vanderWaals_radius", "electronegativity",
"electron_affinity", "first_ionization", "Tf", "Tb",
"Heat_f", "Heat_b", "Cp", "k", "T_debye"]
def _configValues(Preferences):
PROP = Preferences.get("Applications", "elementalColorby")
NUM = Preferences.getint("Applications", "elementalDefinition")
LOG = Preferences.getboolean("Applications", "elementalLog")
PMIN = None
PMAX = None
if PROP == "phase":
CATEGORIES = ["", "Solid", "Liquid", "Gas"]
COLORS = color_phase
elif PROP in NUMERIC_VALUES:
databank.execute("SELECT %s FROM ELEMENTS" % PROP)
PMAX = 0
for st, in databank:
value = cleanFloat(st)
if value > PMAX:
PMAX = value
if LOG:
PMIN = 1
CATEGORIES = logspace(log(PMIN), log(PMAX), NUM)
else:
PMIN = 0
CATEGORIES = linspace(PMIN, PMAX, NUM)
COLORS = colors(NUM, scale=True)
elif PROP == "Element":
CATEGORIES = []
COLORS = []
else:
q = "SELECT %s, COUNT(*) c FROM ELEMENTS GROUP BY %s HAVING c > 0" % (
PROP, PROP)
databank.execute(q)
CATEGORIES = []
for category, count in databank:
CATEGORIES.append(category)
if PROP == "serie":
COLORS = color_serie
else:
COLORS = colors(len(CATEGORIES))
return CATEGORIES, PROP, COLORS, PMAX
class Elemental(object):
"""Chemical element class"""
def __init__(self, id):
"""
Parameters
------------
id : int
atomic number of element, [-]
"""
if id > 118:
id = 118
databank.execute("SELECT * FROM ELEMENTS WHERE id=='%i'" % id)
data = databank.fetchone()
self.id = int(data[0])
self.altname = data[2]
self.symbol = data[3]
self.serie = data[4]
self.group = int(data[5])
self.period = int(data[6])
self.block = data[7]
self.density_Solid = self._unit(data[8])
self.density_Liq = self._unit(data[9])
self.density_Gas = self._unit(data[10])
self.appearance = data[11]
self.date = data[12]
self.country = data[13]
self.discover = data[14]
self.etymology = data[15]
self.atomic_mass = self._unit(data[16])
self.atomic_volume = self._unit(data[17])
self.atomic_radius = self._unit(data[18])
self.covalent_radius = self._unit(data[19])
self.vanderWaals_radius = self._unit(data[20])
self.ionic_radii = data[21]
self.lattice_type = data[22]
self.space_group = data[23]
self.lattice_edges = eval(data[24])
self.lattice_volume = self.lattice_edges[0]*self.lattice_edges[1] * \
self.lattice_edges[2] / 1e9
self.lattice_angles = eval(data[25])
self.electron_configuration = data[26]
self.oxidation = data[27]
self.electronegativity = self._unit(data[28])
self.electron_affinity = self._unit(data[29])
self.first_ionization = self._unit(data[30])
self.Tf = self._unit(data[31])
self.Tb = self._unit(data[32])
if not self.Tf or not self.Tb:
self.phase = ""
elif self.Tf > 273.15:
self.phase = "Solid"
elif self.Tb < 273.15:
self.phase = "Gas"
else:
self.phase = "Liquid"
self.Heat_f = self._unit(data[33])
self.Heat_b = self._unit(data[34])
self.Cp = self._unit(data[35])
self.k = self._unit(data[36])
self.T_debye = self._unit(data[37])
self.color = data[38]
self.notes = data[39]
# Translation
self.name = data[1]
if tr_available:
qu = "SELECT name_%s FROM TRANSLATION WHERE id==%i" % (locale, id)
databank.execute(qu)
tr_name = databank.fetchone()[0]
if tr_name:
self.name = tr_name
# Isotopes
query = "SELECT * FROM ISOTOPES WHERE atomic_number==?" + \
"ORDER BY mass_number"
databank.execute(query, (self.id, ))
self.isotopes = []
for data in databank:
self.isotopes.append((int(data[4]), data[2], data[3]))
def _unit(self, str):
aproximate = False
try:
value = float(str)
except:
if not str:
value = None
elif str[-1] == ")":
value = float(str.split("(")[1].split(",")[0])
aproximate = True
if aproximate:
value.code = "stimated"
return value
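# Brief usage sketch (illustration only, not part of the library): look up iron by
# atomic number and print a few of the attributes documented in the module docstring.
if __name__ == '__main__':
    fe = Elemental(26)
    print(fe.name, fe.symbol, fe.serie)
    print('Tf (melting point):', fe.Tf, 'Tb (boiling point):', fe.Tb)
    print('isotopes in database:', len(fe.isotopes))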
|
iPlantCollaborativeOpenSource/cyverse-sdk
|
src/scripts/template2json.py
|
Python
|
bsd-3-clause
| 1,718 | 0.001164 |
#!/usr/bin/env python
import os
import sys
import argparse
import json
# template2json.py -k SYSTEM=lonestar.tacc.utexas.edu
# PATH=/home/vaughn/apps -i template.jsonx -o file.json
if __name__ == '__main__':
parser = argparse.ArgumentParser()
    parser.add_argument("-k", dest='keys', help='Space-delimited VAR=Value sets', nargs='*')
parser.add_argument("-i", dest='input', help='Input (template.jsonx)')
parser.add_argument("-o", dest="output", help="Output (output.json)")
args = parser.parse_args()
try:
        with open(args.input) as f:
s = f.read()
except TypeError, e:
print >> sys.stderr, "[FATAL]", "No filename was provided for -i"
sys.exit(1)
except IOError, e:
print >> sys.stderr, "[FATAL]", args.input, "was not available for reading"
print >> sys.stderr, "Exception: %s" % str(e)
sys.exit(1)
# Iterate through document, replacing variables with values
for kvp in args.keys:
try:
(key, val) = kvp.split('=')
except ValueError:
print '[WARN]', kvp, 'not a valid VAR=Value pair'
keyname = '${' + key + '}'
s = s.replace(keyname, val)
# Print out to JSON
jsonDoc = json.loads(s)
outpath = os.path.dirname(args.output)
if outpath:
if not os.path.exists(os.path.dirname(args.output)):
try:
os.makedirs(os.path.dirname(args.output))
except OSError as exc: # Guard against race condition
print >> sys.stderr, "Exception: %s" % str(exc)
sys.exit(1)
with open(args.output, 'w') as outfile:
json.dump(jsonDoc, outfile, indent=4)
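# Editor's note: a minimal, hedged sketch of the substitution step above, run on
# an in-memory template instead of files. The SYSTEM/PATH keys mirror the usage
# comment above; the JSON field names are invented for illustration.
import json
template = '{"executionSystem": "${SYSTEM}", "deploymentPath": "${PATH}/app"}'
pairs = ["SYSTEM=lonestar.tacc.utexas.edu", "PATH=/home/vaughn/apps"]
s = template
for kvp in pairs:
    key, val = kvp.split('=')
    s = s.replace('${' + key + '}', val)  # same ${VAR} token syntax as above
print(json.dumps(json.loads(s), indent=4))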
|
Luminarys/Bioinformatics
|
Scripts/GetRPKM.py
|
Python
|
gpl-2.0
| 1,450 | 0.006207 |
#!/usr/bin/env python
#Takes a table and index of Exon Lens, calculates RPKMs
import sys
import os
import re
import fileinput
from decimal import Decimal
from decimal import getcontext
from fractions import Fraction
def main(table,index):
indf = open(index)
out_file = "%s_rpkm.table" % os.path.splitext(table)[0]
dic = {}
    for line in fileinput.input(index):
(key, val) = line.split('\t')
if val == 0:
print "We Ffd up at " + str(key)
dic[str(key.rstrip())] = Decimal(val)
print dic["Vocar20014554m.g.2.0"]
with open(out_file,'w') as fout:
start = True
tb = "\t"
for line in fileinput.input(table):
if not start:
            listl = line.split('\t')
head = listl[0]
vals = listl[1:]
for i in xrange(len(vals)):
comp = Decimal(vals[i])
div = Decimal(dic[head.rstrip()])
print head.rstrip()
ot = "%.2f" % float(Decimal(comp)/div)
vals[i] = ot
fout.write(head+"\t")
fout.write(tb.join(vals)+"\n")
else:
start = False
fout.write(line+"\n")
if __name__ == '__main__':
print 'input table is: ' + sys.argv[1]
print 'input file is ' + sys.argv[2]
main(sys.argv[1],sys.argv[2])
print "completed"
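# Editor's note: the heart of the script above is one Decimal division per cell
# (count divided by the per-gene value from the index file). A small sketch of
# just that arithmetic on in-memory rows; gene names and numbers are invented.
from decimal import Decimal
lengths = {"geneA": Decimal("1500"), "geneB": Decimal("2300")}   # index values
rows = [("geneA", ["300", "450"]), ("geneB", ["920", "115"])]    # table rows
for gene, counts in rows:
    scaled = ["%.2f" % float(Decimal(c) / lengths[gene]) for c in counts]
    print(gene + "\t" + "\t".join(scaled))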
|
khosrow/metpx
|
pxStats/lib/LogFileAccessManager.py
|
Python
|
gpl-2.0
| 12,296 | 0.024723 |
#! /usr/bin/env python
"""
#############################################################################################
#
#
# Name: LogFileAccessManager.py
#
# @author: Nicholas Lemay
#
# @license: MetPX Copyright (C) 2004-2006 Environment Canada
# MetPX comes with ABSOLUTELY NO WARRANTY; For details type see the file
# named COPYING in the root of the source directory tree.
#
# Description : Utility class used to manage the access to the log files by the
# the pickle updater.
#
# Note : If this file is to be modified, please run the main() method at the bottom of this
# file to make sure everything still works properly. Feel free to add tests if needed.
#
# While using this class, you can either use only one file with all your entries
# and give a different identifier to all of you entries, or you can use different
# files.
#
# Using a single file however can be problematic if numerous process try to update
# the file at the same time.
#
#############################################################################################
"""
import os, sys, commands, time
sys.path.insert(1, os.path.dirname( os.path.abspath(__file__) ) + '/../../')
from pxStats.lib.StatsPaths import StatsPaths
from pxStats.lib.CpickleWrapper import CpickleWrapper
class LogFileAccessManager(object):
def __init__( self, accessDictionary = None, accessFile = "" ):
"""
@summary: LogFileAccessManager constructor.
@param accessArrays:
@param accessFile:
"""
paths = StatsPaths()
paths.setPaths()
if accessFile =="":
accessFile = paths.STATSLOGACCESS + "default"
self.accessDictionary = accessDictionary or {} # Empty array to start with.
self.accessFile = accessFile #File that contains the current file acces.
if self.accessDictionary == {} and os.path.isfile( self.accessFile ):
self.loadAccessFile()
def saveAccessDictionary( self ):
"""
@summary: Saves the current accessDictionary into the
accessfile.
"""
if not os.path.isdir( os.path.dirname( self.accessFile ) ):
os.makedirs( os.path.dirname( self.accessFile ) )
CpickleWrapper.save( self.accessDictionary, self.accessFile )
def loadAccessFile(self):
"""
@summary: Loads the accessFile into the accessDictionary.
"""
self.accessDictionary = CpickleWrapper.load( self.accessFile )
def getLineAssociatedWith( self, identifier ):
"""
@param identifier: Identifier string of the following format:
fileType_client/sourcename_machineName
@return: returns the first line of the last file accessed by the identifier.
If identifier has no associated line, the returned line will be "".
"""
line = ""
try:#In case the key does not exist.
line = self.accessDictionary[ identifier ][0]
except:#Pass keyerror
pass
return line
def getLastReadPositionAssociatedWith(self, identifier):
"""
@param identifier: Identifier string of the following format:
fileType_client/sourcename_machineName
@return: returns the last read position of the last file
accessed by the identifier. If no position is
associated with identifier will return 0.
"""
lastReadPositon = 0
try:#In case the key does not exist.
lastReadPositon = self.accessDictionary[ identifier ][1]
except:#Pass keyerror
pass
return lastReadPositon
def getFirstLineFromFile(self, fileName):
"""
@summary: Reads the first line of a file and returns it.
@param fileName: File from wich you want to know
@return: The first line of the specified file.
"""
firstLine = ""
if os.path.isfile( fileName ):
fileHandle = open( fileName, "r")
firstLine = fileHandle.readline()
fileHandle.close()
return firstLine
def getFirstLineAndLastReadPositionAssociatedwith(self, identifier):
"""
@param identifier: Identifier string of the following format:
fileType_client/sourcename_machineName
@return : A tuple containing the first line of the last file
read(in string format) and the last read position
(int format).
"""
line = ""
lastReadPositon = 0
try:#In case the key does not exist.
line ,lastReadPositon = self.accessDictionary[ identifier ]
except:#Pass keyerror
pass
return line, lastReadPositon
def setFirstLineAssociatedwith(self, firstLine, identifier ):
"""
@summary: Simple setter that hides data structure implementation
so that methods still work if implementation is ever
to change.
@param firstLine: First line to set.
@param identifier:Identifier string of the following format:
fileType_client/sourcename_machineName
"""
currentLastReadPosition = self.getLastReadPositionAssociatedWith(identifier)
self.accessDictionary[ identifier ] = firstLine, currentLastReadPosition
def setLastReadPositionAssociatedwith(self, lastReadPosition, identifier ):
"""
@summary: Simple setter that hides data structure implementation
so that methods still work if implementation is ever
to change.
@param lastReadPosition: Position to set.
@param identifier:Identifier string of the following format:
fileType_client/sourcename_machineName
"""
currentFirstLine = self.getLineAssociatedWith(identifier)
self.accessDictionary[ identifier ] = currentFirstLine, lastReadPosition
def setFirstLineAndLastReadPositionAssociatedwith(self, firstLine, lastReadPosition, identifier ):
"""
@summary: Simple setter that hides data structure implementation
so that methods still work if implementation is ever
to change.
@param firstLine: First line to set.
@param lastReadPosition: Position to set.
@param identifier:Identifier string of the following format:
fileType_client/sourcename_machineName
"""
self.accessDictionary[ identifier ] = (firstLine, lastReadPosition)
def isTheLastFileThatWasReadByThisIdentifier(self, fileName, identifier ):
"""
@summary : Returns whether or not(True or False ) the specified file
was the last one read by the identifier.
@param fileName: Name fo the file to be verified.
@param identifier: Identifier string of the following format:
|
cchurch/ansible
|
test/units/modules/storage/netapp/test_netapp_e_volume.py
|
Python
|
gpl-3.0
| 58,947 | 0.005038 |
# coding=utf-8
# (c) 2018, NetApp Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
try:
from unittest import mock
except ImportError:
import mock
from ansible.module_utils.netapp import NetAppESeriesModule
from ansible.modules.storage.netapp.netapp_e_volume import NetAppESeriesVolume
from units.modules.utils import AnsibleFailJson, ModuleTestCase, set_module_args
class NetAppESeriesVolumeTest(ModuleTestCase):
REQUIRED_PARAMS = {"api_username": "username",
"api_password": "password",
"api_url": "http://localhost/devmgr/v2",
"ssid": "1",
"validate_certs": "no"}
THIN_VOLUME_RESPONSE = [{"capacity": "1288490188800",
"volumeRef": "3A000000600A098000A4B28D000010475C405428",
"status": "optimal",
"protectionType": "type1Protection",
"maxVirtualCapacity": "281474976710656",
"initialProvisionedCapacity": "4294967296",
"currentProvisionedCapacity": "4294967296",
"provisionedCapacityQuota": "1305670057984",
"growthAlertThreshold": 85,
"expansionPolicy": "automatic",
"flashCached": False,
"metadata": [{"key": "workloadId", "value": "4200000001000000000000000000000000000000"},
{"key": "volumeTypeId", "value": "volume"}],
"dataAssurance": True,
"segmentSize": 131072,
"diskPool": True,
"listOfMappings": [],
"mapped": False,
"currentControllerId": "070000000000000000000001",
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": True,
"readAheadMultiplier": 0},
"name": "thin_volume",
"id": "3A000000600A098000A4B28D000010475C405428"}]
VOLUME_GET_RESPONSE = [{"offline": False,
"raidLevel": "raid6",
"capacity": "214748364800",
"reconPriority": 1,
"segmentSize": 131072,
"volumeRef": "02000000600A098000A4B9D100000F095C2F7F31",
"status": "optimal",
"protectionInformationCapable": False,
"protectionType": "type0Protection",
"diskPool": True,
"flashCached": False,
"metadata": [{"key": "workloadId", "value": "4200000002000000000000000000000000000000"},
{"key": "volumeTypeId", "value": "Clare"}],
"dataAssurance": False,
"currentControllerId": "070000000000000000000002",
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": False,
"readAheadMultiplier": 0},
"thinProvisioned": False,
"totalSizeInBytes": "214748364800",
"name": "Matthew",
"id": "02000000600A098000A4B9D100000F095C2F7F31"},
{"offline": False,
"raidLevel": "raid6",
"capacity": "107374182400",
"reconPriority": 1,
"segmentSize": 131072,
"volumeRef": "02000000600A098000A4B28D00000FBE5C2F7F26",
"status": "optimal",
"protectionInformationCapable": False,
"protectionType": "type0Protection",
"diskPool": True,
"flashCached": False,
"metadata": [{"key": "workloadId", "value": "4200000002000000000000000000000000000000"},
{"key": "volumeTypeId", "value": "Samantha"}],
"dataAssurance": False,
"currentControllerId": "070000000000000000000001",
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": False,
"readAheadMultiplier": 0},
"thinProvisioned": False,
"totalSizeInBytes": "107374182400",
"name": "Samantha",
"id": "02000000600A098000A4B28D00000FBE5C2F7F26"},
{"offline": False,
"raidLevel": "raid6",
"capacity": "107374182400",
"segmentSize": 131072,
"volumeRef": "02000000600A098000A4B9D100000F0B5C2F7F40",
"status": "optimal",
"protectionInformationCapable": False,
"protectionType": "type0Protection",
"volumeGroupRef": "04000000600A098000A4B9D100000F085C2F7F26",
"diskPool": True,
"flashCached": False,
"metadata": [{"key": "workloadId", "value": "4200000002000000000000000000000000000000"},
{"key": "volumeTypeId", "value": "Micah"}],
"dataAssurance": False,
"currentControllerId": "070000000000000000000002",
"cacheSettings": {"readCacheEnable": True, "writeCacheEnable": False,
"readAheadMultiplier": 0},
"thinProvisioned": False,
"totalSizeInBytes": "107374182400",
"name": "Micah",
"id": "02000000600A098000A4B9D100000F0B5C2F7F40"}]
STORAGE_POOL_GET_RESPONSE = [{"offline": False,
"raidLevel": "raidDiskPool",
"volumeGroupRef": "04000000600A",
"securityType": "capable",
"protectionInformationCapable": False,
"protectionInformationCapabilities": {"protectionInformationCapable": True,
"protectionType": "type2Protection"},
"volumeGroupData": {"type": "diskPool",
"diskPoolData": {"reconstructionReservedDriveCount": 1,
"reconstructionReservedAmt": "296889614336",
"reconstructionReservedDriveCountCurrent": 1,
"poolUtilizationWarningThreshold": 0,
"poolUtilizationCriticalThreshold": 85,
"poolUtilizationState": "utilizationOptimal",
"unusableCapacity": "0",
"degradedReconstructPriority": "high",
"criticalReconstructPriority": "highest",
"backg
|
andersx/cclib
|
src/cclib/writer/xyzwriter.py
|
Python
|
lgpl-2.1
| 4,078 | 0.000981 |
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2014, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""A writer for XYZ (Cartesian coordinate) files."""
from . import filewriter
class XYZ(filewriter.Writer):
"""A writer for XYZ (Cartesian coordinate) files."""
def __init__(self, ccdata, splitfiles=False,
firstgeom=False, lastgeom=True, allgeom=False,
*args, **kwargs):
"""Initialize the XYZ writer object.
Inputs:
ccdata - An instance of ccData, parse from a logfile.
splitfiles - Boolean to write multiple files if multiple files are requested. [TODO]
firstgeom - Boolean to write the first available geometry from the logfile.
lastgeom - Boolean to write the last available geometry from the logfile.
allgeom - Boolean to write all available geometries from the logfile.
"""
# Call the __init__ method of the superclass
super(XYZ, self).__init__(ccdata, *args, **kwargs)
self.do_firstgeom = firstgeom
self.do_lastgeom = lastgeom
self.do_allgeom = allgeom
self.generate_repr()
def generate_repr(self):
"""Generate the XYZ representation of the logfile data."""
# Options for output (to a single file):
# 1. Write all geometries from an optimization, which programs like VMD
# can read in like a trajectory.
        # 2. Write the final converged geometry, which for any job other than
# a geometry optimization would be the single/only geometry.
# 3. Write the very first geometry, which for any job other than a
# geometry optimization would be the single/only geometry.
# 4. Write the first and last geometries from a geometry optimization.
# Options for ouput (to multiple files):
# 1. Write all geometries from an optimization, to suitably named files. [TODO]
xyzblock = []
lencoords = len(self.ccdata.atomcoords)
if lencoords == 1:
xyzblock.append(self._xyz_from_ccdata(-1))
elif self.do_allgeom:
for index in range(lencoords):
xyzblock.append(self._xyz_from_ccdata(index))
elif self.do_firstgeom and self.do_lastgeom:
xyzblock.append(self._xyz_from_ccdata(0))
xyzblock.append(self._xyz_from_ccdata(-1))
elif self.do_firstgeom:
xyzblock.append(self._xyz_from_ccdata(0))
elif self.do_lastgeom:
xyzblock.append(self._xyz_from_ccdata(-1))
# If none of the options are set, return the empty string.
else:
xyzblock.append("")
return '\n'.join(xyzblock)
def _xyz_from_ccdata(self, index):
"""Create an XYZ file of the geometry at the given index."""
natom = str(self.ccdata.natom)
element_list = [self.pt.element[Z] for Z in self.ccdata.atomnos]
atomcoords = self.ccdata.atomcoords[index]
# Create a comment derived from the filename and the index.
if index == -1:
geometry_num = len(self.ccdata.atomcoords)
else:
geometry_num = index + 1
if self.jobfilename is not None:
comment = "{}: Geometry {}".format(self.jobfilename, geometry_num)
else:
comment = "Geometry {}".format(geometry_num)
atom_template = '{:3s} {:15.10f} {:15.10f} {:15.10f}'
block = []
block.append(natom)
block.append(comment)
for element, (x, y, z) in zip(element_list, atomcoords):
block.append(atom_template.format(element, x, y, z))
return '\n'.join(block)
if __name__ == "__main__":
pass
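# Editor's note: for reference, the block built by _xyz_from_ccdata above is the
# plain XYZ layout (atom count, comment line, one element/coordinate row per
# atom). A standalone sketch without a ccData object; the water geometry and the
# "water.log" name are illustrative only.
elements = ["O", "H", "H"]
coords = [(0.0, 0.0, 0.117), (0.0, 0.757, -0.469), (0.0, -0.757, -0.469)]
atom_template = '{:3s} {:15.10f} {:15.10f} {:15.10f}'
block = [str(len(elements)), "water.log: Geometry 1"]
for element, (x, y, z) in zip(elements, coords):
    block.append(atom_template.format(element, x, y, z))
print('\n'.join(block))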
|
centaurialpha/pireal
|
setup.py
|
Python
|
gpl-3.0
| 91 | 0 |
#!/usr/bin/env python
from setuptools import setup
if __name__ == "__main__":
setup()
|
LettError/glyphNameFormatter
|
Lib/glyphNameFormatter/rangeProcessors/helper_arabic_ligature_exceptions.py
|
Python
|
bsd-3-clause
| 1,516 | 0.005937 |
from __future__ import print_function
from glyphNameFormatter.tools import camelCase
doNotProcessAsLigatureRanges = [
(0xfc5e, 0xfc63),
(0xfe70, 0xfe74),
#(0xfc5e, 0xfc61),
(0xfcf2, 0xfcf4),
(0xfe76, 0xfe80),
]
def process(self):
# Specifically: do not add suffixes to these ligatures,
# they're really arabic marks
for a, b in doNotProcessAsLigatureRanges:
if a <= self.uniNumber <= b:
self.replace('TAIL FRAGMENT', "kashida Fina")
self.replace('INITIAL FORM', "init")
self.replace('MEDIAL FORM', "medi")
            self.replace('FINAL FORM', "fina")
self.replace('ISOLATED FORM', "isol")
self.replace('WITH SUPERSCRIPT', "")
self.replace('WITH', "")
self.replace("LIGATURE", "")
self.replace("ARABIC", "")
self.replace("SYMBOL", "")
self.replace("LETTER", "")
self.lower()
self.camelCase()
return True
return False
if __name__ == "__main__":
from glyphNameFormatter import GlyphName
print("\ndoNotProcessAsLigatureRanges", doNotProcessAsLigatureRanges)
odd = 0xfe76
for a, b in doNotProcessAsLigatureRanges:
for u in range(a,b+1):
try:
g = GlyphName(uniNumber=u)
n = g.getName()
print(hex(u), n, g.uniName)
except:
import traceback
traceback.print_exc()
|
zhaochl/python-utils
|
utils/except_util.py
|
Python
|
apache-2.0
| 134 | 0.014925 |
#!/usr/bin/env python
# coding=utf-8
import traceback
try:
    raise SyntaxError, "traceback test"
except:
    traceback.print_exc()
|
ksterker/wastesedge
|
scripts/schedules/mapcharacters/sarin.py
|
Python
|
gpl-2.0
| 2,833 | 0.012354 |
#
# (C) Copyright 2001/2002 Kai Sterker <kaisterker@linuxgames.com>
# Part of the Adonthell Project http://adonthell.linuxgames.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY.
#
# See the COPYING file for more details
#
# -- Movement schedule for Sarin Trailfollower
#
# He walks from one end of the room to the other. From time to
# to time he'll stop and chose another direction
import adonthell
import schedule
import random
def _(message): return message
class sarin (schedule.speak):
    def __init__ (self, mapcharacterinstance):
self.myself = mapcharacterinstance
# -- Borders of the area he should stay in
self.min_x = 1
self.max_x = 6
self.min_y = 2
self.max_y = 6
self.direction = self.myself.get_val ("direction")
# -- make random remarks
self.speech = [_("Ruffians, the lot of them!"), \
_("How dare they imprison one better than they?"), \
_("This is an insult to all of the High Born."), \
_("I cannot believe such disrespect. Barbarians!")]
self.speech_delay = (20, 40)
schedule.speak.__init__(self)
self.myself.set_callback (self.goal_reached)
def switch_direction (self):
# -- ... and set the new one accordingly
if self.direction == adonthell.WALK_EAST or self.direction == adonthell.WALK_WEST:
self.direction = random.randrange (adonthell.WALK_NORTH, adonthell.WALK_SOUTH + 1)
else:
self.direction = random.randrange (adonthell.WALK_WEST, adonthell.WALK_EAST + 1)
delay = "%it" % random.randrange (30, 60)
self.myself.time_callback (delay, self.switch_direction)
self.walk ()
def walk (self):
# -- switch direction
if self.direction == adonthell.WALK_NORTH:
goal = (self.myself.posx (), self.min_y, adonthell.STAND_SOUTH, 0, 1)
elif self.direction == adonthell.WALK_SOUTH:
goal = (self.myself.posx (), self.max_y, adonthell.STAND_NORTH, 0, -1)
elif self.direction == adonthell.WALK_EAST:
goal = (self.max_x, self.myself.posy (), adonthell.STAND_WEST, -1, 0)
else:
goal = (self.min_x, self.myself.posy (), adonthell.STAND_EAST, 1, 0)
x, y, d = goal[:3]
self.direction = d + 4
while not self.myself.set_goal (x, y, d):
offx, offy = goal [-2:]
x = x + offx
y = y + offy
def goal_reached (self):
delay = "%it" % random.randrange (3, 6)
self.myself.time_callback (delay, self.walk)
|
gnocchixyz/python-gnocchiclient
|
gnocchiclient/v1/resource_cli.py
|
Python
|
apache-2.0
| 10,591 | 0 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import distutils.util
from cliff import command
from cliff import lister
from cliff import show
from gnocchiclient import exceptions
from gnocchiclient import utils
class CliResourceList(lister.Lister):
"""List resources."""
COLS = ('id', 'type',
'project_id', 'user_id',
'original_resource_id',
'started_at', 'ended_at',
'revision_start', 'revision_end')
def get_parser(self, prog_name, history=True):
parser = super(CliResourceList, self).get_parser(prog_name)
parser.add_argument("--details", action='store_true',
help="Show all attributes of generic resources"),
if history:
parser.add_argument("--history", action='store_true',
help="Show history of the resources"),
parser.add_argument("--limit", type=int, metavar="<LIMIT>",
help="Number of resources to return "
"(Default is server default)")
parser.add_argument("--marker", metavar="<MARKER>",
help="Last item of the previous listing. "
"Return the next results after this value")
parser.add_argument("--sort", action="append", metavar="<SORT>",
help="Sort of resource attribute "
"(example: user_id:desc-nullslast")
parser.add_argument("--type", "-t", dest="resource_type",
default="generic", help="Type of resource")
return parser
def _list2cols(self, resources):
"""Return a formatted list of resources."""
if not resources:
return self.COLS, []
cols = list(self.COLS)
for k in resources[0]:
if k not in cols:
cols.append(k)
if 'creator' in cols:
cols.remove('created_by_user_id')
cols.remove('created_by_project_id')
return utils.list2cols(cols, resources)
def take_action(self, parsed_args):
resources = utils.get_client(self).resource.list(
resource_type=parsed_args.resource_type,
**utils.get_pagination_options(parsed_args))
# Do not dump metrics because it makes the list way too long
for r in resources:
del r['metrics']
return self._list2cols(resources)
class CliResourceHistory(CliResourceList):
"""Show the history of a resource."""
def get_parser(self, prog_name):
parser = super(CliResourceHistory, self).get_parser(prog_name,
history=False)
parser.add_argument("resource_id",
help="ID of a resource")
return parser
def take_action(self, parsed_args):
resources = utils.get_client(self).resource.history(
resource_type=parsed_args.resource_type,
resource_id=parsed_args.resource_id,
            **utils.get_pagination_options(parsed_args))
if parsed_args.formatter == 'table':
return self._list2cols(list(map(normalize_metrics, resources)))
return self._list2cols(resources)
class CliResourceSearch(CliResourceList):
"""Search resources with specified query rules."""
def get_parser(self, prog_name):
parser = super(CliResourceSearch, self).get_parser(prog_name)
        utils.add_query_argument("query", parser)
return parser
def take_action(self, parsed_args):
resources = utils.get_client(self).resource.search(
resource_type=parsed_args.resource_type,
query=parsed_args.query,
**utils.get_pagination_options(parsed_args))
# Do not dump metrics because it makes the list way too long
for r in resources:
del r['metrics']
return self._list2cols(resources)
def normalize_metrics(res):
res['metrics'] = "\n".join(sorted(
["%s: %s" % (name, _id)
for name, _id in res['metrics'].items()]))
return res
class CliResourceShow(show.ShowOne):
"""Show a resource."""
def get_parser(self, prog_name):
parser = super(CliResourceShow, self).get_parser(prog_name)
parser.add_argument("--type", "-t", dest="resource_type",
default="generic", help="Type of resource")
parser.add_argument("resource_id",
help="ID of a resource")
return parser
def take_action(self, parsed_args):
res = utils.get_client(self).resource.get(
resource_type=parsed_args.resource_type,
resource_id=parsed_args.resource_id)
if parsed_args.formatter == 'table':
normalize_metrics(res)
return self.dict2columns(res)
class CliResourceCreate(show.ShowOne):
"""Create a resource."""
def get_parser(self, prog_name):
parser = super(CliResourceCreate, self).get_parser(prog_name)
parser.add_argument("--type", "-t", dest="resource_type",
default="generic", help="Type of resource")
parser.add_argument("resource_id",
help="ID of the resource")
parser.add_argument("-a", "--attribute", action='append',
default=[],
help=("name and value of an attribute "
"separated with a ':'"))
parser.add_argument("-m", "--add-metric", action='append',
default=[],
help="name:id of a metric to add"),
parser.add_argument(
"-n", "--create-metric", action='append', default=[],
help="name:archive_policy_name of a metric to create"),
return parser
def _resource_from_args(self, parsed_args, update=False):
# Get the resource type to set the correct type
rt_attrs = utils.get_client(self).resource_type.get(
name=parsed_args.resource_type)['attributes']
resource = {}
if not update:
resource['id'] = parsed_args.resource_id
if parsed_args.attribute:
for attr in parsed_args.attribute:
attr, __, value = attr.partition(":")
attr_type = rt_attrs.get(attr, {}).get('type')
if attr_type == "number":
value = float(value)
elif attr_type == "bool":
value = bool(distutils.util.strtobool(value))
resource[attr] = value
if (parsed_args.add_metric or
parsed_args.create_metric or
(update and parsed_args.delete_metric)):
if update:
r = utils.get_client(self).resource.get(
parsed_args.resource_type,
parsed_args.resource_id)
default = r['metrics']
for metric_name in parsed_args.delete_metric:
try:
del default[metric_name]
except KeyError:
raise exceptions.MetricNotFound(
message="Metric name %s not found" % metric_name)
else:
default = {}
resource['metrics'] = default
for metric in parsed_args.add_metric:
name, _, value = metric.partition(":")
resource['metrics'][name] = value
for metric in parsed_args.create_metric:
name, _, value = metric.partition(":")
|
avlach/univbris-ocf
|
vt_manager/src/python/agent/xen/provisioning/configurators/ofelia/OfeliaDebianVMConfigurator.py
|
Python
|
bsd-3-clause
| 6,611 | 0.039782 |
import shutil
import os
import jinja2
import string
import subprocess
import re
from xen.provisioning.HdManager import HdManager
from settings.settingsLoader import OXA_XEN_SERVER_KERNEL,OXA_XEN_SERVER_INITRD,OXA_DEBIAN_INTERFACES_FILE_LOCATION,OXA_DEBIAN_UDEV_FILE_LOCATION, OXA_DEBIAN_HOSTNAME_FILE_LOCATION, OXA_DEBIAN_SECURITY_ACCESS_FILE_LOCATION
from utils.Logger import Logger
class OfeliaDebianVMConfigurator:
logger = Logger.getLogger()
''' Private methods '''
@staticmethod
def __configureInterfacesFile(vm,iFile):
#Loopback
iFile.write("auto lo\niface lo inet loopback\n\n")
#Interfaces
for inter in vm.xen_configuration.interfaces.interface :
if inter.ismgmt:
#is a mgmt interface
interfaceString = "auto "+inter.name+"\n"+\
"iface "+inter.name+" inet static\n"+\
"\taddress "+inter.ip +"\n"+\
"\tnetmask "+inter.mask+"\n"
if inter.gw != None and inter.gw != "":
interfaceString +="\tgateway "+inter.gw+"\n"
if inter.dns1 != None and inter.dns1 != "":
interfaceString+="\tdns-nameservers "+inter.dns1
if inter.dns2 != None and inter.dns2 != "":
interfaceString+=" "+inter.dns2
interfaceString +="\n\n"
iFile.write(interfaceString)
else:
#is a data interface
iFile.write("auto "+inter.name+"\n\n")
@staticmethod
def __configureUdevFile(vm,uFile):
for inter in vm.xen_configuration.interfaces.interface:
uFile.write('SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="'+inter.mac+'", ATTR{dev_id}=="0x0", ATTR{type}=="1", KERNEL=="eth*", NAME="'+inter.name+'"\n')
@staticmethod
def __configureHostname(vm,hFile):
hFile.write(vm.name)
@staticmethod
def __createParavirtualizationFileHdConfigFile(vm,env):
template_name = "paraVirtualizedFileHd.pt"
template = env.get_template(template_name)
#Set vars&render
output = template.render(
kernelImg=OXA_XEN_SERVER_KERNEL,
initrdImg=OXA_XEN_SERVER_INITRD,
hdFilePath=HdManager.getHdPath(vm),
swapFilePath=HdManager.getSwapPath(vm),
vm=vm)
#write file
cfile = open(HdManager.getConfigFilePath(vm),'w')
cfile.write(output)
cfile.close()
''' Public methods '''
@staticmethod
def getIdentifier():
return OfeliaDebianVMConfigurator.__name__
@staticmethod
def _configureNetworking(vm,path):
#Configure interfaces and udev settings
try:
try:
                #Backup current files
shutil.copy(path+OXA_DEBIAN_INTERFACES_FILE_LOCATION,path+OXA_DEBIAN_INTERFACES_FILE_LOCATION+".bak")
shutil.copy(path+OXA_DEBIAN_UDEV_FILE_LOCATION,path+OXA_DEBIAN_UDEV_FILE_LOCATION+".bak")
except Exception as e:
pass
with open(path+OXA_DEBIAN_INTERFACES_FILE_LOCATION,'w') as openif:
OfeliaDebianVMConfigurator.__configureInterfacesFile(vm,openif)
with open(path+OXA_DEBIAN_UDEV_FILE_LOCATION,'w') as openudev:
                OfeliaDebianVMConfigurator.__configureUdevFile(vm,openudev)
except Exception as e:
OfeliaDebianVMConfigurator.logger.error(str(e))
raise Exception("Could not configure interfaces or Udev file")
@staticmethod
def _configureLDAPSettings(vm,path):
try:
file = open(path+OXA_DEBIAN_SECURITY_ACCESS_FILE_LOCATION, "r")
text = file.read()
file.close()
file = open(path+OXA_DEBIAN_SECURITY_ACCESS_FILE_LOCATION, "w")
#Scape spaces and tabs
projectName = string.replace(vm.project_name,' ','_')
projectName = string.replace(projectName,'\t','__')
file.write(text.replace("__projectId","@proj_"+vm.project_id+"_"+projectName))
file.close()
except Exception as e:
OfeliaDebianVMConfigurator.logger.error("Could not configure LDAP file!! - "+str(e))
@staticmethod
def _configureHostName(vm,path):
try:
with open(path+OXA_DEBIAN_HOSTNAME_FILE_LOCATION,'w') as openhost:
OfeliaDebianVMConfigurator.__configureHostname(vm, openhost)
except Exception as e:
OfeliaDebianVMConfigurator.logger.error("Could not configure hostname;skipping.. - "+str(e))
@staticmethod
def _configureSSHServer(vm,path):
try:
OfeliaDebianVMConfigurator.logger.debug("Regenerating SSH keys...\n Deleting old keys...")
subprocess.check_call("rm -f "+path+"/etc/ssh/ssh_host_*", shell=True, stdout=None)
#subprocess.check_call("chroot "+path+" dpkg-reconfigure openssh-server ", shell=True, stdout=None)
OfeliaDebianVMConfigurator.logger.debug("Creating SSH1 key; this may take some time...")
subprocess.check_call("ssh-keygen -q -f "+path+"/etc/ssh/ssh_host_key -N '' -t rsa1", shell=True, stdout=None)
OfeliaDebianVMConfigurator.logger.debug("Creating SSH2 RSA key; this may take some time...")
subprocess.check_call("ssh-keygen -q -f "+path+"/etc/ssh/ssh_host_rsa_key -N '' -t rsa", shell=True, stdout=None)
OfeliaDebianVMConfigurator.logger.debug("Creating SSH2 DSA key; this may take some time...")
subprocess.check_call("ssh-keygen -q -f "+path+"/etc/ssh/ssh_host_dsa_key -N '' -t dsa", shell=True, stdout=None)
except Exception as e:
OfeliaDebianVMConfigurator.logger.error("Fatal error; could not regenerate SSH keys. Aborting to prevent VM to be unreachable..."+str(e))
raise e
#Public methods
@staticmethod
def createVmConfigurationFile(vm):
#get env
template_dirs = []
template_dirs.append(os.path.join(os.path.dirname(__file__), 'templates/'))
env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dirs))
if vm.xen_configuration.hd_setup_type == "file-image" and vm.xen_configuration.virtualization_setup_type == "paravirtualization" :
OfeliaDebianVMConfigurator.__createParavirtualizationFileHdConfigFile(vm,env)
else:
raise Exception("type of file or type of virtualization not supported for the creation of xen vm configuration file")
@staticmethod
def configureVmDisk(vm, path):
if not path or not re.match(r'[\s]*\/\w+\/\w+\/.*', path,re.IGNORECASE): #For security, should never happen anyway
raise Exception("Incorrect vm path")
#Configure networking
OfeliaDebianVMConfigurator._configureNetworking(vm,path)
OfeliaDebianVMConfigurator.logger.info("Network configured successfully...")
#Configure LDAP settings
OfeliaDebianVMConfigurator._configureLDAPSettings(vm,path)
OfeliaDebianVMConfigurator.logger.info("Authentication configured successfully...")
#Configure Hostname
OfeliaDebianVMConfigurator._configureHostName(vm,path)
OfeliaDebianVMConfigurator.logger.info("Hostname configured successfully...")
#Regenerate SSH keys
OfeliaDebianVMConfigurator._configureSSHServer(vm,path)
OfeliaDebianVMConfigurator.logger.info("SSH have been keys regenerated...")
|
theskumar/django-unsubscribe
|
unsubscribe/tests/test_utils.py
|
Python
|
bsd-3-clause
| 329 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
class UtilsTests(TestCase):
"""docstring for UtilsTests"""
def setUp(self):
self.username = 'theskumar'
self.email = 'theskumar@example.com'
def test_foo(self):
        self.assertEqual('foo', "foo")
|
fbeutler/Metropolis-Hastings
|
Metropolis_Hastings2.py
|
Python
|
gpl-2.0
| 579 | 0.008636 |
import numpy as np
import matplotlib.pyplot as pl
def f(x):
return np.exp(-x**2)
def main():
N = 100000
x = np.arange(N,dtype=np.float)
x[0] = 0.2
counter = 0
for i in range(0, N-1):
x_next = np.random.normal(x[i], 1.)
if np.random.random_sample() < min(1, f(x_next)/f(x[i])):
x[i+1] = x_next
counter = counter + 1
else:
x[i+1] = x[i]
print("acceptance fraction is ", counter/float(N))
pl.hist(x, bins=50, color='blue')
pl.show()
if __name__ == '__main__':
main()
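# Editor's note: a quick sanity check on the sampler above. The target
# f(x) = exp(-x**2) is proportional to a normal density with variance 0.5, so
# the chain's sample variance should settle near 0.5 for large N. This sketch
# reuses the same update rule, minus the plotting.
import numpy as np
def f_check(x):
    return np.exp(-x**2)
N = 100000
x = np.zeros(N)
x[0] = 0.2
for i in range(N - 1):
    x_next = np.random.normal(x[i], 1.)
    if np.random.random_sample() < min(1, f_check(x_next) / f_check(x[i])):
        x[i + 1] = x_next
    else:
        x[i + 1] = x[i]
print("sample variance:", x.var())  # expected to be close to 0.5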
|
google/TaglessCRM
|
src/plugins/pipeline_plugins/hooks/ads_uac_hook.py
|
Python
|
apache-2.0
| 9,520 | 0.004832 |
# python3
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom hook for Google Ads UAC.
For UAC details refer to
https://developers.google.com/adwords/api/docs/guides/mobile-app-campaigns
"""
import enum
import json
import re
from typing import Any, Dict, Optional
import urllib.parse
from airflow.hooks import http_hook
from plugins.pipeline_plugins.hooks import output_hook_interface
from plugins.pipeline_plugins.utils import async_utils
from plugins.pipeline_plugins.utils import blob
from plugins.pipeline_plugins.utils import errors
# RDID (raw device id) should be in UUID format.
_RDID_PATTERN = '^[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}$'
_RDID_REGEX = re.compile(_RDID_PATTERN, re.IGNORECASE)
_APP_CONVERSION_TRACKING_PATH = 'pagead/conversion/app/1.0'
_REQUIRED_FIELDS = ('dev_token',
'link_id',
'app_event_type',
'rdid',
'id_type',
'lat',
'app_version',
'os_version',
'sdk_version',
'timestamp')
class AppEventType(enum.Enum):
FIRST_OPEN = 'first_open'
SESSION_START = 'session_start'
IN_APP_PURCHASE = 'in_app_purchase'
VIEW_ITEM_LIST = 'view_item_list'
VIEW_ITEM = 'view_item'
VIEW_SEARCH_RESULTS = 'view_search_results'
ADD_TO_CART = 'add_to_cart'
ECOMMERCE_PURCHASE = 'ecommerce_purchase'
CUSTOM = 'custom'
class IdType(enum.Enum):
ANDROID = 'advertisingid'
IOS = 'idfa'
class EventStatus(enum.Enum):
SUCCESS = enum.auto()
FAILURE = enum.auto()
class AdsUniversalAppCampaignHook(
http_hook.HttpHook, output_hook_interface.OutputHookInterface):
"""Custom hook for Google Ads UAC API.
API SPEC for Apps Conversion Tracking and Remarketing
https://developers.google.com/app-conversion-tracking/api/request-response-specs
"""
def __init__(self, ads_uac_conn_id: str = 'google_ads_uac_default',
ads_uac_dry_run: bool = False, **kwargs) -> None:
"""Initializes the generator of a specified BigQuery table.
Args:
ads_uac_conn_id: Connection id passed to airflow.
ads_uac_dry_run: If true the hook will not send real hits to the endpoint.
**kwargs: Other optional arguments.
"""
super().__init__(http_conn_id=ads_uac_conn_id)
self.dry_run = ads_uac_dry_run
def _get_developer_token(self) -> str:
"""Gets developer token from connection configuration.
Returns:
dev_token: Developer token of Google Ads API.
Raises:
DataOutConnectorValueError: If connection is not available or if password
is missing in the connection.
"""
conn = self.get_connection(self.http_conn_id)
if not conn:
raise errors.DataOutConnectorValueError(
'Cannot get connection {id}.'.format(id=self.http_conn_id),
errors.ErrorNameIDMap
.RETRIABLE_ADS_UAC_HOOK_ERROR_FAIL_TO_GET_AIRFLOW_CONNECTION)
if not conn.password:
raise errors.DataOutConnectorValueError(
'Missing dev token. Please check connection {id} and its password.'
.format(id=self.http_conn_id),
errors.ErrorNameIDMap.RETRIABLE_ADS_UAC_HOOK_ERROR_MISSING_DEV_TOKEN)
return conn.password
def _validate_app_conversion_payload(self, payload: Dict[str, Any]) -> None:
"""Validates payload sent to UAC.
Args:
payload: The payload to be validated before sending to Google Ads UAC.
Raises:
DataOutConnectorValueError: If some value is missing or in wrong format.
"""
for key in _REQUIRED_FIELDS:
if payload.get(key) is None:
raise errors.DataOutConnectorValueError(
"""Missing {key} in payload.""".format(key=key),
errors.ErrorNameIDMap.ADS_UAC_HOOK_ERROR_MISSING_MANDATORY_FIELDS)
if payload.get('app_event_type') not in [item.value
for item in AppEventType]:
raise errors.DataOutConnectorValueError(
"""Unsupported app event type in
payload. Example: 'first_open', 'session_start', 'in_app_purchase',
'view_item_list', 'view_item', 'view_search_results',
'add_to_cart', 'ecommerce_purchase', 'custom'.""",
errors.ErrorNameIDMap.ADS_UAC_HOOK_ERROR_UNSUPPORTED_APP_EVENT_TYPE)
if (payload.get('app_event_name') and
payload.get('app_event_type') != 'custom'):
raise errors.DataOutConnectorValueError(
"""App event type must be 'custom' when app event name exists.""",
errors.ErrorNameIDMap.ADS_UAC_HOOK_ERROR_WRONG_APP_EVENT_TYPE)
match = _RDID_REGEX.match(payload.get('rdid'))
if not match:
raise errors.DataOutConnectorValueError(
"""Wrong raw device id format in
payload. Should be compatible with RFC4122.""",
errors.ErrorNameIDMap.ADS_UAC_HOOK_ERROR_WRONG_RAW_DEVICE_ID_FORMAT)
if payload.get('id_type') not in [item.value for item in IdType]:
raise errors.DataOutConnectorValueError(
"""Wrong raw device id type in
payload. Example: 'advertisingid', 'idfa'.""",
errors.ErrorNameIDMap.ADS_UAC_HOOK_ERROR_WRONG_RAW_DEVICE_ID_TYPE)
if payload.get('lat') != 0 and payload.get('lat') != 1:
raise errors.DataOutConnectorValueError(
"""Wrong limit-ad-tracking status in payload. Example: 0, 1.""",
errors.ErrorNameIDMap.ADS_UAC_HOOK_ERROR_WRONG_LAT_STATUS)
def send_conversions_to_uac(
self, params: Dict[str, Any]) -> Optional[Dict[str, Any]]:
"""Sends conversion to UAC via S2S REST API.
Args:
params: Parameters containing required data for app conversion tracking.
Returns:
results: Includes request body, status_code, error_msg, response body and
dry_run flag.
The response refers to the definition of conversion tracking response in
https://developers.google.com/app-conversion-tracking/api/request-response-specs#conversion_tracking_response
"""
try:
request_params = dict(params)
request_params['dev_token'] = self._get_developer_token()
app_event_data = request_params.get('app_event_data')
if 'app_event_data' in request_params:
del request_params['app_event_data']
self._validate_app_conversion_payload(request_params)
except errors.DataOutConnectorValueError as error:
self.log.exception(error)
return {'request': params,
'status_code': 400,
'error_msg': str(error),
'dry_run': self.dry_run}
self.method = 'POST'
query_url = urllib.parse.urlencode(request_params)
complete_url = ('{path}?{default_query}'
.format(
path=_APP_CONVERSION_TRACKING_PATH,
default_query=query_url))
if self.dry_run:
self.log.debug(
"""Dry run mode: Sending conversion tracking data to UAC.
URL:{}. App event data:{}."""
.format(complete_url, json.dumps(app_event_data)))
return {'request': params,
'status_code': 500,
'error_msg': 'Dry run mode',
'dry_run': self.dry_run}
response = None
extra_options = {'check_response': False}
self.log.info(
"""Not Dry run mode: Sending conversion tracking data to UAC.
URL:{}. App event data:{}."""
.format(complete_url, json.dumps(app_event_data)))
response = self.run(endpoint=complete_url,
data=app_event_data,
extra_options=extra_options)
try:
body = res
|
nickpack/reportlab
|
src/reportlab/pdfgen/pdfgeom.py
|
Python
|
bsd-3-clause
| 3,119 | 0.00545 |
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/pdfgen/pdfgeom.py
__version__=''' $Id: pdfgeom.py 3959 2012-09-27 14:39:39Z robin $ '''
__doc__="""
This module includes any mathematical methods needed for PIDDLE.
It should have no dependencies beyond the Python library.
So far, just Robert Kern's bezierArc.
"""
from math import sin, cos, pi, ceil
def bezierArc(x1,y1, x2,y2, startAng=0, extent=90):
"""bezierArc(x1,y1, x2,y2, startAng=0, extent=90) --> List of Bezier
curve control points.
(x1, y1) and (x2, y2) are the corners of the enclosing rectangle. The
coordinate system has coordinates that increase to the right and down.
Angles, measured in degress, start with 0 to the right (the positive X
axis) and increase counter-clockwise. The arc extends from startAng
to startAng+extent. I.e. startAng=0 and extent=180 yields an openside-down
semi-circle.
The resulting coordinates are of the form (x1,y1, x2,y2, x3,y3, x4,y4)
such that the curve goes from (x1, y1) to (x4, y4) with (x2, y2) and
(x3, y3) as their respective Bezier control points."""
x1,y1, x2,y2 = min(x1,x2), max(y1,y2), max(x1,x2), min(y1,y2)
if abs(extent) <= 90:
arcList = [startAng]
fragAngle = float(extent)
Nfrag = 1
else:
arcList = []
Nfrag = int(ceil(abs(extent)/90.))
fragAngle = float(extent) / Nfrag
x_cen = (x1+x2)/2.
y_cen = (y1+y2)/2.
    rx = (x2-x1)/2.
ry = (y2-y1)/2.
halfAng = fragAngle * pi / 360.
kappa = abs(4. / 3. * (1. - cos(halfAng)) / sin(halfAng))
if fragAngle < 0:
sign = -1
else:
sign = 1
pointList = []
for i in range(Nfrag):
theta0 = (startAng + i*fragAngle) * pi / 180.
theta1 = (startAng + (i+1)*fragAngle) *pi / 180.
if fragAngle > 0:
pointList.append((x_cen + rx * cos(theta0),
y_cen - ry * sin(theta0),
x_cen + rx * (cos(theta0) - kappa * sin(theta0)),
y_cen - ry * (sin(theta0) + kappa * cos(theta0)),
x_cen + rx * (cos(theta1) + kappa * sin(theta1)),
y_cen - ry * (sin(theta1) - kappa * cos(theta1)),
x_cen + rx * cos(theta1),
y_cen - ry * sin(theta1)))
else:
pointList.append((x_cen + rx * cos(theta0),
y_cen - ry * sin(theta0),
x_cen + rx * (cos(theta0) + kappa * sin(theta0)),
y_cen - ry * (sin(theta0) - kappa * cos(theta0)),
x_cen + rx * (cos(theta1) - kappa * sin(theta1)),
y_cen - ry * (sin(theta1) + kappa * cos(theta1)),
x_cen + rx * cos(theta1),
y_cen - ry * sin(theta1)))
return pointList
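# Editor's note: a small usage example for bezierArc above. A 90 degree arc in
# the 2x2 square yields one (x1,y1,...,x4,y4) tuple whose control points sit
# roughly kappa*r ~ 0.5523 away from the endpoints. The import path assumes
# reportlab is installed; otherwise paste in the function defined above.
from reportlab.pdfgen.pdfgeom import bezierArc
pts = bezierArc(0, 0, 2, 2, startAng=0, extent=90)
for x1, y1, x2, y2, x3, y3, x4, y4 in pts:
    print("start", (x1, y1), "ctrl1", (x2, y2), "ctrl2", (x3, y3), "end", (x4, y4))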
|
thiswind/nn_practice
|
tensorflow/calculate_pi_old.py
|
Python
|
gpl-3.0
| 581 | 0.032702 |
import tensorflow as tf
import matplotlib.pyplot as plt
import math
x_node = tf.random_uniform([1], minval=-1, maxval=1, dtype=tf.float32,
name='x_node')
y_node = tf.random_uniform([1], minval=-1, maxval=1, dtype=tf.float32,
name='y_node')
times = 5000
hits = 0
pis = []
with tf.Session() as session:
for i in range(1, times):
x = session.run(x_node)
y = session.run(y_node)
if x*x + y*y < 1:
hits += 1
pass
pi = 4 * float(hits) / i
print(pi)
pis.append(pi)
pass
pass
plt.plot(pis)
plt.plot([0, times], [math.pi, math.pi])
plt.show()
|
theonion/djes
|
djes/mapping.py
|
Python
|
mit
| 5,823 | 0.002919 |
from django.db import models
from django.db.models.fields.related import ManyToOneRel, ForeignObjectRel
from elasticsearch_dsl.mapping import Mapping
from elasticsearch_dsl.field import Field
from djes.conf import settings
FIELD_MAPPINGS = {
"AutoField": {"type": "long"},
"BigIntegerField": {"type": "long"},
"BinaryField": {"type": "binary"},
"BooleanField": {"type": "boolean"},
"CharField": {"type": "string"},
"CommaSeparatedIntegerField": {"type": "string"},
"DateField": {"type": "date"},
"DateTimeField": {"type": "date"},
"DecimalField": {"type": "string"},
"DurationField": {"type": "long"},
"EmailField": {"type": "string"},
# "FileField": {"type": ""}, # TODO: make a decision on this
"FilePathField": {"type": "string"},
"FloatField": {"type": "double"},
# "ImageField": {"type": ""}, # TODO: make a decision on this
"IntegerField": {"type": "long"},
"IPAddressField": {"type": "string", "index": "not_analyzed"},
"GenericIPAddressField": {"type": "string", "index": "not_analyzed"},
"NullBooleanField": {"type": "boolean"},
"PositiveIntegerField": {"type": "long"},
"PositiveSmallIntegerField": {"type": "long"},
"SlugField": {"type": "string", "index": "not_analyzed"},
"SmallIntegerField": {"type": "long"},
"TextField": {"type": "string"},
"TimeField": {"type": "string"},
"URLField": {"type": "string"},
"UUIDField": {"type": "string", "index": "not_analyzed"},
"ForeignKey": {"type": "long"},
"ManyToManyField": {"type": "long"},
"OneToOneField": {"type": "long"},
}
def get_first_mapping(cls):
"""This allows for Django-like inheritance of mapping configurations"""
from .models import Indexable
    if issubclass(cls, Indexable) and hasattr(cls, "Mapping"):
return cls.Mapping
for base in cls.__bases__:
mapping = get_first_mapping(base)
if mapping:
return mapping
return None
class EmptyMeta(object):
pass
class DjangoMapping(Mapping):
"""A subclass of the elasticsearch_dsl Mapping, allowing the automatic mapping
of many fields on the model, while letting the developer override these settings"""
    def __init__(self, model):
from .models import Indexable
self.model = model
if not hasattr(self, "Meta"):
self.Meta = EmptyMeta
default_name = "{}_{}".format(self.model._meta.app_label, self.model._meta.model_name)
name = getattr(self.Meta, "doc_type", default_name)
super(DjangoMapping, self).__init__(name)
self._meta = {}
excludes = getattr(self.Meta, "excludes", [])
includes = getattr(self.Meta, "includes", [])
for field in self.model._meta.get_fields():
if field.auto_created and field.is_relation:
if not hasattr(field, "rel") or not field.rel.parent_link:
continue
db_column, attname = field.db_column, field.attname
manual_field_mapping = getattr(self, field.name, None)
            # TODO: I am 90% sure this is not being utilized. Test later.
if manual_field_mapping:
self.field(field.name, manual_field_mapping)
continue
if field.name in excludes:
continue
self.configure_field(field)
# Now any included relations
for name in includes:
field = self.model._meta.get_field(name)
self.configure_field(field)
# Now any custom fields
for field in dir(self.__class__):
manual_field_mapping = getattr(self, field)
if field not in self.properties.properties.to_dict() and isinstance(manual_field_mapping, Field):
self.field(field, manual_field_mapping)
if getattr(self.Meta, "dynamic", "strict") == "strict":
self.properties._params["dynamic"] = "strict"
def configure_field(self, field):
"""This configures an Elasticsearch Mapping field, based on a Django model field"""
from .models import Indexable
# This is for reverse relations, which do not have a db column
if field.auto_created and field.is_relation:
if isinstance(field, (ForeignObjectRel, ManyToOneRel)) and issubclass(field.related_model, Indexable):
related_properties = field.related_model.search_objects.mapping.properties.properties.to_dict()
self.field(field.name, {"type": "nested", "properties": related_properties})
return
if field.get_internal_type() == "ManyToManyField" and issubclass(field.rel.to, Indexable):
related_properties = field.rel.to.search_objects.mapping.properties.properties.to_dict()
self.field(field.name, {"type": "nested", "properties": related_properties})
return
if isinstance(field, models.ForeignKey):
# This is a related field, so it should maybe be nested?
# We only want to nest fields when they are indexable, and not parent pointers.
if issubclass(field.rel.to, Indexable) and not field.rel.parent_link:
related_properties = field.rel.to.search_objects.mapping.properties.properties.to_dict()
self.field(field.name, {"type": "nested", "properties": related_properties})
return
db_column, attname = field.db_column, field.attname
field_args = FIELD_MAPPINGS.get(field.get_internal_type())
if field_args:
self.field(db_column or attname, field_args)
else:
raise Warning("Can't find {}".format(field.get_internal_type()))
@property
def index(self):
return getattr(self.Meta, "index", settings.ES_INDEX)
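# Editor's note: a hypothetical sketch of how a model might plug into the
# machinery above, inferred from this excerpt only: get_first_mapping looks for
# a Mapping attribute on an Indexable subclass, and DjangoMapping reads
# doc_type/excludes/dynamic from its Meta. Field names and the exact wiring are
# assumptions, not taken from djes documentation.
from django.db import models
from djes.models import Indexable
from djes.mapping import DjangoMapping
class Article(Indexable):
    title = models.CharField(max_length=255)
    slug = models.SlugField()
    internal_notes = models.TextField(blank=True)
    class Mapping(DjangoMapping):
        class Meta:
            doc_type = "content_article"   # overrides the app_label_modelname default
            excludes = ["internal_notes"]  # skipped by the field loop above
            dynamic = "strict"             # the default behaviour per __init__ above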
|
asimshankar/tensorflow
|
tensorflow/contrib/model_pruning/python/pruning.py
|
Python
|
apache-2.0
| 21,323 | 0.00469 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions to add support for magnitude-based model pruning.
# Adds variables and ops to the graph to enable
# elementwise masking of weights
apply_mask(weights)
# Returns a list containing the sparsity of each of the weight tensors
get_weight_sparsity()
# Returns a list of all the masked weight tensorflow variables
get_masked_weights()
# Returns a list of all the mask tensorflow variables
get_masks()
# Returns a list of all the thresholds
get_thresholds()
# Returns a list of all the weight tensors that have been masked
get_weights()
The Pruning class uses a tf.hparams object to set up the
parameters for a model pruning. Here's a typical usage:
# Parse pruning hyperparameters
pruning_hparams = pruning.get_pruning_hparams().parse(FLAGS.pruning_hparams)
# Create a pruning object using the pruning_hparams
p = pruning.Pruning(pruning_hparams)
# Add mask update ops to the graph
mask_update_op = p.conditional_mask_update_op()
# Add the summaries
p.add_pruning_summaries()
# Run the op
session.run(mask_update_op)
# An object of the pruning also accepts externally defined sparsity:
sparsity = tf.Variable(0.5, name = "ConstantSparsity")
p = pruning.Pruning(pruning_hparams, sparsity=sparsity)
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.model_pruning.python import pruning_utils
from tensorflow.contrib.model_pruning.python.layers import core_layers as core
from tensorflow.contrib.training.python.training import hparam
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import training_util
_MASK_COLLECTION = core.MASK_COLLECTION
_THRESHOLD_COLLECTION = core.THRESHOLD_COLLECTION
_MASKED_WEIGHT_COLLECTION = core.MASKED_WEIGHT_COLLECTION
_WEIGHT_COLLECTION = core.WEIGHT_COLLECTION
_MASKED_WEIGHT_NAME = core.MASKED_WEIGHT_NAME
def apply_mask(x, scope=''):
"""Apply mask to a given weight tensor.
Args:
x: Input weight tensor
scope: The current variable scope. Defaults to "".
Returns:
Tensor representing masked_weights
"""
mask = pruning_utils.weight_mask_variable(x, scope)
threshold = pruning_utils.weight_threshold_variable(x, scope)
# Add masked_weights in the weights namescope so as to make it easier
# for the quantization library to add quant ops.
masked_weights = math_ops.multiply(mask, x, _MASKED_WEIGHT_NAME)
# Make sure the mask for a given variable are not added multiple times to the
# collection. This is particularly important when applying mask to RNN's
# weight variables
if mask not in ops.get_collection_ref(_MASK_COLLECTION):
ops.add_to_collection(_THRESHOLD_COLLECTION, threshold)
ops.add_to_collection(_MASK_COLLECTION, mask)
ops.add_to_collection(_MASKED_WEIGHT_COLLECTION, masked_weights)
ops.add_to_collection(_WEIGHT_COLLECTION, x)
return masked_weights
def get_masked_weights():
return ops.get_collection(_MASKED_WEIGHT_COLLECTION)
def get_masks():
return ops.get_collection(_MASK_COLLECTION)
def get_thresholds():
return ops.get_collection(_THRESHOLD_COLLECTION)
def get_weights():
return ops.get_collection(_WEIGHT_COLLECTION)
def get_weight_sparsity():
"""Get sparsity of the weights.
Args:
None
Returns:
A list containing the sparsity of each of the weight tensors
"""
masks = get_masks()
return [nn_impl.zero_fraction(mask) for mask in masks]
def get_pruning_hparams():
"""Get a tf.HParams object with the default values for the hyperparameters.
name: string
name of the pruning specification. Used for adding summaries and ops under
a common tensorflow name_scope
begin_pruning_step: integer
the global step at which to begin pruning
end_pruning_step: integer
the global step at which to terminate pruning. Defaults to -1 implying
that pruning continues till the training stops
weight_sparsity_map: list of strings
comma separed list of weight variable name:target sparsity pairs.
For layers/weights not in this list, sparsity as specified by the
target_sparsity hyperparameter is used.
Eg. [conv1:0.9,conv2/kernel:0.8]
threshold_decay: float
the decay factor to use for exponential decay of the thresholds
pruning_frequency: integer
How often should the masks be updated? (in # of global_steps)
nbins: integer
number of bins to use for histogram computation
block_height: integer
number of rows in a block (defaults to 1)
block_width: integer
number of cols in a block (defaults to 1)
block_pooling_function: string
Whether to perform average (AVG) or max (MAX) pooling in the block
(default: AVG)
initial_sparsity: float
initial sparsity value
target_sparsity: float
target sparsity value
sparsity_function_begin_step: integer
the global step at this which the gradual sparsity function begins to
take effect
sparsity_function_end_step: integer
    the global step used as the end point for the gradual sparsity function
sparsity_function_exponent: float
exponent = 1 is linearly varying sparsity between initial and final.
exponent > 1 varies more slowly towards the end than the beginning
use_tpu: False
Indicates whether to use TPU
We use the following sparsity function:
num_steps = (sparsity_function_end_step -
sparsity_function_begin_step)/pruning_frequency
  sparsity(step) = (initial_sparsity - target_sparsity)*
[1-step/(num_steps -1)]**exponent + target_sparsity
Args:
None
Returns:
tf.HParams object initialized to default values
"""
return hparam.HParams(
name='model_pruning',
begin_pruning_step=0,
end_pruning_step=-1,
weight_sparsity_map=[''],
threshold_decay=0.0,
pruning_frequency=10,
nbins=256,
block_height=1,
block_width=1,
block_pooling_function='AVG',
initial_sparsity=0.0,
target_sparsity=0.5,
sparsity_function_begin_step=0,
sparsity_function_end_step=100,
sparsity_function_exponent=3,
use_tpu=False)
class Pruning(object):
def __init__(self, spec=None, global_step=None, sparsity=None):
"""Set up the specification for model pruning.
If a spec is provided, the sparsity is set up based on the sparsity_function
in the spec. The effect of sparsity_function is overridden if the sparsity
variable is passed to the constructor. This enables setting up arbitrary
    sparsity profiles externally and passing them to this pruning function.
Args:
spec: Pruning spec as defined in pruning.proto
global_step: A tensorflow variable that is used while setting up the
sparsity function
sparsity: A tensorf
|
tensorflow/lingvo
|
lingvo/jax/base_model.py
|
Python
|
apache-2.0
| 37,124 | 0.004067 |
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for all models.
The model solely consists of the network, while the task combines one or several
models with one or several learners/optimizers.
"""
from typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union
import jax
from jax import numpy as jnp
from lingvo.jax import base_input
from lingvo.jax import base_layer
from lingvo.jax import layers
from lingvo.jax import metric_utils
from lingvo.jax import py_utils
from lingvo.jax import train_states
NestedMap = py_utils.NestedMap
JTensor = base_layer.JTensor
InstantiableParams = py_utils.InstantiableParams
Predictions = Union[JTensor, NestedMap, Dict[str, Any]]
Metrics = Dict[str, Tuple[JTensor, JTensor]]
TrainState = train_states.TrainState
def _compute_xent_loss_helper(
predictions: NestedMap, input_batch: NestedMap,
return_predictions: bool) -> Tuple[Metrics, Dict[str, Any]]:
"""Helper for computing the xent loss for Language model and Sequence model.
Args:
predictions: A `.NestedMap` containing the keys `per_example_argmax`,
`total_loss`, `avg_xent`, `aux_loss`, `total_weight` which corresponds to
the output of the Softmax layer.
input_batch: A `.NestedMap` object containing input tensors which contains
the keys `labels` and `weights` which corresponds to the labels and the
`weights` for each token in the sequence.
return_predictions: Whether to return predictions, which can be more
expensive.
Returns:
- A dict or NestedMap containing str keys and (metric, weight) pairs as
values, where one of the entries is expected to correspond to the loss.
- A dict containing arbitrary tensors describing something about each
training example, where the first dimension of each tensor is the batch
index. The base class just returns an empty dict.
"""
if 'tgt' in input_batch:
labels = input_batch.tgt.labels
if 'paddings' in input_batch.tgt:
weights = 1.0 - input_batch.tgt.paddings
else:
weights = jnp.not_equal(input_batch.tgt.segment_ids, 0)
weights = weights.astype(labels.dtype)
else:
labels = input_batch.labels
weights = input_batch.weights
predicted_labels = predictions.per_example_argmax.astype(labels.dtype)
num_preds = predictions.total_weight
mean_acc = jnp.sum(
(labels == predicted_labels) * weights) / jnp.maximum(num_preds, 1)
metric_weight = jnp.array(num_preds, predictions.avg_xent.dtype)
if hasattr(predictions, 'avg_xent_weight'):
avg_xent_weight = predictions.avg_xent_weight
else:
avg_xent_weight = metric_weight
metrics = NestedMap(
total_loss=(predictions.total_loss, metric_weight),
      avg_xent=(predictions.avg_xent, avg_xent_weight),
aux_loss=(predictions.aux_loss, jnp.array(1.0,
predictions.aux_loss.dtype)),
log_pplx=(predictions.avg_xent, avg_xent_weight),
fraction_of_correct_next_step_preds=(mean_acc, metric_weight),
num_predictions=(num_preds, jnp.array(1.0, num_preds.dtype)),
)
per_example_output = NestedMap()
if return_predictions:
per_example_output = predictions
return metrics, per_example_output
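# --- Illustrative toy example (hypothetical values, not part of the module) ---
# How the weighted next-step accuracy above behaves on a tiny batch; positions
# with weight 0 (padding) do not contribute to the average.
def _example_weighted_accuracy():
  labels = jnp.array([[3, 5, 0], [7, 2, 0]])         # [batch, time]
  predicted = jnp.array([[3, 4, 0], [7, 2, 9]])
  weights = jnp.array([[1., 1., 0.], [1., 1., 0.]])  # last position is padding
  num_preds = jnp.sum(weights)
  # 3 correct predictions over 4 weighted positions -> 0.75
  return jnp.sum((labels == predicted) * weights) / jnp.maximum(num_preds, 1)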
def greedy_decode(extend_step_fn: Callable[[NestedMap, JTensor],
Tuple[NestedMap, JTensor]],
decoder_state: NestedMap,
target_ids: JTensor,
target_paddings: JTensor,
seq_len: int,
max_decode_steps: Optional[int] = None,
prefix_lengths: Optional[JTensor] = None,
eos_id: Optional[int] = None) -> NestedMap:
"""Greedy decode the input batch.
Args:
extend_step_fn: A function that takes in `states` and the decoded sequence
at the current time step (with shape [B] or [B, P] where B corresponds to
the batch size and P corresponds to a possible prefix) and returns a tuple
of (`NestedMap`, `JTensor`), where the first `NestedMap` corresponds to
the `new_states` and the second `JTensor` corresponds to the logits of the
next step.
decoder_state: The initialized cache for autoregressive cached decoding.
target_ids: The token ids that correspond to the target sequence.
target_paddings: The paddings corresponding to the target sequence, with a 1
denoting padding token and 0 denoting non-padding tokens.
seq_len: The output sequence length to decode to.
max_decode_steps: Python int or None, the max decode step to run after the
prefix (if any). Since the prefixes might be of unequal lengths, this
      value is not equivalent to `seq_len` above. When None, decode steps are
only limited by `seq_len` above.
    prefix_lengths: Optional argument supplying prefix sizes to initialize the
model to decode from a certain target prefix for each position in the
batch. This can either be None or a JTensor of shape [batch] signifying
the prefix length for each sequence in the batch.
eos_id: Optional EOS id which to terminate the decoding early.
Returns:
A NestedMap with `.prefix_lengths` (indicating the lengths of prefixes for
each target sequence), `.output_ids` (matrix of int ids with the
decoded output), `.decode_lengths` (vector of ints indicating the lengths
of non-padding tokens in `.output_ids`, which includes the prefix), and
`.logprobs` (the log probability of selected tokens, including the prefix,
where a positive value of 1.0 is used to indicate padded positions).
"""
if seq_len <= 0:
raise ValueError('The sequence length for decoding must be > 0, '
f'current value = {seq_len}.')
max_decode_steps = max_decode_steps or seq_len
batch_size = target_ids.shape[0]
# If prefix length is not specified set it to 0.
if prefix_lengths is None:
prefix_lengths = jnp.zeros([batch_size], dtype=jnp.int32)
output_ids = jnp.zeros(shape=(batch_size, seq_len), dtype=jnp.int32)
output_ids = output_ids.at[:, 0].set(target_ids[:, 0])
val = NestedMap()
val.state = decoder_state
val.step = 0
val.output_ids = output_ids
# Shape [batch_size], whether each row has terminated and should stop.
val.done = jnp.zeros(shape=batch_size, dtype=jnp.bool_)
val.decode_lengths = jnp.ones_like(prefix_lengths) * seq_len
# We use a positive value of 1.0 to indicate blank or padded positions.
val.logprobs = jnp.ones_like(output_ids, dtype=jnp.float32)
def cond_func(val):
"""Whether the while loop should continue."""
# We continue the greedy search iff both:
# (1) We have yet to exceed the max steps set by p.decoder.seqlen, AND;
# (2) At least one row in the batch has not terminated.
length_ok = val.step < seq_len - 1
all_rows_done = jnp.all(val.done)
return jnp.logical_and(length_ok, jnp.logical_not(all_rows_done))
def loop_body(val):
"""From ids at `step`, update output ids at `step + 1`."""
step = val.step
decoder_state, logits = extend_step_fn(val.state, val.output_ids[:, step])
logprobs = jax.nn.log_softmax(logits.astype(jnp.float32))
val.state = decoder_state
# When step becomes prefix_length - 1, the new output has index beyond
# the known prefix.
# If prefix_length is 0, the condition is always False, so we take the
# decoded output rather than the prefix.
new_ids = jnp.where(step < prefix_lengths - 1, target_ids[:, step + 1]
|
PhonologicalCorpusTools/PolyglotDB
|
polyglotdb/query/base/complex.py
|
Python
|
mit
| 2,772 | 0.000722 |
class ComplexClause(object):
type_string = ''
def __init__(self, *args):
self.clauses = args
self.add_prefix(self.type_string)
def is_matrix(self):
for c in self.clauses:
if not c.is_matrix():
return False
return True
def involves(self, annotation):
for c in self.clauses:
if c.involves(annotation):
return True
return False
@property
def nodes(self):
"""
Get all annotations involved in the clause.
"""
nodes = []
for a in self.clauses:
nodes.extend(a.nodes)
return nodes
@property
def in_subquery(self):
for a in self.clauses:
if a.in_subquery:
return True
return False
@property
def attributes(self):
"""
Get all attributes involved in the clause.
"""
attributes = []
for a in self.clauses:
attributes.extend(a.attributes)
return attributes
def add_prefix(self, prefix):
"""
Adds a prefix to a clause
Parameters
----------
prefix : str
the prefix to add
"""
for i, c in enumerate(self.clauses):
if isinstance(c, ComplexClause):
c.add_prefix(prefix + str(i))
else:
try:
c.value_alias_prefix += prefix + str(i)
except AttributeError:
pass
def generate_params(self):
"""
Generates dictionary of parameters of ComplexClause
Returns
-------
params : dict
a dictionary of parameters
"""
from .attributes import NodeAttribute
params = {}
for c in self.clauses:
if isinstance(c, ComplexClause):
params.update(c.generate_params())
else:
try:
if not isinstance(c.value, NodeAttribute):
params[c.cypher_value_string()[1:-1].replace('`', '')] = c.value
except AttributeError:
pass
return params
class or_(ComplexClause):
type_string = 'or_'
def for_cypher(self):
"""
Return a Cypher representation of the clause.
"""
temp = ' OR '.join(x.for_cypher() for x in self.clauses)
temp = "(" + temp + ")"
return temp
class and_(ComplexClause):
type_string = 'and_'
def for_cypher(self):
"""
Return a Cypher representation of the clause.
"""
temp = ' AND '.join(x.for_cypher() for x in self.clauses)
        temp = "(" + temp + ")"
return temp
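# --- Illustrative usage sketch (hypothetical clause objects) ---
# or_/and_ only require their children to expose for_cypher() (and, optionally,
# a value_alias_prefix that add_prefix() can extend); a minimal stand-in is
# enough to show the generated Cypher.
class _FakeClause(object):
    value_alias_prefix = ''

    def __init__(self, text):
        self.text = text

    def for_cypher(self):
        return self.text

# and_(or_(a, b), c).for_cypher() would yield:
#   "((n.begin < 1 OR n.end > 5) AND n.label = 'x')"
# given a = _FakeClause('n.begin < 1'), b = _FakeClause('n.end > 5'),
# c = _FakeClause("n.label = 'x'").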
|
crypotex/taas
|
taas/user/views.py
|
Python
|
gpl-2.0
| 6,005 | 0.001166 |
import logging
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import views as auth_views, get_user_model, update_session_auth_hash, logout
from django.contrib.auth.tokens import default_token_generator
from django.contrib.messages.views import SuccessMessageMixin
from django.core.urlresolvers import reverse_lazy, reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template.response import TemplateResponse
from django.utils.encoding import force_text
from django.utils.http import urlsafe_base64_decode
from django.utils.translation import ugettext_lazy as _
from django.views.generic import CreateView, UpdateView, FormView
from taas.reservation.views import get_payment_order, get_payment_mac
from taas.user import forms
from taas.user import mixins
from taas.user import models
from taas.user import tasks
logger = logging.getLogger(__name__)
class UserCreateView(CreateView):
success_message = _('User has been successfully registered.')
success_url = reverse_lazy('homepage')
template_name = 'user_registration.html'
model = models.User
form_class = forms.UserCreationForm
def form_valid(self, form):
self.object = form.save()
tasks.email_admin_on_user_registration.delay(self.object.id)
messages.success(self.request, self.success_message)
logger.info('Unverified user with email %s has been successfully registered.'
% form.cleaned_data.get('email'))
return HttpResponseRedirect(self.get_success_url())
class UserUpdateView(mixins.LoggedInMixin, UpdateView):
success_message = _('Information has been updated.')
success_url = reverse_lazy('user_update_form')
template_name = 'user_update.html'
model = models.User
form_class = forms.UserUpdateForm
def get_object(self, queryset=None):
return self.request.user
def get_context_data(self, **kwargs):
kwargs['pin'] = self.request.user.pin
return super(UserUpdateView, self).get_context_data(**kwargs)
def form_valid(self, form):
self.object = form.save()
update_session_auth_hash(self.request, self.object)
messages.success(self.request, self.success_message)
logger.info('User with email %s has been been updated.' % form.cleaned_data.get('email'))
return HttpResponseRedirect(self.get_success_url())
class UserDeactivateView(mixins.LoggedInMixin, SuccessMessageMixin, FormView):
success_message = _('User has been deactivated.')
form_class = forms.UserDeactivateForm
template_name = 'user_deactivate.html'
success_url = reverse_lazy('homepage')
def get_form_kwargs(self):
kwargs = super(UserDeactivateView, self).get_form_kwargs()
kwargs['user'] = self.request.user
return kwargs
def form_valid(self, form):
self.request.user.is_active = False
self.request.user.save()
tasks.email_admin_on_user_deactivation.delay(self.request.user.id)
logger.info('User with email %s has been been deactivated.' % form.cleaned_data.get('email'))
logout(self.request)
return super(UserDeactivateView, self).form_valid(form)
def password_reset(request):
kwargs = {
'template_name': 'password_reset/form.html',
'email_template_name': 'password_reset/email.html',
'subject_template_name': 'password_reset/subject.html',
'post_reset_redirect': reverse_lazy('homepage')
}
if request.method == 'POST' and request.POST.get('email'):
messages.add_message(request, messages.SUCCESS, _('Email instructions have been sent.'),
fail_silently=True)
response = auth_views.password_reset(request, **kwargs)
return response
def password_reset_confirm(request, uidb64=None, token=None):
template_name = 'password_reset/confirm.html'
post_reset_redirect = reverse('homepage')
token_generator = default_token_generator
set_password_form = forms.CustomPasswordSetForm
UserModel = get_user_model()
try:
# urlsafe_base64_decode() decodes to bytestring on Python 3
uid = force_text(urlsafe_base64_decode(uidb64))
user = UserModel._default_manager.get(pk=uid)
except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
user = None
if user is not None and token_generator.check_token(user, token):
validlink = True
if request.method == 'POST':
            form = set_password_form(user, request.POST)
if form.is_valid():
form.save()
                messages.add_message(request, messages.SUCCESS,
_('Your password has been set. You may go ahead and log in now.'),
fail_silently=True)
logger.info('Password for user %s has been reset.'
% user.email)
return HttpResponseRedirect(post_reset_redirect)
else:
title = _('Password reset unsuccessful')
else:
form = set_password_form(user)
title = _('Enter new password')
else:
validlink = False
form = None
title = _('Password reset unsuccessful')
context = {
'form': form,
'title': title,
'validlink': validlink,
}
return TemplateResponse(request, template_name, context)
class AddBalanceView(mixins.LoggedInMixin, SuccessMessageMixin, FormView):
form_class = forms.AddBalanceForm
template_name = 'update_budget.html'
def form_valid(self, form):
amount = form.cleaned_data['amount']
payment = get_payment_order(amount, 'B%s' % self.request.user.id)
mac = get_payment_mac(payment)
host = settings.MAKSEKESKUS['host']
return render_to_response('proceed_budget.html', {'json': payment, 'mac': mac, 'host': host})
|
hylje/Lyra
|
lyra/forms.py
|
Python
|
bsd-3-clause
| 3,064 | 0.00359 |
# -*- encoding: utf-8 -*-
import datetime
from django import forms
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from django.core import exceptions as django_exceptions
from lyra import models
class Reservation(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.person = kwargs.pop("person")
self.namespace = kwargs.pop("namespace")
self.queryset = kwargs.pop("queryset")
super(Reservation, self).__init__(*args, **kwargs)
def clean(self):
cleaned_data = super(Reservation, self).clean()
if all(k in cleaned_data for k in ("start", "stop")):
start = cleaned_data["start"]
stop = cleaned_data["stop"]
if start > stop:
(self._errors
.setdefault("start", self.error_class())
.append(_("The reservation should begin before it ends")))
return cleaned_data
def save(self, commit=True, **kwargs):
obj = super(Reservation, self).save(commit=False, **kwargs)
if not obj.pk:
obj.person = self.person
obj.namespace = self.namespace
if commit:
obj.save()
return obj
class Meta:
model = models.Reservation
exclude = ("namespace", "person", "long_description_markup")
widgets = {
"style": forms.Select(attrs={"class": "schedule_style"}),
}
class Media:
js = ("shared/js/sivari.stylepreview.js",)
class ReservationExclusive(Reservation):
def toggle_enabled(self, cleaned_data):
return (not hasattr(self, "exclusive")
and cleaned_data.get("exclusive"))
def clean(self):
cleaned_data = super(ReservationExclusive, self).clean()
start_date = cleaned_data.get("start")
stop_date = cleaned_data.get("stop")
if start_date and stop_date and self.toggle_enabled(cleaned_data):
would_conflict = self.queryset.date_range(
start_date, stop_date)
if self.instance.pk:
would_conflict = would_conflict.exclude(pk=self.instance.pk)
if would_conflict.count():
(self._errors
.setdefault("start", self.error_class())
.append(_(u"The reservation would conflict with %(conflict_count)s "
u"other reservations.") % {
"conflict_count": would_conflict}))
        return cleaned_data
class ReservationExclusiveEnable(ReservationExclusive):
exclusive = forms.BooleanField(
label=_(u"No overlap"),
required=False)
class ReservationExclusiveDisable(ReservationExclusive):
exclusive = forms.BooleanField(
label=_(u"No overlap"),
required=False,
initial=True)
class ConfirmForm(forms.Form):
confirm = forms.BooleanField()
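# --- Illustrative usage sketch (hypothetical view code, not part of the file) ---
# Reservation pops three extra keyword arguments before delegating to
# ModelForm.__init__, so a typical call site looks like:
#
# form = ReservationExclusiveEnable(
#     request.POST or None,
#     person=request.user,
#     namespace=some_namespace,                      # assumed to come from the view
#     queryset=models.Reservation.objects.all(),     # used for the overlap check
# )
# if form.is_valid():
#     form.save()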
|
xuweiliang/Codelibrary
|
openstack_dashboard/dashboards/project/routers/ports/views.py
|
Python
|
apache-2.0
| 3,508 | 0 |
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.admin.routers.ports \
import forms as project_forms
from openstack_dashboard.dashboards.admin.routers.ports \
import tabs as project_tabs
class AddInterfaceView(forms.ModalFormView):
form_class = project_forms.AddInterface
template_name = 'admin/routers/ports/create.html'
success_url = 'horizon:admin:routers:detail'
failure_url = 'horizon:admin:routers:detail'
page_title = _("Add Interface")
def get_success_url(self):
return reverse(self.success_url,
args=(self.kwargs['router_id'],))
@memoized.memoized_method
def get_object(self):
try:
router_id = self.kwargs["router_id"]
return api.neutron.router_get(self.request, router_id)
except Exception:
redirect = reverse(self.failure_url, args=[router_id])
msg = _("Unable to retrieve router.")
exceptions.handle(self.request, msg, redirect=redirect)
def get_context_data(self, **kwargs):
context = super(AddInterfaceView, self).get_context_data(**kwargs)
context['router'] = self.get_object()
context['form_url'] = 'horizon:admin:routers:addinterface'
return context
def get_initial(self):
router = self.get_object()
return {"router_id": self.kwargs['router_id'],
"router_name": router.name_or_id}
class SetGatewayView(forms.ModalFormView):
form_class = project_forms.SetGatewayForm
template_name = 'admin/routers/ports/setgateway.html'
success_url = 'horizon:admin:routers:index'
failure_url = 'horizon:admin:routers:index'
page_title = _("Set Gateway")
def get_success_url(self):
return reverse(self.success_url)
@memoized.memoized_method
def get_object(self):
try:
router_id = self.kwargs["router_id"]
return api.neutron.router_get(self.request, router_id)
except Exception:
redirect = reverse(self.failure_url)
msg = _("Unable to set gateway.")
exceptions.handle(self.request, msg, redirect=redirect)
def get_context_data(self, **kwargs):
context = super(SetGatewayView, self).get_context_data(**kwargs)
context['router'] = self.get_object()
return context
def get_initial(self):
router = self.get_object()
return {"router_id": self.kwargs['router_id'],
"router_name": router.name_or_id}
class DetailView(tabs.TabView):
tab_group_class = project_tabs.PortDetailTabs
template_name = 'admin/networks/ports/detail.html'
|
asobolev/nix4nest
|
nix4nest/test/test_nest_api/test_multimeter.py
|
Python
|
lgpl-3.0
| 967 | 0.001034 |
import unittest
import nest
from nix4nest.nest_api.models.multimeter import NestMultimeter
class TestNode(unittest.TestCase):
def setUp(self):
nest.ResetKernel()
self.neuron_id = nest.Create('iaf_neuron')[0]
rec_params = {'record_from': ['V_m'], 'withtime': True}
        self.mm_id = nest.Create('multimeter', params=rec_params)[0]
nest.Connect([self.mm_id], [self.neuron_id])
self.mm = NestMultimeter(self.mm_id, 'V_m')
def tearDown(self):
nest.ResetKernel()
def test_properties(self):
for k in nest.GetStatus([self.mm_id])[0].keys():
            assert(k in self.mm.properties)
def test_data(self):
assert(len(self.mm.data) == 0)
nest.Simulate(50)
assert(len(self.mm.data) == 0)
self.mm.refresh()
assert(len(self.mm.data) == 49)
assert(self.neuron_id in self.mm.senders)
assert((self.mm.senders == self.neuron_id).all())
|
trishika/home-monitor
|
monitor.py
|
Python
|
gpl-3.0
| 2,393 | 0.033445 |
#!/usr/bin/env python2.7
# -*- encoding: utf-8 -*-
"""
Home-monitor
~~~~~~~~~~~~
:copyright: (c) 2013 by Aurélien Chabot <aurelien@chabot.fr>
:license: GPLv3, see COPYING for more details.
"""
try:
    import threading
    import sys, os, time, datetime
    from traceback import print_exc
import json
import urllib2
from ConfigParser import SafeConfigParser
except ImportError as error:
print 'ImportError: ', str(error)
exit(1)
try:
sys.path.insert(0, '../rest/')
sys.path.insert(0, '/usr/local/bin/')
from restClientLib import get_nodes, set_switch, update_sensor, update_switch
except ImportError as error:
print 'Custom py ImportError: ', str(error)
exit(1)
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[default]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else: return self.fp.readline()
if len(sys.argv) > 1:
config = SafeConfigParser()
config.readfp(FakeSecHead(open(sys.argv[1])))
else:
print("You need to provide a configuration")
exit(1)
class Monitor(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def get_rules(self):
data = {}
try:
url = urllib2.urlopen("http://localhost:%(port)d/rules/" % { "port" : config.getint('default', 'PORT') })
data = json.loads(url.read())
except:
print("Failed to get rules")
print_exc()
finally:
return data
def run(self):
while True:
rules = self.get_rules()
for rule in rules:
now = datetime.datetime.now().timetuple()
test_start = int(now[3]) > int(rule["start_time"])
test_end = int(now[3]) < int(rule["end_time"])
if (test_start and test_end) or (int(rule["start_time"])>int(rule["end_time"]) and (test_start or test_end)):
switch = { 'host' : "http://" + rule['switch_host'], 'id' : rule['switch_id']}
sensor = { 'host' : "http://" + rule['sensor_host'], 'id' : rule['sensor_id']}
update_sensor(sensor)
if sensor['value'] < (rule['temp'] - 0.5):
print("Set heater on, current temp is %s, target is %s" % (str(sensor['value']), str(rule['temp'])))
set_switch(switch, 1)
if sensor['value'] > (rule['temp'] + 0.5):
print("Set heater off, current temp is %s, target is %s" % (str(sensor['value']), str(rule['temp'])))
set_switch(switch, 0)
time.sleep(60)
# Launch monitor
monitor = Monitor()
monitor.start()
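# --- Illustrative rule format (assumption inferred from the loop above) ---
# The REST endpoint http://localhost:<PORT>/rules/ is expected to return a JSON
# list shaped roughly like this; the field names come from the code, the values
# are made up:
#
# [
#     {
#         "start_time": 6, "end_time": 22,            # active between 06:00 and 22:00
#         "temp": 19.5,                                # target temperature
#         "switch_host": "192.168.1.10:8080", "switch_id": 1,
#         "sensor_host": "192.168.1.11:8080", "sensor_id": 2
#     }
# ]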
|
datavisyn/tdp_core
|
tdp_core/config.py
|
Python
|
bsd-3-clause
| 688 | 0.011628 |
from phovea_server.ns import Namespace, abort
from phovea_server.util import jsonify
from phovea_server.config import get as get_config
from phovea_server.plugin import list as list_plugins
import logging
app = Namespace(__name__)
_log = logging.getLogger(__name__)
@app.route('/<path:path>')
def _config(path):
  path = path.split('/')
key = path[0]
plugin = next((p for p in list_plugins('tdp-config-safe-keys') if p.id == key), None)
if plugin is None:
_log.error('404: config key "{}" not found'.format(key))
abort(404, 'config key "{}" not found'.format(key))
path[0] = plugin.configKey
return jsonify(get_config('.'.join(path)))
def create():
return app
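# --- Illustrative request flow (hypothetical plugin, not part of the file) ---
# A registered 'tdp-config-safe-keys' plugin maps a public key to an internal
# configKey. Assuming a plugin with id 'myapp' and configKey 'myapp_server',
# a GET on /myapp/limit resolves as:
#
#   path = 'myapp/limit'.split('/')   # -> ['myapp', 'limit']
#   path[0] = plugin.configKey        # -> ['myapp_server', 'limit']
#   get_config('.'.join(path))        # -> value of 'myapp_server.limit'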
|
jim-easterbrook/pywws
|
src/pywws/service/pwsweather.py
|
Python
|
gpl-2.0
| 3,827 | 0.001045 |
# pywws - Python software for USB Wireless Weather Stations
# http://github.com/jim-easterbrook/pywws
# Copyright (C) 2018 pywws contributors
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Upload weather data to PWS Weather.
`PWS Weather`_ is a site run by AerisWeather_ that "brings together
personal weather station data worldwide from locales not served by
primary weather services."
* Create account: http://www.pwsweather.com/register.php
* API based on WU protocol: `<http://wiki.wunderground.com/index.php/PWS_-_Upload_Protocol>`_
* Additional dependency: http://docs.python-requests.org/
* Example ``weather.ini`` configuration::
[pwsweather]
station = ABCDEFGH1
password = xxxxxxx
[logged]
services = ['pwsweather', 'underground']
.. _PWS Weather: http://www.pwsweather.com/
.. _AerisWeather: https://www.aerisweather.com/
"""
from __future__ import absolute_import, unicode_literals
from contextlib import contextmanager
from datetime import timedelta
import logging
import os
import sys
import requests
import pywws.service
__docformat__ = "restructuredtext en"
service_name = os.path.splitext(os.path.basename(__file__))[0]
logger = logging.getLogger(__name__)
class ToService(pywws.service.CatchupDataService):
config = {
'station' : ('', True, 'ID'),
'password': ('', True, 'PASSWORD'),
}
fixed_data = {'action': 'updateraw', 'softwaretype': 'pywws'}
logger = logger
service_name = service_name
template = """
#live#
#idx "'dateutc' : '%Y-%m-%d %H:%M:%S',"#
#wind_dir "'winddir' : '%.0f'," "" "winddir_degrees(x)"#
#wind_ave "'windspeedmph': '%.2f'," "" "wind_mph(x)"#
#wind_gust "'windgustmph' : '%.2f'," "" "wind_mph(x)"#
#hum_out "'humidity' : '%.d',"#
#temp_out "'tempf' : '%.1f'," "" "temp_f(x)"#
#rel_pressure "'baromin' : '%.4f'," "" "pressure_inhg(x)"#
#calc "temp_f(dew_point(data['temp_out'], data['hum_out']))" "'dewptf': '%.1f',"#
#calc "rain_inch(rain_hour(data))" "'rainin': '%g',"#
#calc "rain_inch(rain_day(data))" "'dailyrainin': '%g',"#
"""
def __init__(self, context, check_params=True):
super(ToService, self).__init__(context, check_params)
# extend template
if context.params.get('config', 'ws type') == '3080':
self.template += """
#illuminance "'solarradiation': '%.2f'," "" "illuminance_wm2(x)"#
#uv "'UV' : '%d',"#
"""
@contextmanager
def session(self):
with requests.Session() as session:
yield session
def upload_data(self, session, prepared_data={}):
try:
rsp = session.get(
'http://www.pwsweather.com/pwsupdate/pwsupdate.php',
params=prepared_data, timeout=60)
except Exception as ex:
return False, repr(ex)
if rsp.status_code != 200:
return False, 'http status: {:d}'.format(rsp.status_code)
text = rsp.text.strip()
if text:
return True, 'server response "{:s}"'.format(text)
return True, 'OK'
if __name__ == "__main__":
sys.exit(pywws.service.main(ToService))
|
hagifoo/gae-pomodoro
|
app/src/application/handler/__init__.py
|
Python
|
mit
| 2,303 | 0.000868 |
# coding: utf-8
import json
import logging
import webapp2
from webapp2_extras import sessions
from google.appengine.api.taskqueue import TombstonedTaskError, TaskAlreadyExistsError, DuplicateTaskNameError
from domain.entity import User
import error
class BaseHandler(webapp2.RequestHandler):
def dispatch(self):
self.session_store = sessions.get_store(request=self.request)
user = self.session.get('user')
if user:
self.user = User.from_json(user)
else:
self.user = None
try:
return webapp2.RequestHandler.dispatch(self)
except webapp2.HTTPException as e:
self.response.set_status(e.code)
if e.message:
self.response.write(e.message)
finally:
self.session_store.save_sessions(self.response)
@webapp2.cached_property
def session(self):
return self.session_store.get_session()
@property
def session_id(self):
cookie_name = self.session_store.config['cookie_name']
return self.request.cookies[cookie_name]
class JsonHandler(BaseHandler):
def dispatch(self):
j = super(JsonHandler, self).dispatch()
self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
if j is not None:
self.response.out.write(json.dumps(j))
class TaskHandler(BaseHandler):
"""Handle unrecoverable errors."""
def dispatch(self):
try:
super(TaskHandler, self).dispatch()
# Unrecoverable Exceptions such as Invalid Parameter
except error.TaskUnrecoverableException as e:
logging.error(e)
except (TombstonedTaskError,
TaskAlreadyExistsError,
|
DuplicateTaskNameError) as e:
logging.error(e)
def signin_user_only(f):
"""Raise UnauthorizedException if session user is None
Examples:
class MyHandler(BaseHandler):
@singin_user_only
def get(self):
# following code is executed only if user is signed in.
...
"""
    def wrapper(self, *args, **keywords):
if not self.user:
raise error.UnauthorizedException('Need sign in')
else:
return f(self, *args, **keywords)
return wrapper
|
legacysurvey/pipeline
|
py/legacyanalysis/fix-model-selection.py
|
Python
|
gpl-2.0
| 4,976 | 0.007838 |
from glob import glob
import fitsio
import os
import sys
from astrometry.util.fits import *
from astrometry.util.file import *
from astrometry.util.starutil_numpy import *
from astrometry.libkd.spherematch import *
from collections import Counter
from legacypipe.oneblob import _select_model
from legacypipe.survey import wcs_for_brick
from astrometry.util.multiproc import multiproc
B = fits_table('/global/project/projectdirs/cosmo/work/legacysurvey/dr8/survey-bricks.fits.gz')
def patch_one(X):
(ifn, Nfns, fn) = X
T8 = fits_table(fn)
phdr = fitsio.read_header(fn)
hdr = T8.get_header()
amfn = fn.replace('/tractor-', '/all-models-').replace('/tractor/', '/metrics/')
A = fits_table(amfn)
Ahdr = fitsio.read_header(amfn)
abands = Ahdr['BANDS'].strip()
nparams = dict(ptsrc=2, simple=2, rex=3, exp=5, dev=5, comp=9)
galaxy_margin = 3.**2 + (nparams['exp'] - nparams['ptsrc'])
rex = True
brick = B[B.brickname == T8.brickname[0]]
brick = brick[0]
brickwcs = wcs_for_brick(brick)
assert(len(A) == len(np.flatnonzero(T8.type != 'DUP ')))
typemap = dict(ptsrc='PSF', rex='REX', dev='DEV', exp='EXP', comp='COMP')
Tnew = T8.copy()
npatched = 0
for i,(d,ttype) in enumerate(zip(A.dchisq, T8.type)):
        dchisqs = dict(zip(['ptsrc','rex','dev','exp','comp'], d))
mod = _select_model(dchisqs, nparams, galaxy_margin, rex)
ttype = ttype.strip()
        # The DUP elements appear at the end, and we *zip* A and T8; A does not contain the DUPs
# so is shorter by the number of DUP elements.
assert(ttype != 'DUP')
newtype = typemap[mod]
# type unchanged
if ttype == newtype:
continue
# Copy fit values from the "newtype" entries in all-models
Tnew.type[i] = '%-4s' % newtype
cols = ['ra', 'dec', 'ra_ivar', 'dec_ivar']
nt = newtype.lower()
for c in cols:
Tnew.get(c)[i] = A.get('%s_%s' % (nt,c))[i]
# expand flux, flux_ivar
for c in ['flux', 'flux_ivar']:
flux = A.get('%s_%s' % (nt,c))[i]
if len(abands) == 1:
Tnew.get('%s_%s' % (c,abands[0]))[i] = flux
else:
for ib,band in enumerate(abands):
Tnew.get('%s_%s' % (c,band))[i] = flux[ib]
cc = []
if newtype in ['EXP', 'COMP']:
cc.append('exp')
if newtype in ['DEV', 'COMP']:
cc.append('dev')
for c1 in cc:
for c2 in ['e1','e2','r']:
for c3 in ['', '_ivar']:
c = 'shape%s_%s%s' % (c1, c2, c3)
ac = '%s_shape%s_%s%s' % (nt, c1, c2, c3)
Tnew.get(c)[i] = A.get(ac)[i]
if newtype == 'COMP':
Tnew.fracdev[i] = A.comp_fracdev[i]
Tnew.fracdev_ivar[i] = A.comp_fracdev_ivar[i]
if newtype == 'PSF':
# Zero out
for c1 in ['dev','exp']:
for c2 in ['e1','e2','r']:
for c3 in ['', '_ivar']:
c = 'shape%s_%s%s' % (c1, c2, c3)
Tnew.get(c)[i] = 0.
Tnew.fracdev[i] = 0.
Tnew.fracdev_ivar[i] = 0.
# recompute bx,by, brick_primary
ok,x,y = brickwcs.radec2pixelxy(Tnew.ra[i], Tnew.dec[i])
Tnew.bx[i] = x-1.
Tnew.by[i] = y-1.
Tnew.brick_primary[i] = ((Tnew.ra[i] >= brick.ra1 ) * (Tnew.ra[i] < brick.ra2) *
(Tnew.dec[i] >= brick.dec1) * (Tnew.dec[i] < brick.dec2))
npatched += 1
print('%i of %i: %s patching %i of %i sources' % (ifn+1, Nfns, os.path.basename(fn), npatched, len(Tnew)))
if npatched == 0:
return
phdr.add_record(dict(name='PATCHED', value=npatched,
comment='Patched DR8.2.1 model-sel bug'))
outfn = fn.replace('/global/project/projectdirs/cosmo/work/legacysurvey/dr8/decam/tractor/',
'patched/')
outdir = os.path.dirname(outfn)
try:
os.makedirs(outdir)
except:
pass
Tnew.writeto(outfn, header=hdr, primheader=phdr)
def main():
#fns = glob('/global/project/projectdirs/cosmo/work/legacysurvey/dr8/decam/tractor/000/tractor-000??00?.fits')
fns = glob('/global/project/projectdirs/cosmo/work/legacysurvey/dr8/decam/tractor/*/tractor-*.fits')
fns.sort()
print(len(fns), 'Tractor catalogs')
vers = Counter()
keepfns = []
for fn in fns:
hdr = fitsio.read_header(fn)
ver = hdr['LEGPIPEV']
ver = ver.strip()
vers[ver] += 1
if ver == 'DR8.2.1':
keepfns.append(fn)
print('Header versions:', vers.most_common())
fns = keepfns
print('Keeping', len(fns), 'with bad version')
N = len(fns)
args = [(i,N,fn) for i,fn in enumerate(fns)]
mp = multiproc(8)
mp.map(patch_one, args)
if __name__ == '__main__':
main()
|
cloudControl/cctrl
|
cctrl/error.py
|
Python
|
apache-2.0
| 7,677 | 0.00482 |
# -*- coding: utf-8 -*-
"""
Copyright 2010 cloudControl UG (haftungsbeschraenkt)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
messages = {}
messages['WrongApplication'] = r'This application is unknown.'
messages['WrongDeployment'] = r'This deployment is unknown.'
messages['PasswordsDontMatch'] = r"The passwords don't match."
messages['InvalidApplicationName'] = r'Name may only contain a-z and 0-9 and must not start with a number.'
messages['WrongUsername'] = r'This username is unknown.'
messages['UserBelongsToApp'] = r'This user already belongs to this application.'
messages['RemoveUserGoneError'] = r'No such app or user. Please check app name and user name or email address.'
messages['UserCreatedNowCheckEmail'] = r'User has been created. Please check you e-mail for your confirmation code.'
messages['DeleteOnlyApplication'] = r'You can only delete applications not deployments. Try the undeploy command.'
messages['NoAliasGiven'] = r'You have to specify an alias.'
messages['WrongAlias'] = r'This alias is unknown.'
messages['NotAllowed'] = r'Sorry. You are not allowed to perform this action.'
messages['CannotDeleteDeploymentExist'] = r'You have to undeploy all related deployments, before you can delete the application.'
messages['NotAuthorized'] = r'The authorization failed, check your e-mail address and password.'
messages['PermissionDenied'] = r'You are not allowed to push to this repository. Maybe check your keys using "cctrluser key".'
messages['SecurityQuestionDenied'] = r'Action canceled on user request.'
messages['WrongAddon'] = r'This addon is unknown for this app_name/deployment_name.'
messages['DuplicateAddon'] = r'You can not add the same addon option twice.'
messages['InvalidAddon'] = r'This is not a valid addon name. Check the list of available addons with {0} app_name/deployment_name addon.list .'.format(os.path.basename(sys.argv[0]))
messages['ForbiddenAddon'] = 'You are not allowed to perform this action.\nIf you are trying to use a Beta addon, you can request access from the addon page.'
messages['WrongPubKeyName'] = r'The public key file must be named "id_rsa.pub".'
messages['NoSuchKeyFile'] = r'No such key file. Please check your input.'
messages['WrongKeyFormat'] = r'Your id_rsa.pub public key file seems to be in the wrong format.'
messages['InvalidAppOrDeploymentName'] = r'The application or deployment name is invalid.'
messages['KeyDuplicate'] = r'This key was added previously.'
messages['NoWorkerCommandGiven'] = r'The worker command is missing. Try the path to your PHP file relative from your repository root.'
messages['NoRunCommandGiven'] = r'Run command is missing.'
messages['WrongWorker'] = r'There is no such worker for this app_name/deployment_name.'
messages['NeitherBazaarNorGitFound'] = r'Cannot find "git" nor "bzr"! Please make sure either Bazaar or Git executables are in your path.'
messages['BazaarRequiredToPush'] = r'Please make sure the Bazaar executable is in your path.'
messages['GitRequiredToPush'] = r'Please make sure the Git executable is in your path.'
messages['NoCronURLGiven'] = r'You must provide a URL for cron to call.'
messages['NoSuchCronJob'] = r'Sorry, we can not find cronjob with this ID.'
messages['FileReadOrWriteFailed'] = r'Sorry, could not read or write to file.'
messages['FileNotFound'] = r'Sorry, file not found!'
messages['UserShouldCreateKey'] = r'Sorry, something went wrong when creating a key. Please create a key on your system, then run the command again.'
messages['BazaarConfigFound'] = r'Bazaar configuration found! Using "Bazaar" as repository type.'
messages['GitConfigFound'] = r'Git configuration found! Using "Git" as repository type.'
messages['BazaarExecutableFound'] = r'Bazaar seems to be installed! Using "Bazaar" as repository type.'
messages['GitExecutableFound'] = r'Git seems to be installed! Using "Git" as repository type.'
messages['CreatingAppAsDefaultRepoType'] = r'Using default "Git" as repository type.'
messages['DeleteAppsBeforeUser'] = r'There are still applications associated with this user account. Undeploy and/or delete applications before deleting user.'
messages['NoSuchFile'] = r'File not found.'
messages['APIUnreachable'] = r'Could not connect to API...'
messages['NoBuildpackURL'] = r'You need to provide a buildpack URL for "custom" application type.'
messages['NoCustomApp'] = r'You can only provide a buildpack URL if the app type is "custom".'
messages['NoValidBuildpackURL'] = r'The buildpack URL provided is not valid. Please try again.'
messages['AmbiguousSize'] = r'You can only specify one of --size or --memory.'
messages['InvalidMemory'] = r'Memory size should be an integer between 128 and 1024 MB.'
messages['InvalidSize'] = r'Size should be an integer between 1 and 8.'
messages['NoPreviousVersionFound'] = r'Previous version not found.'
messages['ClearCacheFailed'] = r'Clear buildpack cache failed.'
messages['DeploymentFailed'] = r'Deployment failed.'
messages['CommandNotImplemented'] = r'Sorry, this command is not available.'
messages['ShipAndDeploy'] = r'--ship and --push options cannot be used simultaneously.'
messages['RegisterDisabled'] = r'You can register on {0}.'
messages['NoVariablesGiven'] = r'You must provide some variables.'
messages['DuplicatedFlag'] = r'Please, specify a flag only once.'
messages['NotAuthorizedPublicKey'] = r'Public Key authentication failed. Trying with password.'
messages['WrongPublicKey'] = r'Public Key not found or invalid.'
messages['WrongKeyPath'] = r'Wrong Private Key path.'
messages['EncryptedKey'] = r'Private Key file is encrypted, please check if the ssh-agent is running.'
messages['KeyNotFound'] = r'No Private Key found.'
messages['SignatureCreateFailure'] = r'Signature could not be created.'
messages['RSAKeyRequired'] = r'Currently we support RSA keys only.'
if sys.platform == 'win32':
messages['UpdateAvailable'] = r'A newer version is available. Please update.'
messages['UpdateRequired'] = r'A newer version is required. You need to upgrade before using this program.'
else:
messages['UpdateAvailable'] = r'A newer version is available. To upgrade run: (sudo) pip install {0} --upgrade'
    messages['UpdateRequired'] = r'A newer version is required. You need to upgrade before using this program. To upgrade run: (sudo) pip install {0} --upgrade'
class CctrlException(Exception):
def __init__(self, error_key):
self.error_message = messages[error_key]
def __str__(self):
        return '[ERROR]' + ' ' + self.error_message
class InputErrorException(CctrlException):
"""
This exception is raised if for some reason someone put something in we
could not understand at all.
"""
pass
class PasswordsDontMatchException(Exception):
"""
This exception is raised if the password and the password check weren't
equal for three times.
"""
pass
class PublicKeyException(CctrlException):
"""
This exception is raised if the Public Key is not found
"""
pass
class SignatureException(CctrlException):
"""
This exception is raised if the signature cannot be created
"""
pass
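# --- Illustrative usage sketch (hypothetical call site, not part of the file) ---
# Commands raise one of the CctrlException subclasses with a key from
# `messages` and the CLI prints the formatted error at the top level:
#
# try:
#     raise InputErrorException('WrongApplication')
# except CctrlException as e:
#     print e    # -> [ERROR] This application is unknown.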
|
ibus/ibus-qt
|
src/interfaces/introspect_panel.py
|
Python
|
gpl-2.0
| 130 | 0.007692 |
#!/usr/bin/env python
import ibus
import dbus
bus = dbus.SessionBus()
e = ibus.interface.IPanel()
print e.Introspect("/", bus
|
)
|
joshmoore/zeroc-ice
|
py/demo/Ice/latency/Client.py
|
Python
|
gpl-2.0
| 1,349 | 0.005189 |
#!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2011 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import sys, time, traceback, Ice
Ice.loadSlice('Latency.ice')
import Demo
class Client(Ice.Application):
def run(self, args):
if len(args) > 1:
print self.appName() + ": too many arguments"
return 1
ping = Demo.PingPrx.checkedCast(self.communicator().propertyToProxy('Ping.Proxy'))
if not ping:
print "invalid proxy"
return 1
# Initial ping to setup the connection.
ping.ice_ping();
repetitions = 100000
print "pinging server " + str(repetitions) + " times (this may take a while)"
tsec = time.time()
i = repetitions
while(i >= 0):
ping.ice_ping()
i = i - 1
tsec = time.time() - tsec
tmsec = tsec * 1000.0
print "time for %d pings: %.3fms" % (repetitions, tmsec)
print "time per ping: %.3fms" % (tmsec / repetitions)
return 0
app = Client()
sys.exit(app.main(sys.argv, "config.client"))
|
Hemisphere-Project/HPlayer2
|
core/interfaces/regie.py
|
Python
|
gpl-3.0
| 10,996 | 0.012095 |
from .base import BaseInterface
import eventlet
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from flask import Flask, render_template, session, request, send_from_directory
from flask_socketio import SocketIO, emit, join_room, leave_room, close_room, rooms, disconnect
from werkzeug.utils import secure_filename
import threading, os, time, queue
import logging, sys, json
from ..engine.network import get_allip, get_hostname
import socket
from zeroconf import ServiceInfo, Zeroconf
thread = None
thread_lock = threading.Lock()
REGIE_PATH1 = '/opt/RPi-Regie'
REGIE_PATH2 = '/data/RPi-Regie'
class RegieInterface (BaseInterface):
def __init__(self, hplayer, port, datapath):
super(RegieInterface, self).__init__(hplayer, "Regie")
self._port = port
self._datapath = datapath
self._server = None
# HTTP receiver THREAD
def listen(self):
# Advertize on ZeroConf
zeroconf = Zeroconf()
info = ServiceInfo(
"_http._tcp.local.",
"Regie._"+get_hostname()+"._http._tcp.local.",
addresses=[socket.inet_aton(ip) for ip in get_allip()],
port=self._port,
properties={},
server=get_hostname()+".local.",
)
zeroconf.register_service(info)
# Start server
self.log( "regie interface on port", self._port)
with ThreadedHTTPServer(self, self._port) as server:
self._server = server
self.stopped.wait()
self._server = None
# Unregister ZeroConf
zeroconf.unregister_service(info)
zeroconf.close()
def projectPath(self):
return os.path.join(self._datapath, 'project.json')
def projectRaw(self):
project = '{"pool":[], "project":[[]]}'
if os.path.isfile(self.projectPath()):
with open( self.projectPath(), 'r') as file:
|
project = file.read()
return project
# parse locally for programatic execution
def reload(self):
try:
self._project = json.loads(self.projectRaw())
except:
self._project = None
self.log("Error while parsing project..")
# print(self._project)
return self._project
# play sequence
def playseq(self, sceneIndex, seqIndex):
self.log("PLAYSEQ")
try:
# self.log('PLAYSEQ', seqIndex, sceneIndex, boxes)
orderz = []
boxes = [b for b in self._project["project"][0][sceneIndex]["allMedias"] if b["y"] == seqIndex]
for b in boxes:
peerName = self._project["pool"][ b["x"] ]["name"]
# MEDIA
order = { 'peer': peerName, 'synchro': True}
if b["media"] in ['stop', 'pause', 'unfade'] :
order["event"] = b["media"]
elif b["media"] == '...':
order["event"] = 'continue'
elif b["media"].startswith('fade'):
order["event"] = 'fade'
order["data"] = b["media"].split('fade ')[1]
else:
order["event"] = 'playthen'
order["data"] = [ self._project["project"][0][sceneIndex]["name"] + '/' + b["media"] ]
# ON MEDIA END
if 'onend' in b:
if b['onend'] == 'next':
order["data"].append( {'event': 'do-playseq', 'data': [sceneIndex, seqIndex+1] } )
elif b['onend'] == 'prev':
order["data"].append( {'event': 'do-playseq', 'data': [sceneIndex, seqIndex-1] } )
elif b['onend'] == 'replay':
order["data"].append( {'event': 'do-playseq', 'data': [sceneIndex, seqIndex] } )
orderz.append(order)
# LOOP
if b["loop"] == 'loop':
orderz.append( { 'peer': peerName, 'event': 'loop', 'data': 1} )
elif b["loop"] == 'unloop':
orderz.append( { 'peer': peerName, 'event': 'unloop'} )
# LIGHT
if b["light"] and b["light"] != '...':
order = { 'peer': peerName, 'synchro': True, 'event': 'esp'}
if b["light"].startswith('light'):
order["data"] = {
'topic': 'leds/all',
'data': b["light"].split('light ')[1]
}
elif b["light"].startswith('preset'):
order["data"] = {
'topic': 'leds/mem',
'data': b["light"].split('preset ')[1]
}
elif b["light"].startswith('off'):
order["data"] = {
'topic': 'leds/stop',
'data': ''
}
orderz.append(order)
self.emit('playingseq', sceneIndex, seqIndex)
self.emit('peers.triggers', orderz, 437)
except:
self.log('Error playing Scene', sceneIndex, 'Seq', seqIndex)
#
# Threaded HTTP Server
#
class ThreadedHTTPServer(object):
def __init__(self, regieinterface, port):
self.regieinterface = regieinterface
interface_path = os.path.dirname(os.path.realpath(__file__))
if os.path.isdir(REGIE_PATH1):
www_path = os.path.join(REGIE_PATH1, 'web')
elif os.path.isdir(REGIE_PATH2):
www_path = os.path.join(REGIE_PATH2, 'web')
else:
www_path = os.path.join(interface_path, 'regie')
app = Flask(__name__, template_folder=www_path)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app, cors_allowed_origins="*")
#
# FLASK Routing Static
#
@app.route('/')
def index():
# self.regieinterface.log('requesting index')
return send_from_directory(www_path, 'index.html')
@app.route('/<path:path>')
def send_static(path):
# self.regieinterface.log('requesting '+path)
return send_from_directory(www_path, path)
#
# FLASK Routing API
#
# @app.route('/<path:path>')
# def send_static(path):
# # self.regieinterface.log('requesting '+path)
# return send_from_directory(www_path, path)
#
# SOCKETIO Routing
#
self.sendBuffer = queue.Queue()
def background_thread():
while True:
try:
task = self.sendBuffer.get_nowait()
if len(task) > 1: socketio.emit(task[0], task[1])
else: socketio.emit(task[0], None)
self.sendBuffer.task_done()
except queue.Empty:
socketio.sleep(0.1)
@self.regieinterface.hplayer.on('files.dirlist-updated')
def filetree_send(ev, *args):
self.sendBuffer.put( ('data', {'fileTree': self.regieinterface.hplayer.files()}) )
@self.regieinterface.hplayer.on('files.activedir-updated')
def activedir_send(ev, *args):
self.sendBuffer.put( ('data', {'scene': args[1]}) )
@self.regieinterface.hplayer.on('*.peer.*')
def peer_send(ev, *args):
event = ev.split('.')[-1]
if event == 'playingseq':
print(ev, args[0]['data'][1])
self.sendBuffer.put( ('data', {'sequence': args[0]['data'][1]}) )
else:
args[
|
dnjohnstone/hyperspy
|
hyperspy/misc/export_dictionary.py
|
Python
|
gpl-3.0
| 7,725 | 0.000647 |
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from operator import attrgetter
from hyperspy.misc.utils import attrsetter
from copy import deepcopy
import dill
from dask.array import Array
def check_that_flags_make_sense(flags):
# one of: fn, id, sig
def do_error(f1, f2):
raise ValueError(
'The flags "%s" and "%s" are not compatible' %
(f1, f2))
if 'fn' in flags:
if 'id' in flags:
do_error('fn', 'id')
if 'sig' in flags:
do_error('fn', 'sig')
if 'id' in flags:
# fn done previously
if 'sig' in flags:
do_error('id', 'sig')
if 'init' in flags:
do_error('id', 'init')
# all sig cases already covered
def parse_flag_string(flags):
return flags.replace(' ', '').split(',')
def export_to_dictionary(target, whitelist, dic, fullcopy=True):
""" Exports attributes of target from whitelist.keys() to dictionary dic
All values are references only by default.
Parameters
----------
target : object
must contain the (nested) attributes of the whitelist.keys()
whitelist : dictionary
A dictionary, keys of which are used as attributes for exporting.
Key 'self' is only available with tag 'id', when the id of the
target is saved. The values are either None, or a tuple, where:
* the first item a string, which containts flags, separated by
commas.
* the second item is None if no 'init' flag is given, otherwise
the object required for the initialization.
The flag conventions are as follows:
* 'init': object used for initialization of the target. The object is
saved in the tuple in whitelist
* 'fn': the targeted attribute is a function, and may be pickled. A
tuple of (thing, value) will be exported to the dictionary,
where thing is None if function is passed as-is, and True if
dill package is used to pickle the function, with the value as
the result of the pickle.
* 'id': the id of the targeted attribute is exported (e.g. id(target.name))
* 'sig': The targeted attribute is a signal, and will be converted to a
dictionary if fullcopy=True
dic : dictionary
A dictionary where the object will be exported
fullcopy : bool
Copies of objects are stored, not references. If any found,
functions will be pickled and signals converted to dictionaries
"""
whitelist_flags = {}
for key, value in whitelist.items():
if value is None:
# No flags and/or values are given, just save the target
thing = attrgetter(key)(target)
if fullcopy:
thing = deepcopy(thing)
dic[key] = thing
whitelist_flags[key] = ''
continue
flags_str, value = value
flags = parse_flag_string(flags_str)
check_that_flags_make_sense(flags)
if key == 'self':
if 'id' not in flags:
raise ValueError(
'Key "self" is only available with flag "id" given')
value = id(target)
else:
if 'id' in flags:
value = id(attrgetter(key)(target))
# here value is either id(thing), or None (all others except 'init'),
# or something for init
if 'init' not in flags and value is None:
value = attrgetter(key)(target)
# here value either id(thing), or an actual target to export
if 'sig' in flags:
if fullcopy:
from hyperspy.signal import BaseSignal
if isinstance(value, BaseSignal):
value = value._to_dictionary()
value['data'] = deepcopy(value['data'])
elif 'fn' in flags:
if fullcopy:
value = (True, dill.dumps(value))
else:
value = (None, value)
elif fullcopy:
value = deepcopy(value)
dic[key] = value
whitelist_flags[key] = flags_str
if '_whitelist' not in dic:
dic['_whitelist'] = {}
# the saved whitelist does not have any values, as they are saved in the
# original dictionary. Have to restore then when loading from dictionary,
# most notably all with 'init' flags!!
dic['_whitelist'].update(whitelist_flags)
def load_from_dictionary(target, dic):
""" Loads attributes of target to dictionary dic
The attribute list is read from dic['_whitelist'].keys()
Parameters
----------
target : object
must contain the (nested) attributes of the whitelist.keys()
dic : dictionary
A dictionary, containing field '_whitelist', which is a dictionary
with all keys that were exported, with values being flag strings.
The convention of the flags is as follows:
* 'init': object used for initialization of the target. Will be
copied to the _whitelist after loading
* 'fn': the targeted attribute is a function, and may have been
pickled (preferably with dill package).
* 'id': the id of the original object was exported and the
attribute will not be set. The key has to be '_id_'
* 'sig': The targeted attribute was a signal, and may have been
converted to a dictionary if fullcopy=True
"""
new_whitelist = {}
for key, flags_str in dic['_whitelist'].items():
value = dic[key]
flags = parse_flag_string(flags_str)
if 'id' not in flags:
value = reconstruct_object(flags, value)
if 'init' in flags:
new_whitelist[key] = (flags_str, value)
else:
attrsetter(target, key, value)
if len(flags_str):
new_whitelist[key] = (flags_str, None)
else:
new_whitelist[key] = None
if hasattr(target, '_whitelist'):
if isinstance(target._whitelist, dict):
target._whitelist.update(new_whitelist)
else:
attrsetter(target, '_whitelist', new_whitelist)
def reconstruct_object(flags, value):
""" Reconstructs the value (if necessary) after having saved it in a
dictionary
"""
    if not isinstance(flags, list):
flags = parse_flag_string(flags)
if 'sig' in flags:
        if isinstance(value, dict):
from hyperspy.signal import BaseSignal
value = BaseSignal(**value)
value._assign_subclass()
return value
if 'fn' in flags:
ifdill, thing = value
if ifdill is None:
return thing
if ifdill in [True, 'True', b'True']:
return dill.loads(thing)
# should not be reached
raise ValueError("The object format is not recognized")
if isinstance(value, Array):
value = value.compute()
return value
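# --- Illustrative round-trip sketch (hypothetical target class, not HyperSpy code) ---
# Exporting plain attributes (whitelist values of None) and loading them back
# onto another instance:
#
# class _DummyThing(object):
#     def __init__(self):
#         self.name = 'thing'
#         self.scale = 0.5
#
# source, destination = _DummyThing(), _DummyThing()
# dic = {}
# export_to_dictionary(source, {'name': None, 'scale': None}, dic)
# # dic == {'name': 'thing', 'scale': 0.5, '_whitelist': {'name': '', 'scale': ''}}
# load_from_dictionary(destination, dic)   # copies the values onto destination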
|
discreteoptimization/setcover
|
cp_homebrew_003/validator.py
|
Python
|
mit
| 2,676 | 0.002242 |
#!/usr/bin/env python
# encoding: utf-8
"""
For local testing purposes
"""
from itertools import compress, chain, product, ifilter
from functools import partial
from reader import read_input, list_files
def is_valid(task, solution):
"""
:param reader.Task task:
:param list[1|0] solution:
:return bool: whether constraints in task are met
"""
    sets = compress(task.sets, solution)
items_covered = set(chain.from_iterable(s.items for s in sets))
return len(items_covered) == task.item_count
def calc_cost(task, solution):
"""
:param reader.Task task:
:param list[1|0] solution:
:return int:
"""
sets = compress(task.sets, solution)
return sum(s.cost for s in sets)
def bruteforce_solver(task):
"""
As simple solution as we can make.
It finds the optimal solution, but it can't work on big inputs
(say, 20 sets take a few seconds, 25 sets - take a few minutes)
:param reader.Task task:
:return list[1|0]:
"""
all_configurations = product([0, 1], repeat=task.set_count)
valid_configurations = ifilter(partial(is_valid, task), all_configurations)
return min(valid_configurations, key=partial(calc_cost, task))
def check_solver(solver, inputs=list_files(max_size=20)):
"""
Prove optimality by comparing the solution with the control version.
Only for small examples, sorry. For big ones you can call is_valid()
:param function(task) solver:
:param inputs: iterable of input file names
:return:
"""
for fn in inputs:
task = read_input(fn)
solution = solver(task)
if not is_valid(task, solution):
print 'ERROR: solution for {fn} is invalid: {solution}'.format(fn=fn, solution=solution)
continue
control_solution = bruteforce_solver(task)
control_cost = calc_cost(task, control_solution)
cost = calc_cost(task, solution)
if cost != control_cost:
msg = ('ERROR: solution for {fn} has cost={cost}, but optimal is {control_cost}:\n' +
' control:{control_solution}\n' +
' tested: {solution}')
print msg.format(fn=fn, cost=cost, control_cost=control_cost,
control_solution=control_solution, solution=solution)
continue
print 'OK: solution for {fn} is optimal, cost={cost}'.format(fn=fn, cost=cost)
if __name__ == '__main__':
from cp_solver import deep_search
check_solver(lambda (task): deep_search(task).best_solution) # put your solver here
# check_solver(lambda (task): deep_search(task).best_solution, list_files(min_size=30, max_size=50)) # put your solver here
|
frappe/erpnext
|
erpnext/stock/report/cogs_by_item_group/cogs_by_item_group.py
|
Python
|
gpl-3.0
| 5,496 | 0.025291 |
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
import datetime
from collections import OrderedDict
from typing import Dict, List, Tuple, Union
import frappe
from frappe import _
from frappe.utils import date_diff
from erpnext.accounts.report.general_ledger.general_ledger import get_gl_entries
Filters = frappe._dict
Row = frappe._dict
Data = List[Row]
Columns = List[Dict[str, str]]
DateTime = Union[datetime.date, datetime.datetime]
FilteredEntries = List[Dict[str, Union[str, float, DateTime, None]]]
ItemGroupsDict = Dict[Tuple[int, int], Dict[str, Union[str, int]]]
SVDList = List[frappe._dict]
def execute(filters: Filters) -> Tuple[Columns, Data]:
update_filters_with_account(filters)
validate_filters(filters)
columns = get_columns()
data = get_data(filters)
return columns, data
def update_filters_with_account(filters: Filters) -> None:
account = frappe.get_value("Company", filters.get("company"), "default_expense_account")
filters.update(dict(account=account))
def validate_filters(filters: Filters) -> None:
if filters.from_date > filters.to_date:
frappe.throw(_("From Date must be before To Date"))
def get_columns() -> Columns:
return [
{
'label': _('Item Group'),
'fieldname': 'item_group',
'fieldtype': 'Data',
'width': '200'
},
{
'label': _('COGS Debit'),
'fieldname': 'cogs_debit',
'fieldtype': 'Currency',
'width': '200'
}
]
def get_data(filters: Filters) -> Data:
filtered_entries = get_filtered_entries(filters)
svd_list = get_stock_value_difference_list(filtered_entries)
leveled_dict = get_leveled_dict()
assign_self_values(leveled_dict, svd_list)
assign_agg_values(leveled_dict)
data = []
for item in leveled_dict.items():
i = item[1]
if i['agg_value'] == 0:
continue
data.append(get_row(i['name'], i['agg_value'], i['is_group'], i['level']))
if i['self_value'] < i['agg_value'] and i['self_value'] > 0:
data.append(get_row(i['name'], i['self_value'], 0, i['level'] + 1))
return data
def get_filtered_entries(filters: Filters) -> FilteredEntries:
gl_entries = get_gl_entries(filters, [])
filtered_entries = []
for entry in gl_entries:
posting_date = entry.get('posting_date')
from_date = filters.get('from_date')
if date_diff(from_date, posting_date) > 0:
continue
filtered_entries.append(entry)
return filtered_entries
def get_stock_value_difference_list(filtered_entries: FilteredEntries) -> SVDList:
voucher_nos = [fe.get('voucher_no') for fe in filtered_entries]
svd_list = frappe.get_list(
'Stock Ledger Entry', fields=['item_code','stock_value_difference'],
filters=[('voucher_no', 'in', voucher_nos), ("is_cancelled", "=", 0)]
)
assign_item_groups_to_svd_list(svd_list)
return svd_list
def get_leveled_dict() -> OrderedDict:
item_groups_dict = get_item_groups_dict()
lr_list = sorted(item_groups_dict, key=lambda x : int(x[0]))
leveled_dict = OrderedDict()
current_level = 0
nesting_r = []
for l, r in lr_list:
while current_level > 0 and nesting_r[-1] < l:
nesting_r.pop()
current_level -= 1
leveled_dict[(l,r)] = {
'level' : current_level,
'name' : item_groups_dict[(l,r)]['name'],
'is_group' : item_groups_dict[(l,r)]['is_group']
}
if int(r) - int(l) > 1:
current_level += 1
nesting_r.append(r)
update_leveled_dict(leveled_dict)
return leveled_dict
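# Illustrative sketch: the same nested-set walk as above on a hand-made
# (lft, rgt) mapping, to show how levels are derived. The toy item groups are
# hypothetical and do not come from the database.
def _toy_level_walk():
    toy = {(1, 8): 'All', (2, 5): 'Raw Material', (3, 4): 'Ore', (6, 7): 'Finished'}
    levels = {}
    current_level, nesting_r = 0, []
    for l, r in sorted(toy, key=lambda x: int(x[0])):
        while current_level > 0 and nesting_r[-1] < l:
            nesting_r.pop()
            current_level -= 1
        levels[toy[(l, r)]] = current_level
        if int(r) - int(l) > 1:
            current_level += 1
            nesting_r.append(r)
    return levels  # {'All': 0, 'Raw Material': 1, 'Ore': 2, 'Finished': 1}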
def assign_self_values(leveled_dict: OrderedDict, svd_list: SVDList) -> None:
key_dict = {v['name']:k for k, v in leveled_dict.items()}
for item in svd_list:
key = key_dict[item.get("item_group")]
leveled_dict[key]['self_value'] += -item.get("stock_value_difference")
def assign_agg_values(leveled_dict: OrderedDict) -> None:
keys = list(leveled_dict.keys())[::-1]
prev_level = leveled_dict[keys[-1]]['level']
accu = [0]
for k in keys[:-1]:
curr_level = leveled_dict[k]['level']
if curr_level == prev_level:
accu[-1] += leveled_dict[k]['self_value']
leveled_dict[k]['agg_value'] = leveled_dict[k]['self_value']
elif curr_level > prev_level:
accu.append(leveled_dict[k]['self_value'])
leveled_dict[k]['agg_value'] = accu[-1]
elif curr_level < prev_level:
accu[-1] += leveled_dict[k]['self_value']
leveled_dict[k]['agg_value'] = accu[-1]
prev_level = curr_level
# root node
rk = keys[-1]
leveled_dict[rk]['agg_value'] = sum(accu) + leveled_dict[rk]['self_value']
def get_row(name:str, value:float, is_bold:int, indent:int) -> Row:
item_group = name
if is_bold:
item_group = frappe.bold(item_group)
return frappe._dict(item_group=item_group, cogs_debit=value, indent=indent)
def assign_item_groups_to_svd_list(svd_list: SVDList) -> None:
ig_map = get_item_groups_map(svd_list)
for item in svd_list:
item.item_group = ig_map[item.get("item_code")]
def get_item_groups_map(svd_list: SVDList) -> Dict[str, str]:
item_codes = set(i['item_code'] for i in svd_list)
ig_list = frappe.get_list(
'Item', fields=['item_code','item_group'],
filters=[('item_code', 'in', item_codes)]
)
return {i['item_code']:i['item_group'] for i in ig_list}
def get_item_groups_dict() -> ItemGroupsDict:
item_groups_list = frappe.get_all("Item Group", fields=("name", "is_group", "lft", "rgt"))
return {(i['lft'],i['rgt']):{'name':i['name'], 'is_group':i['is_group']}
for i in item_groups_list}
def update_leveled_dict(leveled_dict: OrderedDict) -> None:
for k in leveled_dict:
leveled_dict[k].update({'self_value':0, 'agg_value':0})
|
ggaughan/dee
|
DeeCluster.py
|
Python
|
mit
| 1,475 | 0.008814 |
"""DeeCluster: provides a namespace for a set of DeeDatabases"""
__version__ = "0.1"
__author__ = "Greg Gaughan"
__copyright__ = "Copyright (C) 2007 Greg Gaughan"
__license__ = "MIT" #see Licence.txt for licence information
from Dee import Relation, Tuple
from DeeDatabase import Database
class Cluster(dict):
"""A namespace for databases"""
def __init__(self, name="nemo"):
"""Create a Cluster
Define initial databases here
(Called once on cluster creation)
"""
dict.__init__(self)
self.name=name
self.databases = Relation(['database_name'], self.vdatabases)
#todo should really have relations, attributes etc. to define this...
def __getattr__(self, key):
if self.has_key(key):
return self[key]
raise AttributeError, repr(key)
def __setattr__(self, key, value):
#todo reject non-Database?
self[key] = value
#todo delattr
def __contains__(self, item):
if item in self.__dict__:
if isinstance(self.__dict__[item], Database):
return True
return False
def __iter__(self):
for (k, v) in self.items():
#for (k, v) in self.__dict__.items():
if isinstance(v, Database):
yield (k, v)
def vdatabases(self):
return [Tuple(database_name=k)
for (k, v) in self]
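# Illustrative usage (hypothetical; the DeeDatabase.Database constructor
# signature is assumed, not documented here):
# cluster = Cluster('demo')
# cluster.sales = Database('sales')
# list(cluster)  # -> [('sales', <Database ...>)], and cluster.databases lists 'sales'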
|
freezmeinster/teh-manis
|
django/conf/global_settings.py
|
Python
|
bsd-3-clause
| 21,140 | 0.00175 |
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format (('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com'))
ADMINS = ()
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ()
# Local time zone for this installation. All choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities).
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'id'
# Languages we provide translations for, out of the box. The language name
# should be the utf-8 encoded local name for the language.
LANGUAGES = (
('ar', gettext_noop('Arabic')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('bn', gettext_noop('Bengali')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-gb', gettext_noop('British English')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy-nl', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('id', gettext_noop('Indonesian')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('nl', gettext_noop('Dutch')),
('no', gettext_noop('Norwegian')),
('nb', gettext_noop('Norwegian Bokmal')),
('nn', gettext_noop('Norwegian Nynorsk')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar", "fa")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = ()
LANGUAGE_COOKIE_NAME = 'django_language'
# If you set this to True, Django will format dates, numbers and calendars
# according to user current locale
USE_L10N = True
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various e-mails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# E-mail address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Whether to send broken-link e-mails.
SEND_BROKEN_LINK_EMAILS = False
# Database connection info.
# Legacy format
DATABASE_ENGINE = '' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = '' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
DATABASE_OPTIONS = {} # Set to empty dictionary for default.
# New format
DATABASES = {
}
# Classes used to implement db routing behaviour
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending e-mail.
EMAIL_HOST = 'localhost'
# Port for sending e-mail.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
# List of strings representing installed apps.
INSTALLED_APPS = (
'django.contrib.contenttypes',
'django.contrib.auth',
)
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
# 'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
# Default e-mail address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages send with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression o
|
wojtask/CormenPy
|
test/test_chapter16/test_problem16_1.py
|
Python
|
gpl-3.0
| 1,843 | 0.00217 |
import io
import math
import random
from contextlib import redirect_stdout
from unittest import TestCase
from hamcrest import *
from array_util import get_random_unique_array
from chapter16.problem16_1 import greedy_make_change, make_change, print_change
from datastructures.array import Array
from util import between
def get_min_change_size_bruteforce(n, d):
if n == 0:
return 0
min_change = math.inf
for denom in d:
if denom <= n:
min_change = min(min_change, 1 + get_min_change_size_bruteforce(n - denom, d))
return min_change
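# Worked example (illustrative): with d = [1, 2, 5] and n = 8 the recursion
# above returns 3, i.e. the optimal change 5 + 2 + 1.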
class TestProblem16_1(TestCase):
def test_greedy_make_change(self):
n = random.randint(1, 20)
d = Array([1, 2, 5, 10, 20, 50])
actual_change = greedy_make_change(n)
expected_change_size = get_min_change_size_bruteforce(n, d)
actual_change_sum = sum(actual_change[i] * d[i] for i in between(1, d.length))
assert_that(sum(actual_change), is_(equal_to(expected_change_size)))
assert_that(actual_change_sum, is_(equal_to(n)))
def test_make_change(self):
n = random.randint(1, 20)
k = random.randint(1, 5)
d, _ = get_random_unique_array(max_size=k, min_value=2, max_value=20)
d[1] = 1
captured_output = io.StringIO()
actual_change, actual_denominators = make_change(n, d)
with redirect_stdout(captured_output):
print_change(n, actual_denominators)
expected_change_size = get_min_change_size_bruteforce(n, d)
assert_that(actual_change[n], is_(equal_to(expected_change_size)))
actual_change_denoms = [int(d) for d in captured_output.getvalue().splitlines()]
assert_that(sum(actual_change_denoms), is_(equal_to(n)))
assert_that(len(actual_change_denoms), is_(equal_to(expected_change_size)))
|
Tigerwhit4/taiga-back
|
taiga/projects/tasks/api.py
|
Python
|
agpl-3.0
| 7,479 | 0.002541 |
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.utils.translation import ugettext as _
from taiga.base.api.utils import get_object_or_404
from taiga.base import filters, response
from taiga.base import exceptions as exc
from taiga.base.decorators import list_route
from taiga.base.api import ModelCrudViewSet
from taiga.projects.models import Project, TaskStatus
from django.http import HttpResponse
from taiga.projects.notifications.mixins import WatchedResourceMixin
from taiga.projects.history.mixins import HistoryResourceMixin
from taiga.projects.occ import OCCResourceMixin
from . import models
from . import permissions
from . import serializers
from . import services
class TaskViewSet(OCCResourceMixin, HistoryResourceMixin, WatchedResourceMixin, ModelCrudViewSet):
model = models.Task
permission_classes = (permissions.TaskPermission,)
filter_backends = (filters.CanViewTasksFilterBackend,)
filter_fields = ["user_story", "milestone", "project", "assigned_to",
"status__is_closed", "watchers"]
def get_serializer_class(self, *args, **kwargs):
if self.action in ["retrieve", "by_ref"]:
return serializers.TaskNeighborsSerializer
if self.action == "list":
return serializers.TaskListSerializer
return serializers.TaskSerializer
def update(self, request, *args, **kwargs):
self.object = self.get_object_or_none()
project_id = request.DATA.get('project', None)
if project_id and self.object and self.object.project.id != project_id:
try:
new_project = Project.objects.get(pk=project_id)
self.check_permissions(request, "destroy", self.object)
self.check_permissions(request, "create", new_project)
sprint_id = request.DATA.get('milestone', None)
if sprint_id is not None and new_project.milestones.filter(pk=sprint_id).count() == 0:
request.DATA['milestone'] = None
us_id = request.DATA.get('user_story', None)
if us_id is not None and new_project.user_stories.filter(pk=us_id).count() == 0:
request.DATA['user_story'] = None
status_id = request.DATA.get('status', None)
if status_id is not None:
try:
old_status = self.object.project.task_statuses.get(pk=status_id)
new_status = new_project.task_statuses.get(slug=old_status.slug)
request.DATA['status'] = new_status.id
except TaskStatus.DoesNotExist:
request.DATA['status'] = new_project.default_task_status.id
except Project.DoesNotExist:
return response.BadRequest(_("The project doesn't exist"))
return super().update(request, *args, **kwargs)
def pre_save(self, obj):
if obj.user_story:
obj.milestone = obj.user_story.milestone
if not obj.id:
obj.owner = self.request.user
super().pre_save(obj)
def pre_conditions_on_save(self, obj):
super().pre_conditions_on_save(obj)
if obj.milestone and obj.milestone.project != obj.project:
raise exc.WrongArguments(_("You don't have permissions to set this sprint to this task."))
if obj.user_story and obj.user_story.project != obj.project:
raise exc.WrongArguments(_("You don't have permissions to set this user story to this task."))
if obj.status and obj.status.project != obj.project:
raise exc.WrongArguments(_("You don't have permissions to set this status to this task."))
if obj.milestone and obj.user_story and obj.milestone != obj.user_story.milestone:
raise exc.WrongArguments(_("You don't have permissions to set this sprint to this task."))
@list_route(methods=["GET"])
def by_ref(self, request):
ref = request.QUERY_PARAMS.get("ref", None)
project_id = request.QUERY_PARAMS.get("project", None)
task = get_object_or_404(models.Task, ref=ref, project_id=project_id)
return self.retrieve(request, pk=task.pk)
@list_route(methods=["GET"])
def csv(self, request):
uuid = request.QUERY_PARAMS.get("uuid", None)
if uuid is None:
return response.NotFound()
project = get_object_or_404(Project, tasks_csv_uuid=uuid)
queryset = project.tasks.all().order_by('ref')
data = services.tasks_to_csv(project, queryset)
csv_response = HttpResponse(data.getvalue(), content_type='application/csv; charset=utf-8')
csv_response['Content-Disposition'] = 'attachment; filename="tasks.csv"'
return csv_response
@list_route(methods=["POST"])
def bulk_create(self, request, **kwargs):
serializer = serializers.TasksBulkSerializer(data=request.DATA)
if serializer.is_valid():
data = serializer.data
project = Project.objects.get(id=data["project_id"])
self.check_permissions(request, 'bulk_create', project)
tasks = services.create_tasks_in_bulk(
data["bulk_tasks"], milestone_id=data["sprint_id"], user_story_id=data["us_id"],
status_id=data.get("status_id") or project.default_task_status_id,
project=project, owner=request.user, callback=self.post_save, precall=self.pre_save)
tasks_serialized = self.get_serializer_class()(tasks, many=True)
return response.Ok(tasks_serialized.data)
return response.BadRequest(serializer.errors)
def _bulk_update_order(self, order_field, request, **kwargs):
serializer = serializers.UpdateTasksOrderBulkSerializer(data=request.DATA)
if not serializer.is_valid():
return response.BadRequest(serializer.errors)
data = serializer.data
project = get_object_or_404(Project, pk=data["project_id"])
self.check_permissions(request, "bulk_update_order", project)
services.update_tasks_order_in_bulk(data["bulk_tasks"],
project=project,
field=order_field)
services.snapshot_tasks_in_bulk(data["bulk_tasks"], request.user)
return response.NoContent()
@list_route(methods=["POST"])
def bulk_update_taskboard_order(self, request, **kwargs):
return self._bulk_update_order("taskboard_order", request, **kwargs)
@list_route(methods=["POST"])
def bulk_update_us_order(self, request, **kwargs):
return self._bulk_update_order("us_order", request, **kwargs)
|
m4ll0k/Spaghetti
|
plugins/fingerprint/waf/jiasule.py
|
Python
|
gpl-3.0
| 587 | 0.040886 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# @name: Wascan - Web Application Scanner
# @repo: https://github.com/m4ll0k/Wascan
# @author: Momo Outaadi (M4ll0k)
# @license: See the file 'LICENSE.txt'
from re import search,I
def jiasule(headers,content):
_ = False
for header in headers.items():
_ |= search(r'__jsluid=|jsl_tracking',header[1],I) is not None
_ |= search(r'jiasule-waf',header[1],I) is not None
if _:break
_ |= search(r'static\.jiasule\.com/static/js/http_error\.js',content) is not None
if _ :
return "Jiasule Web Application Firewall (Jiasule)"
|
RedHatSatellite/satellite-sanity
|
satellite_sanity_lib/rules/sat5_cobbler_config.py
|
Python
|
gpl-3.0
| 3,726 | 0.021202 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import re
tags = ['Satellite_5', 'Spacewalk']
name = 'Basic Cobbler settings are correct'
def etc_cobbler_settings(data):
"""
Verify settings in /etc/cobbler/settings:
redhat_management_type: "site"
redhat_management_server: "satellite.example.com"
server: satellite.example.com
Theoretically we can have one option specified multiple times, so we want
to evaluate only the last one.
"""
out = []
opts_found = 0
hostname = data['hostname'][0]
etc_cobbler_settings_redhat_management_type = ''
etc_cobbler_settings_redhat_management_server = ''
etc_cobbler_settings_server = ''
for line in data['etc_cobbler_settings']:
if re.match('^\s*redhat_management_type\s*:', line):
opts_found += 1
val = line.split(':')[1].strip()
if re.search(r'[\'"]?\bsite\b[\'"]?', val):
etc_cobbler_settings_redhat_management_type = ''
else:
etc_cobbler_settings_redhat_management_type = 'In /etc/cobbler/settings there should be \'redhat_management_type: "site"\''
if re.match('^\s*redhat_management_server\s*:', line):
opts_found += 1
val = line.split(':')[1].strip()
if re.search(r'[\'"]?\b%s\b[\'"]?' % hostname, val):
etc_cobbler_settings_redhat_management_server = ''
else:
etc_cobbler_settings_redhat_management_server = 'In /etc/cobbler/settings there should be \'redhat_management_server: %s\'' % hostname
if re.match('^\s*server\s*:', line):
opts_found += 1
val = line.split(':')[1].strip()
if re.search(r'[\'"]?\b%s\b[\'"]?' % hostname, val):
etc_cobbler_settings_server = ''
else:
etc_cobbler_settings_server = 'In /etc/cobbler/settings there should be \'server: %s\'' % hostname
if opts_found != 3:
out.append("Not all of redhat_management_type, redhat_management_server and server options found in /etc/cobbler/settings")
for o in (etc_cobbler_settings_redhat_management_type, etc_cobbler_settings_redhat_management_server, etc_cobbler_settings_server):
if o != '':
out.append(o)
return out
def etc_cobbler_modules_conf(data):
"""
Verify settings in /etc/cobbler/modules.conf:
[authentication]
module = authn_spacewalk
"""
out = []
opts_found = 0
etc_cobbler_modules_conf_authentication_module = ''
section_auth = False
for line in data['etc_cobbler_modules_conf']:
if re.match('^\s*\[.*\]\s*$', line):
section_auth = False
if re.match('^\s*\[authentication\]\s*$', line):
section_auth = True
continue
if section_auth and re.match('^\s*module\s*=', line):
opts_found += 1
val = line.split('=')[1].strip()
if re.search(r'[\'"]?\bauthn_spacewalk\b[\'"]?', val):
etc_cobbler_modules_conf_authentication_module = ''
else:
etc_cobbler_modules_conf_authentication_module = 'In /etc/cobbler/modules.conf there should be \'module = authn_spacewalk\''
if opts_found != 1:
out.append("Option module in section authentication not found in /etc/cobbler/modules.conf")
for o in (etc_cobbler_modules_conf_authentication_module,):
if o != '':
out.append(o)
return out
def main(data):
"""
For the hostname check noted in the KB article we have a different rule; we are
still missing a check for the hostname/IP in /etc/hosts though.
"""
out = []
out += etc_cobbler_settings(data)
out += etc_cobbler_modules_conf(data)
if out:
return {'errors': out}
def text(result):
out = ""
out += "Certain config options in Cobbler configuration should be set as expected:\n"
for e in result['errors']:
out += " %s\n" % e
out += "See https://access.redhat.com/solutions/27936"
return out
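# Illustrative usage: the expected 'data' layout, with each file given as a list
# of lines, mirroring what the checks above read. The sample values are hypothetical.
def _example_run():
    data = {
        'hostname': ['satellite.example.com'],
        'etc_cobbler_settings': [
            'redhat_management_type: "site"',
            'redhat_management_server: "satellite.example.com"',
            'server: satellite.example.com',
        ],
        'etc_cobbler_modules_conf': [
            '[authentication]',
            'module = authn_spacewalk',
        ],
    }
    return main(data)  # None when every option matches the expected value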
|
tamland/trakt-sync
|
xbmc_library.py
|
Python
|
gpl-3.0
| 4,438 | 0.001127 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Thomas Amland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import logging
import json
import pykka
from models import Movie, Episode
logger = logging.getLogger(__name__)
class XBMCLibrary(pykka.ThreadingActor):
_movie_properties = ['title', 'year', 'imdbnumber', 'playcount']
def __init__(self):
pykka.ThreadingActor.__init__(self)
def movie(self, movieid):
params = {
'movieid': movieid,
'properties': self._movie_properties
}
response = jsonrpc('VideoLibrary.GetMovieDetails', params)
movie = response['result']['moviedetails']
return _load_movie(movie)
def episode(self, episodeid):
params = {
'episodeid': episodeid,
'properties': ['season', 'episode', 'playcount', 'tvshowid'],
}
episode = jsonrpc('VideoLibrary.GetEpisodeDetails', params)['result']['episodedetails']
params = {'tvshowid': episode['tvshowid'], 'properties': ['imdbnumber']}
tvshow = jsonrpc('VideoLibrary.GetTVShowDetails', params)['result']['tvshowdetails']
return _load_episode(episode, tvshow['imdbnumber'])
def movies(self):
params = {'properties': self._movie_properties}
response = jsonrpc('VideoLibrary.GetMovies', params)
movies = response['result'].get('movies', [])
movies = map(_load_movie, movies)
return [m for m in movies if m is not None]
def episodes(self):
params = {'properties': ['imdbnumber']}
tvshows = jsonrpc('VideoLibrary.GetTVShows', params)['result']\
.get('tvshows', [])
ret = []
for tvshow in tvshows:
params = {
'tvshowid': tvshow['tvshowid'],
'properties': ['season', 'episode', 'playcount', 'lastplayed']
}
episodes = jsonrpc('VideoLibrary.GetEpisodes', params)['result']\
.get('episodes', [])
episodes = [_load_episode(ep, tvshow['imdbnumber']) for ep in episodes]
ret.extend(episodes)
return ret
def update_movie_details(self, movie):
if not movie.xbmcid or movie.playcount <= 0:
return False
params = {'movieid': movie.xbmcid, 'playcount': movie.playcount}
r = jsonrpc('VideoLibrary.SetMovieDetails', params)
return r.get('result') == 'OK'
def update_episode_details(self, item):
if not item.xbmcid or item.playcount <= 0:
return False
params = {'episodeid': item.xbmcid, 'playcount': item.playcount}
r = jsonrpc('VideoLibrary.SetEpisodeDetails', params)
return r.get('result') == 'OK'
def _load_movie(r):
return Movie(
title=r['title'],
year=r['year'],
imdbid=r['imdbnumber'],
xbmcid=r['movieid'],
playcount=r['playcount'],
)
def _load_episode(r, tvshowid):
return Episode(
tvdbid=tvshowid,
season=r['season'],
episode=r['episode'],
xbmcid=r['episodeid'],
playcount=r['playcount'],
)
def jsonrpc(method, params=None):
if params is None:
params = {}
payload = {
'jsonrpc': '2.0',
'id': 1,
'method': method,
'params': params,
}
payload = json.dumps(payload, encoding='utf-8')
try:
import xbmc
except:
import requests
response = requests.post(
"http://localhost:8081/jsonrpc",
data=payload,
headers={'content-type': 'application/json'}).json()
else:
response = json.loads(xbmc.executeJSONRPC(payload), encoding='utf-8')
if 'error' in response:
logger.error("jsonrpc error: %r" % response)
return None
return response
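# Illustrative usage: outside of XBMC/Kodi the call above falls back to HTTP on
# localhost:8081, so this assumes a reachable JSON-RPC endpoint; 'JSONRPC.Ping'
# is a standard introspection method.
def _example_ping():
    return jsonrpc('JSONRPC.Ping')  # e.g. {u'id': 1, u'jsonrpc': u'2.0', u'result': u'pong'}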
|
flypy/flypy
|
flypy/tests/test_calling_conv.py
|
Python
|
bsd-2-clause
| 1,235 | 0.004049 |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import unittest
from flypy import jit
class TestCallingConventionFromPython(unittest.TestCase):
def test_varargs(self):
@jit
def f(a, b, *args):
return [a, b, args[1]]
self.assertEqual(f(1, 2, 0, 3, 0), [1, 2, 3])
class TestCallingFlypyConvention(unittest.TestCase):
def test_varargs(self):
@jit
def g(a, b, *args):
return [a, b, args[1]]
@jit
def f(a, b, c, d, e):
return g(a, b, c, d, e)
self.assertEqual(f(1, 2, 0, 3, 0), [1, 2, 3])
def test_unpacking(self):
@jit
def g(a, b, c):
return [a, b, c]
@jit
def f(*args):
return g(*args)
self.assertEqual(f(1, 2, 3), [1, 2, 3])
def test_unpacking2(self):
raise unittest.SkipTest("unpacking with additional varargs")
@jit
def g(a, b, *args):
return [a, b, args[0]]
@jit
def f(*args):
return g(*args)
self.assertEqual(f(1, 2, 3), [1, 2, 3])
# TODO: Test unpacking with GenericTuple
if __name__ == '__main__':
unittest.main()
|
pettersoderlund/fondout
|
script/StockScraper-master/import_symbols_from_industry.py
|
Python
|
bsd-3-clause
| 1,827 | 0.004379 |
""" Scrape yahoo industry database through YQL """
import mysql.connector
import stockretriever
import sys
cnx = mysql.connector.connect(user='root', password='root', database='yahoo')
cursor = cnx.cursor()
add_employee = ("INSERT INTO stocks "
"(symbol, name, industry) "
"VALUES (%s, %s, %s) "
"ON DUPLICATE KEY UPDATE industry=VALUES(industry)")
sectors = stockretriever.get_industry_ids()
for sector in sectors:
for industry in sector['industry']:
try:
print "\nProcessing", industry['name'], industry['id']
except TypeError as E:
print E
continue
industry_index = stockretriever.get_industry_index(industry['id'])
try:
industry_name = industry_index['name']
industry_companies = industry_index['company']
industry_id = industry_index['id']
except Exception, e:
print e
continue
for company in industry_companies:
try:
data_employee = (company['symbol'], company['name'], industry_id)
try:
cursor.execute(add_employee, data_employee)
except mysql.connector.errors.IntegrityError, e:
print(e)
continue
try:
print "Success adding", company['symbol'], company['name']
except UnicodeEncodeError as e:
print e
cnx.commit()
except OSError as err:
print(err)
except TypeError as err:
print(err)
except Exception as e:
print "Unknown error, error caught.", e
continue
cursor.close()
cnx.close()
|
abrenaut/mrot
|
mrot/cli.py
|
Python
|
mit
| 1,423 | 0.003514 |
# -*- coding: utf-8 -*-
import logging
import argparse
from .imdb import find_movies
logger = logging.getLogger('mrot')
def parse_args():
parser = argparse.ArgumentParser(prog='mrot', description='Show movie ratings over time.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('movie_name', help='the name of the movie')
# Optional arguments
parser.add_argument("-c", "--concurrency", type=int, default=2,
help="maximum number of concurrent requests to the wayback machine")
parser.add_argument("-d", "--delta", type=int, default=365, help="minimum number of days between two ratings")
parser.add_argument("-q", "--quiet", action="store_true", help="don't print progress")
args = parser.parse_args()
return args
def main():
args = parse_args()
logging.basicConfig(level=(logging.WARN if args.quiet else logging.INFO))
# Don't allow more than 10 concurrent requests to the wayback machine
concurrency = min(args.concurrency, 10)
# Find the movies corresponding to the given movie name
imdb_movies = find_movies(args.movie_name)
if len(imdb_movies) > 0:
# Show rating for the first movie matching the given name
imdb_movie = imdb_movies[0]
imdb_movie.plot_ratings(concurrency, args.delta)
else:
logger.info('Movie not found')
|
jamielapointe/PyPassiveRangingFilters
|
pyPassiveRanging/dynamicsModels/cartState.py
|
Python
|
mit
| 236 | 0.016949 |
'''
Created on Apr 30, 2017
@author: jamie
'''
import numpy as np
class CartState(object):
'''
Defines a Cartesian state information
'''
pos_I = np.array([0., 0., 0.])
vel_I = np.array([0., 0., 0.])
| |
pixelpicosean/my-godot-2.1
|
version.py
|
Python
|
mit
| 89 | 0 |
short_name = "godot"
name = "Godot Engine"
major = 2
minor = 1
patch = 4
status = "beta"
|
lucashanke/houseofdota
|
app/util/match_util.py
|
Python
|
mit
| 2,180 | 0.005046 |
from __future__ import division
import datetime
import pytz
from app.models import Patch
def get_match_patch(match_date):
utc = pytz.UTC
#pylint: disable=no-value-for-parameter
match_date = utc.localize(datetime.datetime.fromtimestamp(match_date))
for patch in Patch.objects.all().order_by('-start_date'):
if patch.start_date < match_date:
return patch
return None
#pylint: disable=invalid-name,too-many-arguments
def is_valid_match(gmd, public=None, league=None, team=None, solo=None, \
ranked=None, ap=None, cm=None, ar=None, rap=None):
return check_lobby_type(gmd, public, league, team, solo, ranked) is True \
and check_game_mode(gmd, ap, cm, ar, rap) is True \
and check_abandon(gmd) is False
#pylint: disable=invalid-name,too-many-arguments
def check_lobby_type(match, public=None, league=None, team=None, solo=None, ranked=None):
if public is None and league is None and team is None and solo is None and ranked is None:
public = league = team = solo = ranked = True
if match is not None:
match_type = match['lobby_type']
#pylint: disable=too-many-boolean-expressions
if (public and match_type == 0) or \
(league and match_type == 2) or \
(team and match_type == 5) or \
(solo and match_type == 6) or \
(ranked and match_type == 7):
return True
return False
else:
return None
def check_abandon(match_json):
if match_json is None:
return None
for player in match_json["players"]:
if player["leaver_status"] != 0 or player['hero_id'] == 0:
return True
return False
def check_game_mode(match, ap=None, cm=None, ar=None, rap=None):
if ap is None and cm is None and ar is None and rap is None:
ap = cm = ar = rap = True
game_mode = match["game_mode"]
#pylint: disable=too-many-boolean-expressions
if (ap and game_mode == 1) or \
(cm and game_mode == 2) or \
(ar and game_mode == 5) or \
(rap and game_mode == 22):
return True
return False
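# Illustrative usage: a minimal match dict carrying only the keys read above
# (lobby_type, game_mode, players); the values are hypothetical.
def _example_is_valid():
    gmd = {
        'lobby_type': 7,   # ranked lobby
        'game_mode': 22,   # ranked all pick
        'players': [{'leaver_status': 0, 'hero_id': 1}],
    }
    return is_valid_match(gmd, ranked=True, rap=True)  # -> True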
|
Sigmapoint/notos
|
src/notos/migrations/0001_initial.py
|
Python
|
mit
| 3,961 | 0.007321 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'PushResult'
db.create_table(u'notos_pushresult', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('response_code', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
))
db.send_create_signal(u'notos', ['PushResult'])
# Adding model 'ScheduledPush'
db.create_table(u'notos_scheduledpush', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('scheduled_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('send_at', self.gf('django.db.models.fields.DateTimeField')()),
('canceled_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('registration_id', self.gf('django.db.models.fields.CharField')(max_length=4095)),
('result', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['notos.PushResult'], unique=True, null=True, blank=True)),
('attempt_no', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=1)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('data', self.gf('json_field.fields.JSONField')(default=u'null')),
))
db.send_create_signal(u'notos', ['ScheduledPush'])
def backwards(self, orm):
# Deleting model 'PushResult'
db.delete_table(u'notos_pushresult')
# Deleting model 'ScheduledPush'
db.delete_table(u'notos_scheduledpush')
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'notos.pushresult': {
'Meta': {'object_name': 'PushResult'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'response_code': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'notos.scheduledpush': {
'Meta': {'object_name': 'ScheduledPush'},
'attempt_no': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
'canceled_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'data': ('json_field.fields.JSONField', [], {'default': "u'null'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'registration_id': ('django.db.models.fields.CharField', [], {'max_length': '4095'}),
'result': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['notos.PushResult']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'scheduled_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'send_at': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['notos']
|
adithyabhatkajake/kompile
|
domains/migrations/0004_auto_20161103_1044.py
|
Python
|
gpl-2.0
| 678 | 0.001475 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-11-03 10:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('domains', '0003_auto_20161103_1031'),
]
operations = [
migrations.RemoveField(
model_name='domain',
name='subtopics',
),
migrations.AddField(
model_name='subtopic',
name='dmain',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='subtopics', to='domains.domain'),
),
]
|
jhaase1/zapdos
|
tests/reflections/low_initial/ToCSV.py
|
Python
|
lgpl-2.1
| 586 | 0.003413 |
from paraview.simple import *
import os
import sys
import numpy as np
path = os.getcwd() + "/"
file_name = sys.argv[1]
inp = file_name + ".e"
outCSV = file_name + ".csv"
reader = ExodusIIReader(FileName=path+inp)
tsteps = reader.TimestepValues
writer = CreateWriter(path+file_name+"_Cells.csv", reader)
writer.FieldAssociation = "Cells" # or "Points"
writer.UpdatePipeline(time=tsteps[len(tsteps)-1])
del writer
writer = CreateWriter(path+file_name+"_Points.csv", reader)
writer.FieldAssociation = "Points" # or "Cells"
writer.UpdatePipeline(time=tsteps[len(tsteps)-1])
del writer
|
dhuang/incubator-airflow
|
airflow/providers/ssh/hooks/ssh.py
|
Python
|
apache-2.0
| 14,364 | 0.001601 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hook for SSH connections."""
import os
import warnings
from base64 import decodebytes
from io import StringIO
from typing import Dict, Optional, Tuple, Union
import paramiko
from paramiko.config import SSH_PORT
from sshtunnel import SSHTunnelForwarder
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
try:
from airflow.utils.platform import getuser
except ImportError:
from getpass import getuser
class SSHHook(BaseHook):
"""
Hook for ssh remote execution using Paramiko.
ref: https://github.com/paramiko/paramiko
This hook also lets you create an ssh tunnel and serves as a basis for SFTP file transfer
:param ssh_conn_id: :ref:`ssh connection id<howto/connection:ssh>` from airflow
Connections from where all the required parameters can be fetched like
username, password or key_file. Though the priority is given to the
params passed during init.
:type ssh_conn_id: str
:param remote_host: remote host to connect
:type remote_host: str
:param username: username to connect to the remote_host
:type username: str
:param password: password of the username to connect to the remote_host
:type password: str
:param key_file: path to key file to use to connect to the remote_host
:type key_file: str
:param port: port of remote host to connect (Default is paramiko SSH_PORT)
:type port: int
:param timeout: timeout for the attempt to connect to the remote_host.
:type timeout: int
:param keepalive_interval: send a keepalive packet to remote host every
keepalive_interval seconds
:type keepalive_interval: int
"""
# List of classes to try loading private keys as, ordered (roughly) by most common to least common
_pkey_loaders = (
paramiko.RSAKey,
paramiko.ECDSAKey,
paramiko.Ed25519Key,
paramiko.DSSKey,
)
_host_key_mappings = {
'rsa': paramiko.RSAKey,
'dss': paramiko.DSSKey,
'ecdsa': paramiko.ECDSAKey,
'ed25519': paramiko.Ed25519Key,
}
conn_name_attr = 'ssh_conn_id'
default_conn_name = 'ssh_default'
conn_type = 'ssh'
hook_name = 'SSH'
@staticmethod
def get_ui_field_behaviour() -> Dict:
"""Returns custom field behaviour"""
return {
"hidden_fields": ['schema'],
"relabeling": {
'login': 'Username',
},
}
def __init__(
self,
ssh_conn_id: Optional[str] = None,
remote_host: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
key_file: Optional[str] = None,
port: Optional[int] = None,
timeout: int = 10,
keepalive_interval: int = 30,
) -> None:
super().__init__()
self.ssh_conn_id = ssh_conn_id
self.remote_host = remote_host
self.username = username
self.password = password
self.key_file = key_file
self.pkey = None
self.port = port
self.timeout = timeout
self.keepalive_interval = keepalive_interval
# Default values, overridable from Connection
self.compress = True
self.no_host_key_check = True
self.allow_host_key_change = False
self.host_proxy = None
self.host_key = None
self.look_for_keys = True
# Placeholder for deprecated __enter__
self.client = None
# Use connection to override defaults
if self.ssh_conn_id is not None:
conn = self.get_connection(self.ssh_conn_id)
if self.username is None:
self.username = conn.login
if self.password is None:
self.password = conn.password
if self.remote_host is None:
self.remote_host = conn.host
if self.port is None:
self.port = conn.port
if conn.extra is not None:
extra_options = conn.extra_dejson
if "key_file" in extra_options and self.key_file is None:
self.key_file = extra_options.get("key_file")
private_key = extra_options.get('private_key')
private_key_passphrase = extra_options.get('private_key_passphrase')
if private_key:
self.pkey = self._pkey_from_private_key(private_key, passphrase=private_key_passphrase)
if "timeout" in extra_options:
self.timeout = int(extra_options["timeout"], 10)
if "compress" in extra_options and str(extra_options["compress"]).lower() == 'false':
self.compress = False
host_key = extra_options.get("host_key")
no_host_key_check = extra_options.get("no_host_key_check")
if no_host_key_check is not None:
no_host_key_check = str(no_host_key_check).lower() == "true"
if host_key is not None and no_host_key_check:
raise ValueError("Must check host key when provided")
self.no_host_key_check = no_host_key_check
if (
"allow_host_key_change" in extra_options
and str(extra_options["allow_host_key_change"]).lower() == 'true'
):
self.allow_host_key_change = True
if (
"look_for_keys" in extra_options
and str(extra_options["look_for_keys"]).lower() == 'false'
):
self.look_for_keys = False
if host_key is not None:
if host_key.startswith("ssh-"):
key_type, host_key = host_key.split(None)[:2]
key_constructor = self._host_key_mappings[key_type[4:]]
else:
key_constructor = paramiko.RSAKey
decoded_host_key = decodebytes(host_key.encode('utf-8'))
self.host_key = key_constructor(data=decoded_host_key)
self.no_host_key_check = False
if self.pkey and self.key_file:
raise AirflowException(
"Params key_file and private_key both provided. Must provide no more than one."
)
if not self.remote_host:
raise AirflowException("Missing required param: remote_host")
# Auto detecting username values from system
if not self.username:
self.log.debug(
"username to ssh to host: %s is not specified for connection id"
" %s. Using system's default provided by getpass.getuser()",
self.remote_host,
self.ssh_conn_id,
)
self.username = getuser()
user_ssh_config_filename = os.path.expanduser('~/.ssh/config')
if os.path.isfile(user_ssh_config_filename):
ssh_conf = paramiko.SSHConfig()
with open(user_ssh_config_filename) as config_fd:
ssh_conf.parse(config_fd)
host_info = ssh_conf.lookup(self.remote_host)
if host_info and host_info.get('proxycommand'):
self.host_proxy = paramiko.ProxyCommand(host_info.get('proxycommand'))
if not (self.
|
openprocurement/reports
|
reports/helpers.py
|
Python
|
apache-2.0
| 9,179 | 0.000545 |
import os.path
import argparse
import arrow
import iso8601
import requests
import json
import re
import datetime
from yaml import load
from repoze.lru import lru_cache
from dateutil.parser import parse
from time import sleep
from retrying import retry
from logging import getLogger
RE = re.compile(r'(^.*)@(\d{4}-\d{2}-\d{2}--\d{4}-\d{2}-\d{2})?-([a-z\-]*)\.zip')
LOGGER = getLogger("BILLING")
def get_arguments_parser():
parser = argparse.ArgumentParser(
description="Openprocurement Billing"
)
report = parser.add_argument_group('Report', 'Report parameters')
report.add_argument(
'-c',
'--config',
dest='config',
required=True,
help="Path to config file. Required"
)
report.add_argument(
'-b',
'--broker',
dest='broker',
required=True,
help='Broker name. Required'
)
report.add_argument(
'-p',
'--period',
nargs='+',
dest='period',
default=[],
help='Specifies period for billing report.\n '
'By default report will be generated from all database'
)
report.add_argument(
'-t',
'--timezone',
dest='timezone',
default='Europe/Kiev',
help='Timezone. Default "Europe/Kiev"'
)
return parser
def thresholds_headers(cthresholds):
prev_threshold = None
result = []
thresholds = [str(t / 1000) for t in cthresholds]
for t in thresholds:
if not prev_threshold:
result.append("<= " + t)
else:
result.append(">" + prev_threshold + "<=" + t)
prev_threshold = t
result.append(">" + thresholds[-1])
return result
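# Illustrative example: thresholds are given in thousandths, so with integer
# division (Python 2) thresholds_headers([5000, 10000, 20000]) returns
# ['<= 5', '>5<=10', '>10<=20', '>20'].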
@lru_cache(10000)
@retry(wait_exponential_multiplier=1000, stop_max_attempt_number=5)
def get_rate(currency, date, proxy_address=None):
base_url = 'http://bank.gov.ua/NBUStatService'\
'/v1/statdirectory/exchange?date={}&json'.format(
iso8601.parse_date(date).strftime('%Y%m%d')
)
if proxy_address:
resp = requests.get(base_url, proxies={'http': proxy_address}).text.encode('utf-8')
else:
resp = requests.get(base_url).text.encode('utf-8')
doc = json.loads(resp)
if currency == u'RUR':
currency = u'RUB'
rate = filter(lambda x: x[u'cc'] == currency, doc)[0][u'rate']
sleep(15)
return rate
def value_currency_normalize(value, currency, date, proxy_address=None):
if not isinstance(value, (float, int)):
raise ValueError
rate = get_rate(currency, date, proxy_address)
return value * rate, rate
def create_db_url(host, port, user, passwd, db_name=''):
up = ''
if user and passwd:
up = '{}:{}@'.format(user, passwd)
url = 'http://{}{}:{}'.format(up, host, port)
if db_name:
url += '/{}'.format(db_name)
return url
class Kind(argparse.Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
self.kinds = set(['general', 'special', 'defense', '_kind'])
super(Kind, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=self.kinds,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(
self, parser, args, values, option_string=None):
options = values.split('=')
self.parser = parser
if len(options) < 2:
parser.error("usage <option>=<kind>")
action = options[0]
kinds = options[1].split(',')
try:
getattr(self, action)(kinds)
except AttributeError:
self.parser.error("<option> should be one from [include, exclude, one]")
setattr(args, self.dest, self.kinds)
def include(self, kinds):
for kind in kinds:
self.kinds.add(kind)
def exclude(self, kinds):
for kind in kinds:
if kind in self.kinds:
self.kinds.remove(kind)
def one(self, kinds):
for kind in kinds:
if kind not in ['general', 'special', 'defense', 'other', '_kind']:
self.parser.error('Allowed only general, special, defense, other and _kind')
self.kinds = set(kinds)
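# Illustrative behaviour of the Kind action above: a value such as "include=other"
# adds 'other' to the default kind set, "exclude=defense" removes it, and
# "one=general,special" replaces the set entirely. The CLI flag wired to this
# action is defined elsewhere and is not shown here.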
class Status(argparse.Action):
def __init__(self,
option_strings,
dest,
nargs=None,
const=None,
default=None,
type=None,
choices=None,
required=False,
help=None,
metavar=None):
self.statuses = {'action': '', 'statuses': set([u'active',
u'complete',
u'active.awarded',
u'cancelled',
u'unsuccessful'
])}
super(Status, self).__init__(
option_strings=option_strings,
dest=dest,
nargs=nargs,
const=const,
default=self.statuses,
type=type,
choices=choices,
required=required,
help=help,
metavar=metavar)
def __call__(
self, parser, args, values, option_string=None):
options = values.split('=')
self.parser = parser
if len(options) < 2:
parser.error("usage <option>=<kind>")
action = options[0]
statuses = options[1].split(',')
try:
getattr(self, action)(statuses)
except AttributeError:
self.parser.error("<option> should be one from [include, exclude, one]")
setattr(args, self.dest, self.statuses)
def include(self, sts):
self.statuses['action'] = 'include'
for status in sts:
self.statuses['statuses'].add(status)
def exclude(self, sts):
self.statuses['action'] = 'exclude'
for status in sts:
if status in self.statuses:
self.statuses['statuses'].remove(status)
def one(self, sts):
self.statuses['action'] = 'one'
self.statuses['statuses'] = set(sts)
def convert_date(
date, timezone="Europe/Kiev",
to="UTC", format="%Y-%m-%dT%H:%M:%S.%f"
):
date = arrow.get(parse(date), timezone)
return date.to(to).strftime(format)
def prepare_report_interval(period=None):
if not period:
return ("", "9999-12-30T00:00:00.000000")
if len(period) == 1:
return (convert_date(period[0]), "9999-12-30T00:00:00.000000")
if len(period) == 2:
return (convert_date(period[0]), convert_date(period[1]))
raise ValueError("Invalid period")
def prepare_result_file_name(utility):
start, end = "", ""
if utility.start_date:
start = convert_date(
utility.start_date,
timezone="UTC",
to="Europe/Kiev",
format="%Y-%m-%d"
)
if not utility.end_date.startswith("9999"):
end = convert_date(
utility.end_date,
timezone="UTC",
to="Europe/Kiev",
format="%Y-%m-%d"
)
return os.path.join(
utility.config.out_path,
"{}@{}--{}-{}.csv".format(
utility.broker,
start,
end,
utility.operation
)
)
def parse_period_string(period):
if period:
dates = period.split('--')
if len(dates) > 2:
raise ValueError("Invalid date string")
start, end = [parse(date) for date in period.split('--'
|
sanSS/programming-contests
|
project-euler/problem142.py
|
Python
|
gpl-3.0
| 1,119 | 0.003575 |
#!/usr/bin/env python3
########################################################################
# Solves problem 142 from projectEuler.net.
# ???
# Copyright (C) 2011 Santiago Alessandri
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# You can contact me at san.lt.ss@gmail.com
# Visit my wiki at http://san-ss.is-a-geek.com.ar
########################################################################
# x + y = a
# x + z = b
# y + z = c
# x - y = d
# x - z = e
# y - z = f
# e = a - c
# f =
|
Gaia3D/QGIS
|
python/ext-libs/owslib/iso.py
|
Python
|
gpl-2.0
| 36,451 | 0.005816 |
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2009 Tom Kralidis
#
# Authors : Tom Kralidis <tomkralidis@gmail.com>
# Angelos Tzotsos <tzotsos@gmail.com>
#
# Contact email: tomkralidis@gmail.com
# =============================================================================
""" ISO metadata parser """
from owslib.etree import etree
from owslib import util
from owslib.namespaces import Namespaces
# default variables
def get_namespaces():
n = Namespaces()
ns = n.get_namespaces(["gco","gmd","gml","gml32","gmx","gts","srv","xlink"])
ns[None] = n.get_namespace("gmd")
return ns
namespaces = get_namespaces()
class MD_Metadata(object):
""" Process gmd:MD_Metadata """
def __init__(self, md=None):
if md is None:
self.xml = None
self.identifier = None
self.parentidentifier = None
self.language = None
self.dataseturi = None
self.languagecode = None
self.datestamp = None
self.charset = None
self.hierarchy = None
self.contact = []
self.datetimestamp = None
self.stdname = None
self.stdver = None
self.referencesystem = None
self.identification = None
self.serviceidentification = None
self.identificationinfo = []
self.distribution = None
self.dataquality = None
else:
if hasattr(md, 'getroot'): # standalone document
self.xml = etree.tostring(md.getroot())
else: # part of a larger document
self.xml = etree.tostring(md)
val = md.find(util.nspath_eval('gmd:fileIdentifier/gco:CharacterString', namespaces))
self.identifier = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:parentIdentifier/gco:CharacterString', namespaces))
self.parentidentifier = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:language/gco:CharacterString', namespaces))
self.language = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:dataSetURI/gco:CharacterString', namespaces))
self.dataseturi = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:language/gmd:LanguageCode', namespaces))
self.languagecode = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:dateStamp/gco:Date', namespaces))
self.datestamp = util.testXMLValue(val)
if not self.datestamp:
val = md.find(util.nspath_eval('gmd:dateStamp/gco:DateTime', namespaces))
self.datestamp = util.testXMLValue(val)
self.charset = _testCodeListValue(md.find(util.nspath_eval('gmd:characterSet/gmd:MD_CharacterSetCode', namespaces)))
self.hierarchy = _testCodeListValue(md.find(util.nspath_eval('gmd:hierarchyLevel/gmd:MD_ScopeCode', namespaces)))
self.contact = []
for i in md.findall(util.nspath_eval('gmd:contact/gmd:CI_ResponsibleParty', namespaces)):
o = CI_ResponsibleParty(i)
self.contact.append(o)
val = md.find(util.nspath_eval('gmd:dateStamp/gco:DateTime', namespaces))
self.datetimestamp = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:metadataStandardName/gco:CharacterString', namespaces))
self.stdname = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:metadataStandardVersion/gco:CharacterString', namespaces))
self.stdver = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:referenceSystemInfo/gmd:MD_ReferenceSystem', namespaces))
if val is not None:
self.referencesystem = MD_ReferenceSystem(val)
else:
self.referencesystem = None
# TODO: merge .identificationinfo into .identification
#warnings.warn(
# 'the .identification and .serviceidentification properties will merge into '
# '.identification being a list of properties. This is currently implemented '
# 'in .identificationinfo. '
# 'Please see https://github.com/geopython/OWSLib/issues/38 for more information',
# FutureWarning)
val = md.find(util.nspath_eval('gmd:identificationInfo/gmd:MD_DataIdentification', namespaces))
val2 = md.find(util.nspath_eval('gmd:identificationInfo/srv:SV_ServiceIdentification', namespaces))
if val is not None:
self.identification = MD_DataIdentification(val, 'dataset')
self.serviceidentification = None
elif val2 is not None:
self.identification = MD_DataIdentification(val2, 'service')
self.serviceidentification = SV_ServiceIdentification(val2)
else:
self.identification = None
self.serviceidentification = None
self.identificationinfo = []
for idinfo in md.findall(util.nspath_eval('gmd:identificationInfo', namespaces)):
val = list(idinfo)[0]
tagval = util.xmltag_split(val.tag)
if tagval == 'MD_DataIdentification':
self.identificationinfo.append(MD_DataIdentification(val, 'dataset'))
elif tagval == 'MD_ServiceIdentification':
self.identificationinfo.append(MD_DataIdentification(val, 'service'))
elif tagval == 'SV_ServiceIdentification':
self.identificationinfo.append(SV_ServiceIdentification(val))
val = md.find(util.nspath_eval('gmd:distributionInfo/gmd:MD_Distribution', namespaces))
if val is not None:
self.distribution = MD_Distribution(val)
else:
self.distribution = None
val = md.find(util.nspath_eval('gmd:dataQualityInfo/gmd:DQ_DataQuality', namespaces))
if val is not None:
self.dataquality = DQ_DataQuality(val)
else:
self.dataquality = None
class CI_Date(object):
""" process CI_Date """
def __init__(self, md=None):
if md is None:
self.date = None
self.type = None
else:
val = md.find(util.nspath_eval('gmd:date/gco:Date', namespaces))
if val is not None:
self.date = util.testXMLValue(val)
else:
val = md.find(util.nspath_eval('gmd:date/gco:DateTime', namespaces))
if val is not None:
self.date = util.testXMLValue(val)
else:
self.date = None
val = md.find(util.nspath_eval('gmd:dateType/gmd:CI_DateTypeCode', namespaces))
self.type = _testCodeListValue(val)
class CI_ResponsibleParty(object):
""" process CI_ResponsibleParty """
def __init__(self, md=None):
if md is None:
self.name = None
self.organization = None
self.position = None
self.phone = None
self.fax = None
self.address = None
self.city = None
self.region = None
self.postcode = None
self.country = None
self.email = None
self.onlineresource = None
self.role = None
else:
val = md.find(util.nspath_eval('gmd:individualName/gco:CharacterString', namespaces))
            self.name = util.testXMLValue(val)
            val = md.find(util.nspath_eval('gmd:organisationName/gco:CharacterString', namespaces))
self.organization = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:positionName/gco:CharacterString', namespaces))
self.position = util.testXMLValue(val)
val = md.find(util.nspath_eval('gmd:contactInfo/gmd:CI_Contac
|
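A minimal usage sketch for the parser class above: the constructor accepts either a parsed document (anything with a getroot() method) or a bare element, and 'iso_record.xml' is only a placeholder path:

from owslib.etree import etree
from owslib.iso import MD_Metadata

doc = etree.parse('iso_record.xml')       # placeholder: an ISO 19139 metadata file
md = MD_Metadata(doc)
print(md.identifier)
print(md.datestamp)
for party in md.contact:                  # CI_ResponsibleParty objects
    print(party.organization)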
adler-j/lie_grp_diffeo
|
examples/deformation_closest_pt_2d.py
|
Python
|
gpl-3.0
| 3,110 | 0.000322 |
import lie_group_diffeo as lgd
import odl
import numpy as np
# Select space and interpolation
space = odl.uniform_discr([-1, -1], [1, 1], [200, 200], interp='linear')
# Select template and target as gaussians
template = space.element(lambda x: np.exp(-(5 * x[0]**2 + x[1]**2) / 0.4**2))
target = space.element(lambda x: np.exp(-(1 * (x[0] + 0.2)**2 + x[1]**2) / 0.4**2))
# Define data matching functional
data_matching = odl.solvers.L2NormSquared(space).translated(target)
# Define the lie group to use.
lie_grp_type = 'affine'
if lie_grp_type == 'gln':
lie_grp = lgd.GLn(space.ndim)
deform_action = lgd.MatrixImageAction(lie_grp, space)
elif lie_grp_type == 'son':
lie_grp = lgd.SOn(space.ndim)
deform_action = lgd.MatrixImageAction(lie_grp, space)
elif lie_grp_type == 'sln':
lie_grp = lgd.SLn(space.ndim)
deform_action = lgd.MatrixImageAction(lie_grp, space)
elif lie_grp_type == 'affine':
lie_grp = lgd.AffineGroup(space.ndim)
deform_action = lgd.MatrixImageAffineAction(lie_grp, space)
elif lie_grp_type == 'rigid':
lie_grp = lgd.EuclideanGroup(space.ndim)
deform_action = lgd.MatrixImageAffineAction(lie_grp, space)
else:
assert False
# Define what regularizer to use
regularizer = 'determinant'
if regularizer == 'image':
# Create set of all points in space
W = space.tangent_bundle
w = W.element(space.points().T)
# Create regularizing functional
regularizer = 0.01 * odl.solvers.L2NormSquared(W).translated(w)
# Create action
    regularizer_action = lgd.ProductSpaceAction(deform_action, W.size)
elif regularizer == 'point':
W = odl.ProductSpace(odl.rn(space.ndim), 3)
w = W.element([[0, 0],
[0, 1],
[1, 0]])
# Create regularizing functional
regularizer = 0.01 * odl.solvers.L2NormSquared(W).translated(w)
# Create action
if lie_grp_type == 'affine' or lie_grp_type == 'rigid':
        point_action = lgd.MatrixVectorAffineAction(lie_grp, W[0])
else:
point_action = lgd.MatrixVectorAction(lie_grp, W[0])
regularizer_action = lgd.ProductSpaceAction(point_action, W.size)
elif regularizer == 'determinant':
W = odl.rn(1)
w = W.element([1])
# Create regularizing functional
regularizer = 0.2 * odl.solvers.L2NormSquared(W).translated(w)
# Create action
regularizer_action = lgd.MatrixDeterminantAction(lie_grp, W)
else:
assert False
# Initial guess
g = lie_grp.identity
# Combine action and functional into single object.
action = lgd.ProductSpaceAction(deform_action, regularizer_action)
x = action.domain.element([template, w]).copy()
f = odl.solvers.SeparableSum(data_matching, regularizer)
# Show some results, reuse the plot
template.show('template')
target.show('target')
# Create callback that displays the current iterate and prints the function
# value
callback = odl.solvers.CallbackShow(lie_grp_type, step=10, indices=0)
callback &= odl.solvers.CallbackPrint(f)
# Solve via gradient flow
lgd.gradient_flow_solver(x, f, g, action,
niter=500, line_search=0.2, callback=callback)
|
renalreg/radar
|
tests/auth/test_passwords.py
|
Python
|
agpl-3.0
| 1,704 | 0.000587 |
from radar.auth.passwords import (
check_password_hash,
generate_password,
generate_password_hash,
get_password_length,
is_strong_password,
password_to_nato_str,
)
from radar.models.users import User
def test_password_to_nato_str():
password = 'aAzZ123'
assert password_to_nato_str(password) == 'lower alfa, UPPER ALFA, lower zulu, UPPER ZULU, ONE, TWO, THREE'
def test_password_hash():
password = 'password123'
password_hash = generate_password_hash('password123')
assert password_hash != password
assert check_password_hash(password_hash, password)
def test_generate_password(app):
with app.app_context():
password = generate_password()
assert len(password) == get_password_length()
def test_weak_passwords(app):
with app.app_context():
assert not is_strong_password('password123')
def test_strong_passwords(app):
with app.app_context():
assert is_strong_password('besiderisingwoodennearer')
assert is_strong_password('7pJnW4yUWx')
def test_weak_passwords_for_user(app):
user = User()
user.username = 'dtclihbswm'
user.email = 'rihylunxov@example.org'
user.first_name = 'fvgmptirzl'
    user.last_name = 'uehnpqjarf'
suffix = 'hello418'
username_password = user.username + suffix
email_password = user.email + suffix
    first_name_password = user.first_name + suffix
last_name_password = user.last_name + suffix
with app.app_context():
assert is_strong_password(username_password)
assert is_strong_password(email_password)
assert is_strong_password(first_name_password)
assert is_strong_password(last_name_password)
|
immanetize/nikola
|
nikola/packages/tzlocal/windows_tz.py
|
Python
|
mit
| 26,452 | 0 |
# This file is autogenerated by the get_windows_info.py script
# Do not edit.
win_tz = {
'AUS Central Standard Time': 'Australia/Darwin',
'AUS Eastern Standard Time': 'Australia/Sydney',
'Afghanistan Standard Time': 'Asia/Kabul',
'Alaskan Standard Time': 'America/Anchorage',
'Arab Standard Time': 'Asia/Riyadh',
'Arabian Standard Time': 'Asia/Dubai',
'Arabic Standard Time': 'Asia/Baghdad',
'Argentina Standard Time': 'America/Buenos_Aires',
'Atlantic Standard Time': 'America/Halifax',
'Azerbaijan Standard Time': 'Asia/Baku',
'Azores Standard Time': 'Atlantic/Azores',
'Bahia Standard Time': 'America/Bahia',
'Bangladesh Standard Time': 'Asia/Dhaka',
'Canada Central Standard Time': 'America/Regina',
'Cape Verde Standard Time': 'Atlantic/Cape_Verde',
'Caucasus Standard Time': 'Asia/Yerevan',
'Cen. Australia Standard Time': 'Australia/Adelaide',
'Central America Standard Time': 'America/Guatemala',
'Central Asia Standard Time': 'Asia/Almaty',
'Central Brazilian Standard Time': 'America/Cuiaba',
'Central Europe Standard Time': 'Europe/Budapest',
'Central European Standard Time': 'Europe/Warsaw',
'Central Pacific Standard Time': 'Pacific/Guadalcanal',
'Central Standard Time': 'America/Chicago',
'Central Standard Time (Mexico)': 'America/Mexico_City',
'China Standard Time': 'Asia/Shanghai',
'Dateline Standard Time': 'Etc/GMT+12',
'E. Africa Standard Time': 'Africa/Nairobi',
'E. Australia Standard Time': 'Australia/Brisbane',
'E. Europe Standard Time': 'Asia/Nicosia',
'E. South America Standard Time': 'America/Sao_Paulo',
'Eastern Standard Time': 'America/New_York',
'Egypt Standard Time': 'Africa/Cairo',
'Ekaterinburg Standard Time': 'Asia/Yekaterinburg',
'FLE Standard Time': 'Europe/Kiev',
'Fiji Standard Time': 'Pacific/Fiji',
'GMT Standard Time': 'Europe/London',
'GTB Standard Time': 'Europe/Bucharest',
'Georgian Standard Time': 'Asia/Tbilisi',
'Greenland Standard Time': 'America/Godthab',
'Greenwich Standard Time': 'Atlantic/Reykjavik',
'Hawaiian Standard Time': 'Pacific/Honolulu',
'India Standard Time': 'Asia/Calcutta',
'Iran Standard Time': 'Asia/Tehran',
'Israel Standard Time': 'Asia/Jerusalem',
'Jordan Standard Time': 'Asia/Amman',
'Kaliningrad Standard Time': 'Europe/Kaliningrad',
'Korea Standard Time': 'Asia/Seoul',
'Libya Standard Time': 'Africa/Tripoli',
'Magadan Standard Time': 'Asia/Magadan',
'Mauritius Standard Time': 'Indian/Mauritius',
'Middle East Standard Time': 'Asia/Beirut',
'Montevideo Standard Time': 'America/Montevideo',
'Morocco Standard Time': 'Africa/Casablanca',
'Mountain Standard Time': 'America/Denver',
'Mountain Standard Time (Mexico)': 'America/Chihuahua',
'Myanmar Standard Time': 'Asia/Rangoon',
'N. Central Asia Standard Time': 'Asia/Novosibirsk',
'Namibia Standard Time': 'Africa/Windhoek',
'Nepal Standard Time': 'Asia/Katmandu',
'New Zealand Standard Time': 'Pacific/Auckland',
'Newfoundland Standard Time': 'America/St_Johns',
'North Asia East Standard Time': 'Asia/Irkutsk',
'North Asia Standard Time': 'Asia/Krasnoyarsk',
'Pacific SA Standard Time': 'America/Santiago',
'Pacific Standard Time': 'America/Los_Angeles',
'Pacific Standard Time (Mexico)': 'America/Santa_Isabel',
'Pakistan Standard Time': 'Asia/Karachi',
'Paraguay Standard Time': 'America/Asuncion',
'Romance Standard Time': 'Europe/Paris',
'Russian Standard Time': 'Europe/Moscow',
'SA Eastern Standard Time': 'America/Cayenne',
'SA Pacific Standard Time': 'America/Bogota',
'SA Western Standard Time': 'America/La_Paz',
'SE Asia Standard Time': 'Asia/Bangkok',
'Samoa Standard Time': 'Pacific/Apia',
'Singapore Standard Time': 'Asia/Singapore',
'South Africa Standard Time': 'Africa/Johannesburg',
'Sri Lanka Standard Time': 'Asia/Colombo',
'Syria Standard Time': 'Asia/Damascus',
'Taipei Standard Time': 'Asia/Taipei',
'Tasmania Standard Time': 'Australia/Hobart',
'Tokyo Standard Time': 'Asia/Tokyo',
'Tonga Standard Time': 'Pacific/Tongatapu',
'Turkey Standard Time': 'Europe/Istanbul',
'US Eastern Standard Time': 'America/Indianapolis',
'US Mountain Standard Time': 'America/Phoenix',
'UTC': 'Etc/GMT',
'UTC+12': 'Etc/GMT-12',
'UTC-02': 'Etc/GMT+2',
'UTC-11': 'Etc/GMT+11',
'Ulaanbaatar Standard Time': 'Asia/Ulaanbaatar',
'Venezuela Standard Time': 'America/Caracas',
'Vladivostok Standard Time': 'Asia/Vladivostok',
'W. Australia Standard Time': 'Australia/Perth',
'W. Central Africa Standard Time': 'Africa/Lagos',
'W. Europe Standard Time': 'Europe/Berlin',
'West Asia Standard Time': 'Asia/Tashkent',
'West Pacific Standard Time': 'Pacific/Port_Moresby',
'Yakutsk Standard Time': 'Asia/Yakutsk'
}
# Old name for the win_tz variable:
tz_names = win_tz
tz_win = {
'Africa/Abidjan': 'Greenwich Standard Time',
'Africa/Accra': 'Greenwich Standard Time',
'Africa/Addis_Ababa': 'E. Africa Standard Time',
'Africa/Algiers': 'W. Central Africa Standard Time',
'Africa/Asmera': 'E. Africa Standard Time',
'Africa/Bamako': 'Greenwich Standard Time',
'Africa/Bangui': 'W. Central Africa Standard Time',
'Africa/Banjul': 'Greenwich Standard Time',
    'Africa/Bissau': 'Greenwich Standard Time',
'Africa/Blantyre': 'South Africa Standard Time',
'Africa/Brazzaville': 'W. Central Africa Standard Time',
'Africa/Bujumbura': 'South Africa Standard Time',
'Africa/Cairo': 'Egypt Standard Time',
'Africa/Casablanca': 'Morocco Standard Time',
'Africa/Ceuta': 'Romance Standard Time',
'Africa/Conakry': 'Greenwich Standard Time',
'Africa/Dakar': 'Greenwich Standard Time',
'Africa/Dar_es_Salaam': 'E. Africa Standard Time',
    'Africa/Djibouti': 'E. Africa Standard Time',
'Africa/Douala': 'W. Central Africa Standard Time',
'Africa/El_Aaiun': 'Morocco Standard Time',
'Africa/Freetown': 'Greenwich Standard Time',
'Africa/Gaborone': 'South Africa Standard Time',
'Africa/Harare': 'South Africa Standard Time',
'Africa/Johannesburg': 'South Africa Standard Time',
'Africa/Juba': 'E. Africa Standard Time',
'Africa/Kampala': 'E. Africa Standard Time',
'Africa/Khartoum': 'E. Africa Standard Time',
'Africa/Kigali': 'South Africa Standard Time',
'Africa/Kinshasa': 'W. Central Africa Standard Time',
'Africa/Lagos': 'W. Central Africa Standard Time',
'Africa/Libreville': 'W. Central Africa Standard Time',
'Africa/Lome': 'Greenwich Standard Time',
'Africa/Luanda': 'W. Central Africa Standard Time',
'Africa/Lubumbashi': 'South Africa Standard Time',
'Africa/Lusaka': 'South Africa Standard Time',
'Africa/Malabo': 'W. Central Africa Standard Time',
'Africa/Maputo': 'South Africa Standard Time',
'Africa/Maseru': 'South Africa Standard Time',
'Africa/Mbabane': 'South Africa Standard Time',
'Africa/Mogadishu': 'E. Africa Standard Time',
'Africa/Monrovia': 'Greenwich Standard Time',
'Africa/Nairobi': 'E. Africa Standard Time',
'Africa/Ndjamena': 'W. Central Africa Standard Time',
'Africa/Niamey': 'W. Central Africa Standard Time',
'Africa/Nouakchott': 'Greenwich Standard Time',
'Africa/Ouagadougou': 'Greenwich Standard Time',
'Africa/Porto-Novo': 'W. Central Africa Standard Time',
'Africa/Sao_Tome': 'Greenwich Standard Time',
'Africa/Tripoli': 'Libya Standard Time',
'Africa/Tunis': 'W. Central Africa Standard Time',
'Africa/Windhoek': 'Namibia Standard Time',
'America/Anchorage': 'Alaskan Standard Time',
'America/Anguilla': 'SA Western Standard Time',
'America/Antigua': 'SA Western Standard Time',
'America/Araguaina': 'SA Eastern Standard Time',
'America/Argentina/La_Rioja': 'Argentina Standard Time',
'America/Argentina/Rio_Gallegos': 'Argentina Standard Time',
'America/Argentina/Salta': 'Argentina Standard Time',
'America/Argentina/San_Juan': 'Argentina Standard Time',
'America/Argentina/S
|
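A small sketch of how the two mappings are typically consumed, translating between a Windows zone name and an Olson/IANA name; it assumes the win_tz and tz_win dictionaries above are in scope, and the entries used are taken verbatim from them:

print(win_tz['Central Europe Standard Time'])   # Europe/Budapest
print(win_tz['FLE Standard Time'])              # Europe/Kiev
print(tz_win['Africa/Cairo'])                   # Egypt Standard Time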
Kitware/arctic-viewer
|
scripts/examples/vtk/medical/head-ct-volume.py
|
Python
|
bsd-3-clause
| 3,622 | 0.002485 |
# -----------------------------------------------------------------------------
# Download data:
# - Browser:
# http://midas3.kitware.com/midas/folder/10409 => VisibleMale/vm_head_frozenct.mha
# - Terminal
# curl "http://midas3.kitware.com/midas/download?folders=&items=235235" -o vm_head_frozenct.mha
# -----------------------------------------------------------------------------
from vtk import *
from vtk.web.query_data_model import *
from vtk.web.dataset_builder import *
# -----------------------------------------------------------------------------
# User configuration
# -----------------------------------------------------------------------------
dataset_destination_path = '/Users/seb/Desktop/vm_head_frozenct_vi_%s_%s_%s'
file_path = '/Users/seb/Downloads/vm_head_frozenct.mha'
field = 'MetaImage'
fieldRange = [0.0, 4095.0]
nbSteps = 4
# -----------------------------------------------------------------------------
# VTK Helper methods
# -----------------------------------------------------------------------------
def updatePieceWise(pwf, dataRange, center, halfSpread):
scalarOpacity.RemoveAllPoints()
if (center - halfSpread) <= dataRange[0]:
scalarOpacity.AddPoint(dataRange[0], 0.0)
        scalarOpacity.AddPoint(center, 1.0)
    else:
        scalarOpacity.AddPoint(dataRange[0], 0.0)
scalarOpacity.AddPoint(center - halfSpread, 0.0)
scalarOpacity.AddPoint(center, 1.0)
if (center + halfSpread) >= dataRange[1]:
scalarOpacity.AddPoint(dataRange[1], 0.0)
else:
scalarOpacity.AddPoint(center + halfSpread, 0.0)
scalarOpacity.AddPoint(dataRange[1], 0.0)
# -----------------------------------------------------------------------------
# VTK Pipeline creation
# -----------------------------------------------------------------------------
reader = vtkMetaImageReader()
reader.SetFileName(file_path)
mapper = vtkGPUVolumeRayCastMapper()
mapper.SetInputConnection(reader.GetOutputPort())
mapper.RenderToImageOn()
colorFunction = vtkColorTransferFunction()
colorFunction.AddRGBPoint(fieldRange[0], 1.0, 1.0, 1.0)
colorFunction.AddRGBPoint(fieldRange[1], 1.0, 1.0, 1.0)
halfSpread = (fieldRange[1] - fieldRange[0]) / float(2*nbSteps)
centers = [ fieldRange[0] + halfSpread*float(2*i+1) for i in range(nbSteps)]
scalarOpacity = vtkPiecewiseFunction()
volumeProperty = vtkVolumeProperty()
volumeProperty.ShadeOn()
volumeProperty.SetInterpolationType(VTK_LINEAR_INTERPOLATION)
volumeProperty.SetColor(colorFunction)
volumeProperty.SetScalarOpacity(scalarOpacity)
volume = vtkVolume()
volume.SetMapper(mapper)
volume.SetProperty(volumeProperty)
window = vtkRenderWindow()
window.SetSize(499, 400)
renderer = vtkRenderer()
window.AddRenderer(renderer)
renderer.AddVolume(volume)
renderer.ResetCamera()
window.Render()
# Camera setting
camera = {
'position': [-0.264, -890.168, -135.0],
'focalPoint': [-0.264, -30.264, -135.0],
'viewUp': [0,0,1]
}
update_camera(renderer, camera)
# -----------------------------------------------------------------------------
# Data Generation
# -----------------------------------------------------------------------------
# Create Image Builder
vcdsb = SortedCompositeDataSetBuilder(dataset_destination_path % (nbSteps, halfSpread, window.GetSize()[0]), {'type': 'spherical', 'phi': [0], 'theta': [0]})
idx = 0
vcdsb.start(window, renderer)
for center in centers:
idx += 1
updatePieceWise(scalarOpacity, fieldRange, center, halfSpread)
# Capture layer
vcdsb.activateLayer(field, center)
# Write data
vcdsb.writeData(mapper)
vcdsb.stop()
|
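For the configuration above, the opacity sweep arithmetic works out as follows; a standalone recomputation of halfSpread and the four window centers:

# Reproduces the center/spread arithmetic used by the script above.
fieldRange = [0.0, 4095.0]
nbSteps = 4
halfSpread = (fieldRange[1] - fieldRange[0]) / float(2 * nbSteps)
centers = [fieldRange[0] + halfSpread * float(2 * i + 1) for i in range(nbSteps)]
print(halfSpread)   # 511.875
print(centers)      # [511.875, 1535.625, 2559.375, 3583.125]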
SymbiFlow/prjxray
|
utils/xjson.py
|
Python
|
isc
| 558 | 0 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import sys
import json
from prjxray.xjson import pprint
if __name__ == "__main__":
if len(sys.argv) == 1:
import doctest
doctest.testmod()
else:
        assert len(sys.argv) == 2
        d = json.load(open(sys.argv[1]))
pprint(sys.stdout, d)
|
naparuba/check-linux-by-ssh
|
check_ntp_sync_by_ssh.py
|
Python
|
mit
| 8,784 | 0.006717 |
#!/usr/bin/env python2
# Copyright (C) 2013:
# Gabes Jean, naparuba@gmail.com
# Pasche Sebastien, sebastien.pasche@leshop.ch
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
'''
This script is a check of NTP synchronization over ssh without
having an agent on the other side
'''
import os
import sys
import optparse
# Ok try to load our directory to load the plugin utils.
my_dir = os.path.dirname(__file__)
sys.path.insert(0, my_dir)
try:
import schecks
except ImportError:
print "ERROR : this plugin needs the local schecks.py lib. Please install it"
sys.exit(2)
VERSION = "0.1"
DEFAULT_WARNING = '10'
DEFAULT_CRITICAL = '60'
NTPQ_PATH=r"""ntpq"""
DEFAULT_DELAY_WARNING = '0.100' # 100 ms
DEFAULT_DELAY_CRITICAL = '0.150' # 150 ms
DEFAULT_OFFSET_WARNING = '0.0025' # 2.5 ms
DEFAULT_OFFSET_CRITICAL = '0.005' # 5ms
def get_ntp_sync(client):
# We are looking for a line like
# remote refid st t when poll reach delay offset jitter
#==============================================================================
# 127.127.1.0 .LOCL. 10 l 53 64 377 0.000 0.000 0.001
# *blabla blabla 3 u 909 1024 377 0.366 -3.200 5.268
#raw = r"""/usr/sbin/ntpq -p"""
raw = "%s -p" % NTPQ_PATH
stdin, stdout, stderr = client.exec_command("export LC_LANG=C && unset LANG && export PATH=$PATH:/usr/bin:/usr/sbin && %s" % raw)
errs = ''.join(l for l in stderr)
if errs:
print "Error: %s" % errs.strip()
client.close()
sys.exit(2)
ref_delay = None
for line in stdout:
line = line.strip()
# We want the line of the reference only
if not line or not line.startswith('*'):
continue
tmp = [e for e in line.split(' ') if e]
ref_delay = abs(float(tmp[8])) / 1000
# Before return, close the client
client.close()
return ref_delay
def get_chrony_sync(client):
# We are looking for a line like
#Reference ID : 195.141.190.190 (time.sunrise.net)
#Stratum : 3
#Ref time (UTC) : Fri Jun 28 09:03:22 2013
#System time : 0.000147811 seconds fast of NTP time
#Last offset : 0.000177244 seconds
#RMS offset : 0.000363876 seconds
#Frequency : 26.497 ppm slow
#Residual freq : 0.024 ppm
#Skew : 0.146 ppm
#Root delay : 0.008953 seconds
#Root dispersion : 0.027807 seconds
#Update interval : 1024.1 seconds
#Leap status : Normal
raw = r"""chronyc tracking"""
stdin, stdout, stderr = client.exec_command("export LC_LANG=C && unset LANG && %s" % raw)
errs = ''.join(l for l in stderr)
if errs:
print "Error: %s" % errs.strip()
client.close()
sys.exit(2)
delay = offset = None
for line in stdout:
line = line.strip()
tmp = line.split(':')
if len(tmp) != 2:
continue
if line.startswith('RMS offset'):
offset = float(tmp[1].strip().split(' ')[0])
if line.startswith('Root delay'):
delay = float(tmp[1].strip().split(' ')[0])
# Before return, close the client
client.close()
return delay, offset
parser = optparse.OptionParser(
"%prog [options]", version="%prog " + VERSION)
parser.add_option('-H', '--hostname',
dest="hostname", help='Hostname to connect to')
parser.add_option('-p', '--port',
dest="port", type="int", default=22,
help='SSH port to connect to. Default : 22')
parser.add_option('-i', '--ssh-key',
dest="ssh_key_file",
help='SSH key file to use. By default will take ~/.ssh/id_rsa.')
parser.add_option('-u', '--user',
                  dest="user", help='remote user to use. By default shinken.')
parser.add_option('-P', '--passphrase',
dest="passphrase", help='SSH key passphrase. By default will use void')
parser.add_option('-w', '--warning',
dest="warning",
help='Warning delay for ntp, like 10. couple delay,offset value for chrony '
'0.100,0.0025')
parser.add_option('-c', '--critical',
dest="critical",
                  help='Critical delay for ntp, like 60. couple delay,offset value for chrony '
'0.150,0.005')
parser.add_option('-C', '--chrony', action='store_true',
dest="chrony", help='check Chrony instead of ntpd')
parser.add_option('-n', '--ntpq',
                  dest="ntpq", help="remote ntpq binary path")
if __name__ == '__main__':
# Ok first job : parse args
opts, args = parser.parse_args()
if args:
parser.error("Does not accept any argument.")
port = opts.port
    hostname = opts.hostname or ''
ntpq = opts.ntpq
if ntpq:
NTPQ_PATH=ntpq
ssh_key_file = opts.ssh_key_file or os.path.expanduser('~/.ssh/id_rsa')
user = opts.user or 'shinken'
passphrase = opts.passphrase or ''
chrony = opts.chrony
if not chrony:
        # Try to get numeric warning/critical values
s_warning = opts.warning or DEFAULT_WARNING
        s_critical = opts.critical or DEFAULT_CRITICAL
warning, critical = schecks.get_warn_crit(s_warning, s_critical)
else:
if opts.warning:
warning_delay = float(opts.warning.split(',')[0])
warning_offset = float(opts.warning.split(',')[1])
else:
warning_delay = float(DEFAULT_DELAY_WARNING)
warning_offset = float(DEFAULT_OFFSET_WARNING)
if opts.critical:
critical_delay = float(opts.critical.split(',')[0])
critical_offset = float(opts.critical.split(',')[1])
else:
critical_delay = float(DEFAULT_DELAY_CRITICAL)
critical_offset = float(DEFAULT_OFFSET_CRITICAL)
# Ok now connect, and try to get values for memory
client = schecks.connect(hostname, port, ssh_key_file, passphrase, user)
if not chrony:
ref_delay = get_ntp_sync(client)
if ref_delay is None:
print "Warning : There is no sync ntp server"
sys.exit(1)
perfdata = "delay=%.2fs;%.2fs;%.2fs;;" % (ref_delay, warning, critical)
if ref_delay > critical:
print "Critical: ntp delay is %.2fs | %s" %(ref_delay, perfdata)
sys.exit(2)
if ref_delay > warning:
print "Warning: ntp delay is %.2fs | %s" %(ref_delay, perfdata)
        sys.exit(1)
print "OK: ntp delay is %.2fs | %s" %(ref_delay, perfdata)
sys.exit(0)
else:
delay, offset = get_chrony_sync(client)
if delay is None or offset is None:
print "Warning : cannot get delay or offset value"
sys.exit(1)
perfdata = "delay=%.2fs;%.2fs;%.2fs;;" % (delay, warning_delay, critical_delay)
perfdata += "offset=%.4fs;%.4fs;%.4fs;;" % (offset, warning_offset, critical_offset)
if delay > critical_delay:
print "Critical: ntp/chrony delay
|
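Tracing get_ntp_sync()'s parsing with the sample peer line quoted in its comments; the starred line is the selected reference, and column 8 is the offset in milliseconds:

# Stand-alone trace of the parsing done by get_ntp_sync() above.
line = "*blabla          blabla     3 u  909 1024  377    0.366   -3.200   5.268"
tmp = [e for e in line.split(' ') if e]
ref_delay = abs(float(tmp[8])) / 1000   # offset column, ms -> s
print(ref_delay)                        # 0.0032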
PeRDy/performance-tools
|
performance_tools/urls_flow/backends/base.py
|
Python
|
gpl-2.0
| 2,681 | 0.001865 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from abc import ABCMeta
import csv
from performance_tools.exceptions import ProgressBarException, ElasticsearchException
from performance_tools.utils.progress_bar import create_progress_bar
class BaseURLFlowBackend(object):
"""Collect URL flow from backend. URL Flow: Referrer, Request, Time.
It's necessary to implement extract_url_from_result and __iter__ methods.
"""
__metaclass__ = ABCMeta
def __init__(self):
self._total_hits = 0
    def extract_url_from_result(self, result, regex=None):
        """Extract origin url and destination url for each entry in result and construct a list with them.
:param result: results obtained from backend in each iteration.
:type result: object
:param regex: Regular expression to normalize id's in URL.
:type regex: re
:return: List of origin urls and destination urls.
:rtype: list
"""
raise NotImplementedError
def to_csv(self, filename, regex=None, verbose=2):
"""Save results as a CSV file.
:param filename: CSV output file.
:type filename: str
:raise: ValueError if not found any result.
"""
progress = None
try:
with open(filename, 'w') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(['Referrer', 'Request', 'Time'])
count = 0
for result in self:
# Create progress bar or down verbose level
if verbose == 2 and progress is None:
try:
progress = create_progress_bar(self._total_hits, 'Extract URLs', 'url')
except ProgressBarException:
verbose = 1
# Write results to csv
rows = self.extract_url_from_result(result, regex)
writer.writerows(rows)
# Update progress
count += len(rows)
if verbose == 2:
progress.update(count if count < self._total_hits else self._total_hits)
elif verbose == 1:
print "{:d}/{:d} ({:d}%)".format(count, self._total_hits, count * 100 / self._total_hits)
except ZeroDivisionError:
raise ElasticsearchException("Search doesn't return any result")
except KeyError:
raise ElasticsearchException("Invalid result")
def __iter__(self):
"""Iterate over each result.
"""
raise NotImplementedError
|
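A minimal illustrative subclass (hypothetical, purely in-memory) showing the two hooks a concrete backend must provide: __iter__ yields result batches and extract_url_from_result turns one batch into [referrer, request, time] rows. It assumes BaseURLFlowBackend above is importable:

class ListURLFlowBackend(BaseURLFlowBackend):
    def __init__(self, batches):
        super(ListURLFlowBackend, self).__init__()
        self._batches = batches
        self._total_hits = sum(len(batch) for batch in batches)

    def extract_url_from_result(self, result, regex=None):
        # Each entry is already a (referrer, request, time) triple here.
        return [list(entry) for entry in result]

    def __iter__(self):
        return iter(self._batches)

backend = ListURLFlowBackend([[('/home', '/search?q=x', 0.12)]])
backend.to_csv('url_flow.csv', verbose=1)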
megarcia/GT16_JGRA
|
source/process_NCEI_03_prcp_180d.py
|
Python
|
gpl-3.0
| 5,554 | 0 |
"""
Python script 'process_NCEI_03_prcp_180d.py'
by Matthew Garcia, PhD student
Dept. of Forest and Wildlife Ecology
University of Wisconsin - Madison
matt.e.garcia@gmail.com
Copyright (C) 2015-2016 by Matthew Garcia
Licensed Gnu GPL v3; see 'LICENSE_GnuGPLv3.txt' for complete terms
Send questions, bug reports, any related requests to matt.e.garcia@gmail.com
See also 'README.md', 'DISCLAIMER.txt', 'CITATION.txt', 'ACKNOWLEDGEMENTS.txt'
Treat others as you would be treated. Pay it forward. Valar dohaeris.
PURPOSE: Temporal calculation of PRCP 180-day accumulation
DEPENDENCIES: h5py, numpy
'process_NCEI_03_aux' module has its own requirements
USAGE: '$ python process_NCEI_03_prcp_180d.py NCEI_WLS_1983 1983 ./grids'
INPUT: copied '.h5' file from process_NCEI_03_preprocess.py
(with the naming convention 'grids/[YYYYMMDD]_NCEI_grids_2.h5')
OUTPUT: updated daily '.h5' file with new accumulation grid
(with the naming convention 'grids/[YYYYMMDD]_NCEI_grids_2.h5')
year-end '.h5' and '.pickle' files with rolling accounted variable
"""
import sys
import datetime
import glob
import h5py as hdf
import numpy as np
from process_NCEI_03_aux import get_stn_lists, write_stn_lists, \
write_to_file, cube_sum
def message(char_string):
"""
prints a string to the terminal and flushes the buffer
"""
print char_string
sys.stdout.flush()
return
message(' ')
message('process_NCEI_03_prcp_180d.py started at %s' %
datetime.datetime.now().isoformat())
message(' ')
#
if len(sys.argv) < 4:
    message('input warning: no input directory indicated, using ./grids')
path = './grids'
else:
path = sys.argv[3]
#
if len(sys.argv) < 3:
message('input error: need year to process')
sys.exit(1)
else:
this_year = int(sys.argv[2])
#
if len(sys.argv) < 2:
message('input error: need prefix for weather data h5 file')
sys.exit(1)
else:
NCEIfname = sys.argv[1]
h5infname = '%s/../data/%s_processed.h5' % (path, NCEIfname)
#
message('reading dates information from %s' % h5infname)
with hdf.File(h5infname, 'r') as h5infile:
all_dates = np.copy(h5infile['dates'])
message('- information for %d total dates found' % len(all_dates))
dates = sorted([j for j in all_dates if int(j // 1E4) == this_year])
message('- processing %d dates in %d' % (len(dates), this_year))
message(' ')
#
prev_year = this_year - 1
vars_files = sorted(glob.glob('%s/*_year_end_prcp_180d.h5' % path))
use_vars_file = False
if len(vars_files) > 0:
for vars_file in vars_files:
if str(prev_year) in vars_file:
use_vars_file = True
varfname = vars_file
break
#
# if rolling accounting variable files exist to be carried over
# from previous year
if use_vars_file:
message('extracting prcp_180d datacube from %s' % varfname)
with hdf.File(varfname, 'r') as h5infile:
        nrows = np.copy(h5infile['nrows'])
        ncols = np.copy(h5infile['ncols'])
prcp_180d = np.copy(h5infile['prcp_180d'])
message('extracting station lists')
prcp_180d_stns = get_stn_lists(path, prev_year, 'prcp_180d_stns')
else: # otherwise, initialize the variable space(s)
h5infname = '%s/%d_NCEI_grids_2.h5' % (path, dates[0])
message('extracting grid information from %s' % h5infname)
with hdf.File(h5infname, 'r') as h5infile:
        nrows = np.copy(h5infile['grid/nrows'])
ncols = np.copy(h5infile['grid/ncols'])
message('establishing prcp_180d datacube')
prcp_180d = np.zeros((180, nrows, ncols))
prcp_180d_stns = []
message(' ')
#
for date in dates:
h5infname = '%s/%d_NCEI_grids_2.h5' % (path, date)
message('extracting PRCP grid from %s' % h5infname)
with hdf.File(h5infname, 'r') as h5infile:
prcp_stns = np.copy(h5infile['stns/prcp_stns'])
prcp = np.copy(h5infile['grid_prcp'])
#
year = date // 10000
month = (date - (year * 10000)) // 100
day = date - (year * 10000) - (month * 100)
#
grid_prcp_180d, prcp_180d_stns_all, prcp_180d, prcp_180d_stns = \
cube_sum(180, prcp_180d, prcp, prcp_180d_stns, prcp_stns)
message('- calculated updated 180-day running precipitation total, \
mean %.1f' % np.mean(grid_prcp_180d))
#
h5outfname = '%s/%d_NCEI_grids_2.h5' % (path, date)
message('saving grids to %s' % h5outfname)
with hdf.File(h5outfname, 'r+') as h5outfile:
del h5outfile['meta/last_updated']
h5outfile.create_dataset('meta/last_updated',
data=datetime.datetime.now().isoformat())
del h5outfile['meta/at']
outstr = 'prcp_180d'
h5outfile.create_dataset('meta/at', data=outstr)
write_to_file(h5outfile, 'prcp_180d_sum', grid_prcp_180d,
'prcp_180d_stns', prcp_180d_stns_all)
message(' ')
#
# save rolling accounting variable for next year's run
varfname = '%s/%d_year_end_prcp_180d.h5' % (path, this_year)
message('saving variable datacube to %s' % varfname)
with hdf.File(varfname, 'w') as h5outfile:
h5outfile.create_dataset('nrows', data=nrows)
h5outfile.create_dataset('ncols', data=ncols)
h5outfile.create_dataset('prcp_180d', data=prcp_180d,
dtype=np.float32, compression='gzip')
message('saving station lists')
write_stn_lists(path, this_year, 'prcp_180d_stns', prcp_180d_stns)
#
message('process_NCEI_03_prcp_180d.py completed at %s' %
datetime.datetime.now().isoformat())
message(' ')
sys.exit(0)
# end process_NCEI_03_prcp_180d.py
|
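The year-to-year bookkeeping above relies on a fixed-length rolling sum over a (days, rows, cols) datacube; a generic sketch of that idea (not the project's cube_sum(), whose actual implementation lives in process_NCEI_03_aux):

import numpy as np

def rolling_cube_sum(cube, new_grid):
    # Drop the oldest layer, append the newest, return the windowed total.
    cube = np.roll(cube, -1, axis=0)
    cube[-1] = new_grid
    return cube.sum(axis=0), cube

cube = np.zeros((180, 4, 4))            # toy 4x4 grid instead of nrows x ncols
total, cube = rolling_cube_sum(cube, np.ones((4, 4)))
print(total[0, 0])                      # 1.0 after the first day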
leopoul/mupy
|
muparse/__init__.py
|
Python
|
apache-2.0
| 582 | 0.001718 |
# Copyright 2012 Leonidas Poulopoulos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License
|
.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
patrickm/chromium.src
|
build/android/pylib/linker/test_runner.py
|
Python
|
bsd-3-clause
| 3,271 | 0.007031 |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs linker tests on a particular device."""
import logging
import os.path
import sys
import traceback
from pylib import constants
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.linker import test_case
from pylib.utils import apk_helper
# Name of the Android package to install for this to work.
_PACKAGE_NAME = 'ChromiumLinkerTest'
class LinkerExceptionTestResult(base_test_result.BaseTestResult):
"""Test result corresponding to a python exception in a host-custom test."""
def __init__(self, test_name, exc_info):
"""Constructs a LinkerExceptionTestResult object.
Args:
test_name: name of the test which raised an exception.
exc_info: exception info, ostensibly from sys.exc_info().
"""
exc_type, exc_value, exc_traceback = exc_info
trace_info = ''.join(traceback.format_exception(exc_type, exc_value,
exc_traceback))
log_msg = 'Exception:\n' + trace_info
super(LinkerExceptionTestResult, self).__init__(
test_name,
base_test_result.ResultType.FAIL,
log = "%s %s" % (exc_type, log_msg))
class LinkerTestRunner(base_test_runner.BaseTestRunner):
"""Orchestrates running a set of linker tests.
Any Python exceptions in the tests are caught and translated into a failed
result, rather than being re-raised on the main thread.
"""
#override
def __init__(self, device, tool, push_deps, cleanup_test_files):
"""Creates a new LinkerTestRunner.
Args:
device: Attached android device.
tool: Name of the Valgrind tool.
push_deps: If True, push all dependencies to the device.
cleanup_test_files: Whether or not to cleanup test files on device.
"""
    super(LinkerTestRunner, self).__init__(device, tool,
                                           push_deps,
                                           cleanup_test_files)
#override
def InstallTestPackage(self):
apk_path = os.path.join(
constants.GetOutDirectory(), 'apks', '%s.apk' % _PACKAGE_NAME)
if not os.path.exists(apk_path):
raise Exception('%s not found, please build it' % apk_path)
package_name = apk_helper.GetPackageName(apk_path)
self.adb.ManagedInstall(apk_path, package_name)
#override
def RunTest(self, test):
"""Sets up and runs a test case.
Args:
test: An object which is ostensibly a subclass of LinkerTestCaseBase.
Returns:
A TestRunResults object which contains the result produced by the test
and, in the case of a failure, the test that should be retried.
"""
assert isinstance(test, test_case.LinkerTestCaseBase)
try:
results = test.Run(self.device)
except Exception:
logging.exception('Caught exception while trying to run test: ' +
test.tagged_name)
exc_info = sys.exc_info()
results = base_test_result.TestRunResults()
results.AddResult(LinkerExceptionTestResult(
test.tagged_name, exc_info))
if not results.DidRunPass():
return results, test
else:
return results, None
|
EverythingAbout/Python
|
Searches/linear_search.py
|
Python
|
mit
| 775 | 0.009032 |
def linear_search(lst,size,value):
i = 0
while i < size:
if lst[i] == value:
return i
i = i + 1
return -1
def main():
lst = [-31, 0, 1, 2, 2, 4, 65, 83, 99, 782]
size = len(lst)
original_list = ""
value = int(input("\nInput a value to search for: "))
print("\nOriginal Array: ")
for i in lst:
original_list += str(i) + " "
print(original_list)
print("\nLinear Search Big O Notation:\n--> Best Case: O(1)\n--> Average Case: O(n)\n--> Worst Case: O(n)\n")
index = linear_search(lst,size,value)
if index == -1:
        print(str(value) + " was not found in that array\n")
else:
print(str(value) + " was found at index " + str(index))
if __name__ == '__main__':
main()
|
tocubed/noisily
|
tests/test_noise.py
|
Python
|
apache-2.0
| 2,684 | 0.007079 |
import pytest
import numpy as np
import noisily as ns
# FIXME This has got to be an abuse of fixtures, right?
@pytest.fixture(scope='module', params=[(1, 1), (37, 57), (128, 128)])
def indices2D(request):
shape = request.param
return np.transpose(np.indices(shape))
@pytest.fixture(scope='module', params=[(1, 1, 1), (29, 13, 31), (64, 64, 64)])
def indices3D(request):
shape = request.param
return np.transpose(np.indices(shape))
@pytest.fixture(scope='module', params=[(1, 1, 1, 1), (7, 11, 17, 13), (32, 32, 32, 32)])
def indices4D(request):
shape = request.param
return np.transpose(np.indices(shape))
@pytest.fixture(scope='module', params=[ns.perlin2D, ns.value2D, ns.open_simplex2D, ns.cell2D_range, ns.cell2D_range_inv, ns.cell2D_value, ns.cell2D_manhattan, ns.cell2D_manhattan_inv, ns.cell2D_manhattan_value])
def noise2D(request):
return request.param
@pytest.fixture(scope='module', params=[ns.perlin3D, ns.value3D, ns.open_simplex3D, ns.cell3D_range, ns.cell3D_range_inv, ns.cell3D_value, ns.cell3D_manhattan, ns.cell3D_manhattan_inv, ns.cell3D_manhattan_value])
def noise3D(request):
return request.param
@pytest.fixture(scope='module', params=[ns.perlin4D, ns.value4D, ns.open_simplex4D, ns.cell4D_range, ns.cell4D_range_inv, ns.cell4D_value, ns.cell4D_manhattan, ns.cell4D_manhattan_inv, ns.cell4D_manhattan_value])
def noise4D(request):
return request.param
@pytest.fixture(scope='module', params=[{'seed': 123}, {'period': 64}, {'seed': 12345, 'period': 16}])
def generator2D(request, noise2D):
return ns.generator(noise2D, **request.param)
@pytest.fixture(scope='module', params=[{'seed': 123}, {'period': 64}, {'seed': 12345, 'period': 16}])
def generator3D(request, noise3D):
return ns.generator(noise3D, **request.param)
@pytest.fixture(scope='module', params=[{'seed': 123}, {'period': 64}, {'seed': 12345, 'period': 16}])
def generator4D(request, noise4D):
return ns.generator(noise4D, **request.param)
def test_output2D(generator2D, indices2D):
output = generator2D(indices2D)
assert output.shape == indices2D.shape[:-1]
assert output.size == indices2D.size // 2
assert np.array_equal(output, generator2D(indices2D))
def test_output3D(generator3D, indices3D):
output = generator3D(indices3D)
assert output.shape == indices3D.shape[:-1]
assert output.size == indices3D.size // 3
assert np.array_equal(output, generator3D(indices3D))
def test_output4D(generator4D, indices4D):
output = generator4D(indices4D)
assert output.shape == indices4D.shape[:-1]
assert output.size == indices4D.size // 4
assert np.array_equal(output, generator4D(indices4D))
|
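A usage sketch of the generator API these fixtures exercise; it mirrors the test assertions that the output carries one value per index tuple, i.e. the index array's shape minus its last axis:

import numpy as np
import noisily as ns

gen = ns.generator(ns.perlin2D, seed=123)   # same construction as generator2D
coords = np.transpose(np.indices((4, 4)))   # shape (4, 4, 2)
values = gen(coords)                        # shape (4, 4)
assert values.shape == coords.shape[:-1]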