repo_name (string, 5-100) | path (string, 4-231) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k) | middle (string, 3-512) | suffix (string, 0-8.17k)
---|---|---|---|---|---|---|---|---
robinedwards/django-neomodel | django_neomodel/apps.py | Python | mit | 719 | 0.006954
from django.apps import AppConfig
from django.conf import settings
from neomodel import config
config.AUTO_INSTALL_LABELS = False
class NeomodelConfig(AppConfig):
name = 'django_neomodel'
verbose_name = 'Django neomodel'
def read_settings(self):
config.DATABASE_URL = getattr(settings, 'NEOMODEL_NEO4J_BOLT_URL', config.DATABASE_URL)
config.FORCE_TIMEZONE = getattr(settings, 'NEOMODEL_FORCE_TIMEZONE', False)
config.ENCRYPTED_CONNECTION = getattr(settings, 'NEOMODEL_ENCRYPTED_CONNECTION', False)
config.MAX_CONNECTION_POOL_SIZE = getattr(settings, 'NEOMODEL_MAX_CONNECTION_POOL_SIZE', config.MAX_CONNECTION_POOL_SIZE)
def ready(self):
self.read_settings()
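# Illustrative Django settings (a sketch, not part of apps.py): these are the
# NEOMODEL_* keys that read_settings() above looks up with getattr(); the values
# shown here are placeholders chosen for the example, not defaults taken from
# the project.
NEOMODEL_NEO4J_BOLT_URL = 'bolt://neo4j:password@localhost:7687'
NEOMODEL_FORCE_TIMEZONE = False
NEOMODEL_ENCRYPTED_CONNECTION = False
NEOMODEL_MAX_CONNECTION_POOL_SIZE = 50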

elvian/alfred | scripts/pollutionController.py | Python | mit | 1,497 | 0.038076
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from bs4 import BeautifulSoup
from urllib.request import urlopen
def getSoupAQHI():
html = urlopen("http://www.aqhi.gov.hk/en/aqhi/past-24-hours-aqhi45fd.html?stationid=80")
soup = BeautifulSoup(html, "lxml")
return soup
def getLatestAQHI(dataTable):
aqhiTable = dataTable.findAll('tr')[1].findAll('td')
aqhi = {}
aqhi['dateTime'] = aqhiTable[0].text
aqhi['index'] = aqhiTable[1].text
return aqhi
def getRawAQICN():
source = urlopen("http://aqicn.org/?city=HongKong/Central/Western&widgetscript&lang=en&size=xsmall&id=56d839cf2ad376.29520771")
source = source.read().decode('utf-8')
return source
def getLatestAQICN(source):
aqi = source.split("Air Pollution.")[1]
aqi = aqi.split("title")[1]
aqi = aqi.split("</div>")[0]
aqi = aqi.split(">")[1]
aqits = source.split("Updated on ")[1].strip()
aqits = aqits.split("<")[0]
aqhiData = {}
aqhiData['index'] = aqi
aqhiData['dateTime'] = aqits
return aqhiData
def getPollutionData():
soupAQHI = getSoupAQHI()
dataTableAQHI = soupAQHI.find('table', {'id' : 'dd_stnh24_table'})
aqhi = getLatestAQHI(dataTableAQHI)
rawAQICN = getRawAQICN()
aqicn = getLatestAQICN(rawAQICN)
data = {}
data['AQHI'] = aqhi['index']
data['AQHITS'] = aqhi['dateTime']
data['AQICN'] = aqicn['index']
data['AQICNTS'] = aqicn['dateTime']
return data
def testModule():
data = getPollutionData()
print(data['AQHI'] + " " + data['AQHITS'] + " " + data['AQICN'] + " " + data['AQICNTS'])

geier/alot | alot/addressbook/__init__.py | Python | gpl-3.0 | 1,232 | 0
# Copyright (C) 2011-2015 Patrick Totzke <patricktotzke@gmail.com>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
from __future__ import absolute_import
import re
import abc
class AddressbookError(Exception):
pass
class AddressBook(object):
"""can look up email addresses and realnames for contacts.
.. note::
This is an abstract class that leaves :meth:`get_contacts`
unspecified. See :class:`AbookAddressBook` and
:class:`ExternalAddressbook` for implementations.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, ignorecase=True):
self.reflags = re.IGNORECASE if ignorecase else 0
@abc.abstractmethod
def get_contacts(self): # pragma no cover
"""list all contacts tuples in this abook as (name, email) tuples"""
return []
def lookup(self, query=''):
"""looks up all contacts where name or address match query"""
res = []
query = re.compile('.*%s.*' % query, self.reflags)
for name, email in self.get_contacts():
if query.match(name) or query.match(email):
res.append((name, email))
return res
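# Minimal concrete subclass (an illustrative sketch, not part of alot): it shows
# the contract the abstract class above leaves open -- get_contacts() returns
# (name, email) tuples, and lookup() then filters them with the compiled query.
class StaticAddressBook(AddressBook):
    def __init__(self, contacts, ignorecase=True):
        AddressBook.__init__(self, ignorecase=ignorecase)
        self._contacts = list(contacts)

    def get_contacts(self):
        return self._contacts

# book = StaticAddressBook([('Patrick Totzke', 'patricktotzke@gmail.com')])
# book.lookup('totzke')  # -> [('Patrick Totzke', 'patricktotzke@gmail.com')]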

titilambert/home-assistant | tests/components/bayesian/test_binary_sensor.py | Python | apache-2.0 | 22,326 | 0.000851
"""The test for the bayesian sensor platform."""
import json
import unittest
from homeassistant.components.bayesian import binary_sensor as bayesian
from homeassistant.components.homeassistant import (
DOMAIN as HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
)
from homeassistant.const import ATTR_ENTITY_ID, STATE_UNKNOWN
from homeassistant.setup import async_setup_component, setup_component
from tests.common import get_test_home_assistant
class TestBayesianBinarySensor(unittest.TestCase):
"""Test the threshold sensor."""
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_load_values_when_added_to_hass(self):
"""Test that sensor initializes with observations of relevant entities."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "off",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
}
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
self.hass.states.set("sensor.test_monitored", "off")
self.hass.block_till_done()
assert setup_component(self.hass, "binary_sensor", config)
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8
assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4
def test_unknown_state_does_not_influence_probability(self):
"""Test that an unknown state does not change the output probability."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "off",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
}
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
self.hass.states.set("sensor.test_monitored", STATE_UNKNOWN)
self.hass.block_till_done()
assert setup_component(self.hass, "binary_sensor", config)
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("observations") == []
def test_sensor_numeric_state(self):
"""Test sensor on numeric state platform observations."""
config = {
"binary_sensor": {
"platform": "bayesian",
"name": "Test_Binary",
"observations": [
{
"platform": "numeric_state",
"entity_id": "sensor.test_monitored",
"below": 10,
"above": 5,
"prob_given_true": 0.6,
},
{
"platform": "numeric_state",
"entity_id": "sensor.test_monitored1",
"below": 7,
"above": 5,
"prob_given_true": 0.9,
"prob_given_false": 0.1,
},
],
"prior": 0.2,
}
}
assert setup_component(self.hass, "binary_sensor", config)
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", 4)
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert [] == state.attributes.get("observations")
assert 0.2 == state.attributes.get("probability")
assert state.state == "off"
self.hass.states.set("sensor.test_monitored", 6)
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", 4)
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", 6)
self.hass.states.set("sensor.test_monitored1", 6)
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("observations")[0]["prob_given_true"] == 0.6
assert state.attributes.get("observations")[1]["prob_given_true"] == 0.9
assert state.attributes.get("observations")[1]["prob_given_false"] == 0.1
assert round(abs(0.77 - state.attributes.get("probability")), 7) == 0
assert state.state == "on"
self.hass.states.set("sensor.test_monitored", 6)
self.hass.states.set("sensor.test_monitored1", 0)
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", 4)
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert 0.2 == state.attributes.get("probability")
assert state.state == "off"
self.hass.states.set("sensor.test_monitored", 15)
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert state.state == "off"
def test_sensor_state(self):
"""Test sensor on state platform observations."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "state",
"entity_id": "sensor.test_monitored",
"to_state": "off",
"prob_given_true": 0.8,
"prob_given_false": 0.4,
}
],
"prior": 0.2,
"probability_threshold": 0.32,
}
}
assert setup_component(self.hass, "binary_sensor", config)
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "on")
state = self.hass.states.get("binary_sensor.test_binary")
assert [] == state.attributes.get("observations")
assert 0.2 == state.attributes.get("probability")
assert state.state == "off"
self.hass.states.set("sensor.test_monitored", "off")
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "on")
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "off")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8
assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4
assert round(abs(0.33 - state.attributes.get("probability")), 7) == 0
assert state.state == "on"
self.hass.states.set("sensor.test_monitored", "off")
self.hass.block_till_done()
self.hass.states.set("sensor.test_monitored", "on")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_binary")
assert round(abs(0.2 - state.attributes.get("probability")), 7) == 0
assert state.state == "off"
def test_sensor_value_template(self):
"""Test sensor on template platform observations."""
config = {
"binary_sensor": {
"name": "Test_Binary",
"platform": "bayesian",
"observations": [
{
"platform": "template",
"value_template": "{{states
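# Worked example (illustrative; not part of the truncated test file above): the
# 0.33 asserted in test_sensor_state is a single Bayes update of the configured
# prior (0.2) with the "off" observation's likelihoods (prob_given_true=0.8,
# prob_given_false=0.4).
def bayes_update(prior, prob_given_true, prob_given_false):
    numerator = prob_given_true * prior
    return numerator / (numerator + prob_given_false * (1 - prior))

# 0.8 * 0.2 / (0.8 * 0.2 + 0.4 * 0.8) = 0.16 / 0.48 ~= 0.333, above the 0.32 threshold
assert round(bayes_update(0.2, 0.8, 0.4), 2) == 0.33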

ShaneHarvey/mongo-connector | tests/test_oplog_manager_sharded.py | Python | apache-2.0 | 28,458 | 0.000738
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import threading
import time
import bson
import pymongo
from pymongo.read_preferences import ReadPreference
from pymongo.write_concern import WriteConcern
sys.path[0:0] = [""] # noqa
from mongo_connector.doc_managers.doc_manager_simulator import DocManager
from mongo_connector.locking_dict import LockingDict
from mongo_connector.namespace_config import NamespaceConfig
from mongo_connector.oplog_manager import OplogThread
from mongo_connector.test_utils import (
assert_soon,
close_client,
ShardedCluster,
ShardedClusterSingle,
)
from mongo_connector.util import retry_until_ok, bson_ts_to_long
from tests import unittest, SkipTest
class ShardedClusterTestCase(unittest.TestCase):
def set_up_sharded_cluster(self, sharded_cluster_type):
""" Initialize the cluster:
Clean out the databases used by the tests
Make connections to mongos, mongods
Create and shard test collections
Create OplogThreads
"""
self.cluster = sharded_cluster_type().start()
# Connection to mongos
self.mongos_conn = self.cluster.client()
# Connections to the shards
self.shard1_conn = self.cluster.shards[0].client()
self.shard2_conn = self.cluster.shards[1].client()
# Wipe any test data
self.mongos_conn["test"]["mcsharded"].drop()
# Disable the balancer before creating the collection
self.mongos_conn.config.settings.update_one(
{"_id": "balancer"}, {"$set": {"stopped": True}}, upsert=True
)
# Create and shard the collection test.mcsharded on the "i" field
self.mongos_conn["test"]["mcsharded"].create_index("i")
self.mongos_conn.admin.command("enableSharding", "test")
self.mongos_conn.admin.command(
"shardCollection", "test.mcsharded", key={"i": 1}
)
# Pre-split the collection so that:
# i < 1000 lives on shard1
# i >= 1000 lives on shard2
self.mongos_conn.admin.command(
bson.SON([("split", "test.mcsharded"), ("middle", {"i": 1000})])
)
# Move chunks to their proper places
try:
self.mongos_conn["admin"].command(
"moveChunk", "test.mcsharded", find={"i": 1}, to="demo-set-0"
)
except pymongo.errors.OperationFailure:
pass
try:
self.mongos_conn["admin"].command(
"moveChunk", "test.mcsharded", find={"i": 1000}, to="demo-set-1"
)
except pymongo.errors.OperationFailure:
pass
# Make sure chunks are distributed correctly
self.mongos_conn["test"]["mcsharded"].insert_one({"i": 1})
self.mongos_conn["test"]["mcsharded"].insert_one({"i": 1000})
def chunks_moved():
doc1 = self.shard1_conn.test.mcsharded.find_one()
doc2 = self.shard2_conn.test.mcsharded.find_one()
if None in (doc1, doc2):
return False
return doc1["i"] == 1 and doc2["i"] == 1000
assert_soon(
chunks_moved,
max_tries=120,
message="chunks not moved? doc1=%r, doc2=%r"
% (
self.shard1_conn.test.mcsharded.find_one(),
self.shard2_conn.test.mcsharded.find_one(),
),
)
self.mongos_conn.test.mcsharded.delete_many({})
# create a new oplog progress file
try:
os.unlink("oplog.timestamp")
except OSError:
pass
open("oplog.timestamp", "w").close()
# Oplog threads (oplog manager) for each shard
doc_manager = DocManager()
oplog_progress = LockingDict()
namespace_config = NamespaceConfig(
namespace_set=["test.mcsharded", "test.mcunsharded"]
)
self.opman1 = OplogThread(
primary_client=self.shard1_conn,
doc_managers=(doc_manager,),
oplog_progress_dict=oplog_progress,
namespace_config=namespace_config,
mongos_client=self.mongos_conn,
)
self.opman2 = OplogThread(
primary_client=self.shard2_conn,
doc_managers=(doc_manager,),
oplog_progress_dict=oplog_progress,
namespace_config=namespace_config,
mongos_client=self.mongos_conn,
)
def tearDown(self):
try:
self.opman1.join()
except RuntimeError:
pass # thread may not have been started
try:
self.opman2.join()
except RuntimeError:
pass # thread may not have been started
close_client(self.mongos_conn)
close_client(self.shard1_conn)
close_client(self.shard2_conn)
self.cluster.stop()
class TestOplogManagerShardedSingle(ShardedClusterTestCase):
"""Defines all test cases for OplogThreads running on a sharded
cluster with single node replica sets.
"""
def setUp(self):
self.set_up_sharded_cluster(ShardedClusterSingle)
def test_get_oplog_cursor(self):
"""Test the get_oplog_cursor method"""
# timestamp = None
cursor1 = self.opman1.get_oplog_cursor(None)
oplog1 = self.shard1_conn["local"]["oplog.rs"].find({"op": {"$ne": "n"}})
self.assertEqual(list(cursor1), list(oplog1))
cursor2 = self.opman2.get_oplog_cursor(None)
oplog2 = self.shard2_conn["local"]["oplog.rs"].find({"op": {"$ne": "n"}})
self.assertEqual(list(cursor2), list(oplog2))
# earliest entry is the only one at/after timestamp
doc = {"ts": bson.Timestamp(1000, 0), "i": 1}
self.mongos_conn["test"]["mcsharded"].insert_one(doc)
latest_timestamp = self.opman1.get_last_oplog_timestamp()
cursor = self.opman1.get_oplog_cursor(latest_timestamp)
self.assertNotEqual(cursor, None)
entries = list(cursor)
self.assertEqual(len(entries), 1)
next_entry_id = entries[0]["o"]["_id"]
retrieved = self.mongos_conn.test.mcsharded.find_one(next_entry_id)
self.assertEqual(retrieved, doc)
# many entries before and after timestamp
for i in range(2, 2002):
self.mongos_conn["test"]["mcsharded"].insert_one({"i": i})
oplog1 = self.shard1_conn["local"]["oplog.rs"].find(
sort=[("ts", pymongo.ASCENDING)]
)
oplog2 = self.shard2_conn["local"]["oplog.rs"].find(
sort=[("ts", pymongo.ASCENDING)]
)
# oplogs should have records for inserts performed, plus
# various other messages
oplog1_count = oplog1.count()
oplog2_count = oplog2.count()
self.assertGreaterEqual(oplog1_count, 998)
self.assertGreaterEqual(oplog2_count, 1002)
pivot1 = oplog1.skip(400).limit(-1)[0]
pivot2 = oplog2.skip(400).limit(-1)[0]
cursor1 = self.opman1.get_oplog_cursor(pivot1["ts"])
cursor2 = self.opman2.get_oplog_cursor(pivot2["ts"])
self.assertEqual(cursor1.count(), oplog1_count - 400)
self.assertEqual(cursor2.count(), oplog2_count - 400)
def test_get_last_oplog_timestamp(self):
"""Test the get_last_oplog_timestamp method"""
# "empty" the oplog
self.opman1.oplog = self.shard1_conn["test"]["emptycollection"]
self.opman2.oplog = self.shard2_conn["test"]["emptycollection"]
self.assertEqual(self.opman1.get_last_oplog_timestamp(), None)
self.assertEqual(self.opman2.get_last_oplog_timestamp(),

ZUKSev/ZUKS-Controller | server/server/wsgi.py | Python | gpl-3.0 | 1,072 | 0.000933
# This file is part of ZUKS-Controller.
#
# ZUKS-Controller is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ZUKS-Controller is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ZUKS-Controller. If not, see <http://www.gnu.org/licenses/>.
"""
WSGI config for server project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "server.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

OCA/partner-contact | partner_identification/tests/test_partner_identification.py | Python | agpl-3.0 | 4,638 | 0.001509
# Copyright 2016 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from psycopg2._psycopg import IntegrityError
from odoo.exceptions import UserError, ValidationError
from odoo.tests import common
from odoo.tools import mute_logger
class TestPartnerIdentificationBase(common.TransactionCase):
def test_create_id_category(self):
partner_id_category = self.env["res.partner.id_category"].create(
{"code": "id_code", "name": "id_name"}
)
self.assertEqual(partner_id_category.name, "id_name")
self.assertEqual(partner_id_category.code, "id_code")
@mute_logger("odoo.sql_db")
def test_update_partner_with_no_category(self):
partner_1 = self.env.ref("base.res_partner_1")
self.assertEqual(len(partner_1.id_numbers), 0)
# create without required category
with self.assertRaises(IntegrityError):
partner_1.write({"id_numbers": [(0, 0, {"name": "1234"})]})
def test_update_partner_with_category(self):
partner_1 = self.env.ref("base.res_partner_1")
partner_id_category = self.env["res.partner.id_category"].create(
{"code": "new_code", "name": "new_name"}
)
# successful creation
partner_1.write(
{
"id_numbers": [
(0, 0, {"name": "1234", "category_id": partner_id_category.id})
]
}
)
self.assertEqual(len(partner_1.id_numbers), 1)
self.assertEqual(partner_1.id_numbers.name, "1234")
# delete
partner_1.write({"id_numbers": [(5, 0, 0)]})
self.assertEqual(len(partner_1.id_numbers), 0)
class TestPartnerCategoryValidation(common.TransactionCase):
def test_partner_id_number_validation(self):
partner_id_category = self.env["res.partner.id_category"].create(
{
"code": "id_code",
"name": "id_name",
"validation_code": """
if id_number.name != '1234':
failed = True
""",
}
)
partner_1 = self.env.ref("base.res_partner_1")
with self.assertRaises(ValidationError), self.cr.savepoint():
partner_1.write(
{
"id_numbers": [
(0, 0, {"name": "01234", "category_id": partner_id_category.id})
]
}
)
partner_1.write(
{
"id_numbers": [
(0, 0, {"name": "1234", "category_id": partner_id_category.id})
]
}
)
self.assertEqual(len(partner_1.id_numbers), 1)
self.assertEqual(partner_1.id_numbers.name, "1234")
partner_id_category2 = self.env["res.partner.id_category"].create(
{
"code": "id_code2",
"name": "id_name2",
"validation_code": """
if id_number.name != '1235':
failed = True
""",
}
)
# check that the constrains is also checked when we change the
# associated category
with self.assertRaises(ValidationError), self.cr.savepoint():
partner_1.id_numbers.write({"category_id": partner_id_category2.id})
def test_bad_validation_code(self):
partner_id_category = self.env["res.partner.id_category"].create(
{
"code": "id_code",
"name": "id_name",
"validation_code": """
if id_number.name != '1234' # missing :
failed = True
""",
}
)
partner_1 = self.env.ref("base.res_partner_1")
with self.assertRaises(UserError):
partner_1.write(
{
"id_numbers": [
(0, 0, {"name": "1234", "category_id": partner_id_category.id})
]
}
)
def test_bad_validation_code_override(self):
""" It should allow a bad validation code if context overrides. """
partner_id_category = self.env["res.partner.id_category"].create(
{
"code": "id_code",
"name": "id_name",
"validation_code": """
if id_number.name != '1234' # missing :
failed = True
""",
}
)
partner_1 = self.env.ref("base.res_partner_1").with_context(id_no_validate=True)
partner_1.write(
{
"id_numbers": [
(0, 0, {"name": "1234", "category_id": partner_id_category.id})
]
}
)
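# Illustrative sketch (not part of the test file, and a simplification of what
# the addon actually does): the validation_code strings used in these tests
# follow a simple contract -- the snippet runs with `id_number` bound in its
# namespace and flags a bad number by setting `failed = True`.
def passes_validation(validation_code, id_number):
    namespace = {"id_number": id_number, "failed": False}
    exec(validation_code, namespace)
    return not namespace["failed"]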

corriander/python-sigfig | sigfig/sigfig.py | Python | mit | 2,902 | 0.033425
from math import floor, log10
def round_(x, n):
"""Round a float, x, to n significant figures.
Caution should be applied when performing this operation.
Significant figures are an implication of precision; arbitrarily
truncating floats mid-calculation is probably not Good Practice in
almost all cases.
Rounding off a float to n s.f. results in a float. Floats are, in
general, approximations of decimal numbers. The point here is that
it is very possible to end up with an inexact number:
>>> round_(0.0012395, 3)
0.00124
>>> round_(0.0012315, 3)
0.0012300000000000002
Basically, rounding in this way probably doesn't do what you want
it to.
"""
n = int(n)
x = float(x)
if x == 0: return 0
e = floor(log10(abs(x)) - n + 1) # exponent, 10 ** e
shifted_dp = x / (10 ** e) # decimal place shifted n d.p.
return round(shifted_dp) * (10 ** e) # round and revert
def string(x, n):
"""Convert a float, x, to a string with n significant figures.
This function returns a decimal string representation of a float
to a specified number of significant figures.
>>> string(9.80665, 3)
'9.81'
>>> string(0.0120076, 3)
'0.0120'
>>> string(100000, 5)
'100000'
Note the last representation is, without context, ambiguous. This
is a good reason to use scientific notation, but it's not always
appropriate.
Note
----
Performing this operation as a set of string operations arguably
makes more sense than a mathematical operation conceptually. It's
the presentation of the number that is being changed here, not the
number itself (which is in turn only approximated by a float).
"""
n = int(n)
x = float(x)
if n < 1: raise ValueError("1+ significant digits required.")
# retrieve the significand and exponent from the S.N. form
s, e = ''.join(( '{:.', str(n - 1), 'e}')).format(x).split('e')
e = int(e) # might as well coerce now
if e == 0:
# Significand requires no adjustment
return s
s = s.replace('.', '')
if e < 0:
# Placeholder zeros need creating
return ''.join(('0.', '0' * (abs(e) - 1), s))
else:
# Decimal place need shifting
s += '0' * (e - n + 1) # s now has correct s.f.
i = e + 1
sep = ''
if i < n: sep = '.'
if s[0] == '-': i += 1
return sep.join((s[:i], s[i:]))
def scientific(x, n):
"""Represent a float in scientific notation.
This function is merely a wrapper around the 'e' type flag in the
formatting specification.
"""
n = int(n)
x = float(x)
if n < 1: raise ValueError("1+ significant digits required.")
return ''.join(('{:.', str(n - 1), 'e}')).format(x)
def general(x, n):
"""Represent a float in general form.
This function is merely a wrapper around the 'g' type flag in the
formatting specification.
"""
n = int(n)
x = float(x)
if n < 1: raise ValueError("1+ significant digits required.")
return ''.join(('{:#.', str(n), 'g}')).format(x)
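# Illustrative usage (a sketch, not part of the module): string() keeps the
# trailing zeros that are significant, while round_() returns a float and so
# cannot. Expected outputs are shown as comments and are approximate where
# float repr may vary.
if __name__ == '__main__':
    print(round_(0.0120076, 3))      # ~0.012 (float, trailing zero lost)
    print(string(0.0120076, 3))      # '0.0120'
    print(scientific(0.0120076, 3))  # '1.20e-02'
    print(general(98765, 3))         # '9.88e+04'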

OCA/server-tools | base_sequence_option/__init__.py | Python | agpl-3.0 | 150 | 0
# Copyright 2021 Ecosoft Co., Ltd. (http://ecosoft.co.th)
# License LGPL-3.0 or later (https://www.gnu.org/licenses/lgpl.html).
from . import models

camlorn/clang_helper | clang_helper/extract_macros.py | Python | gpl-3.0 | 2,943 | 0.030581
"""Utilities for extracting macros and preprocessor definitions from C files. Depends on Clang's python bindings.
Note that cursors have children, which are also cursors. They are not iterators, they are nodes in a tree.
Everything here uses iterators. The general strategy is to have multiple passes over the same cursor to extract everything needed, and this entire file can be viewed as filters over raw cursors."""
import itertools
import clang.cindex as cindex
import re
from . flatten_cursor import flatten_cursor
from .extracted_features import Macro
def extract_preprocessor_cursors(cursor):
"""Get all preprocessor definitions from a cursor."""
for i in flatten_cursor(cursor):
if i.kind.is_preprocessing():
yield i
def extract_macro_cursors(c):
"""Get all macros from a cursor."""
return itertools.ifilter(lambda x: x.kind == cindex.CursorKind.MACRO_DEFINITION, extract_preprocessor_cursors(c))
def transform_token(token):
"""Returns a string representation of token. If it is a C numeric constant, it is transformed into a python numeric constant."""
#these are from python docs.
find_float = r"[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?"
find_int = r"[-+]?(0[xX][\dA-Fa-f]+|0[0-7]*|\d+)"
untransformed_string = token.spelling
try_find_int = re.match(find_int, untransformed_string)
try_find_float = re.match(find_float, untransformed_string)
new_string = untransformed_string
if try_find_int is not None:
new_string = try_find_int.group()
elif try_find_float is not None:
new_string = try_find_float.group()
return new_string
def extract_macros(c):
"""Uses eval and some regexp magic and general hackiness to extract as many macros as it possibly can.
Returns a tuple. The first element is a list of Macro objects; the second is a list of strings that name macros we couldn't handle."""
handled_macros = []
currently_known_macros = dict()
failed_macros = []
possible_macro_cursors = extract_macro_cursors(c)
#begin the general awfulness.
for i in possible_macro_cursors:
desired_tokens = list(i.get_tokens())[:-1] #the last one is something we do not need.
name_token = desired_tokens[0]
name = name_token.spelling
desired_tokens = desired_tokens[1:]
if len(desired_tokens) == 0:
#the value of this macro is none.
value = None
m = Macro(name = name, value = value, cursor = i)
handled_macros.append(m)
currently_known_macros[m.name] = m.value
continue
#otherwise, we have to do some hacky stuff.
token_strings = [transform_token(j) for j in desired_tokens]
eval_string = "".join(token_strings)
try:
value = eval(eval_string, currently_known_macros)
if isinstance(value, type):
raise ValueError("Value resolved to class, not instance.")
except:
failed_macros.append(name)
continue
m = Macro(value = value, name = name, cursor = i)
handled_macros.append(m)
currently_known_macros[m.name] = m.value
return (handled_macros, failed_macros)
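# Illustrative usage (a sketch; the header name and libclang configuration are
# assumptions, not part of this module). Macro definitions only show up in the
# AST when the translation unit is parsed with a detailed preprocessing record.
if __name__ == "__main__":
    index = cindex.Index.create()
    tu = index.parse(
        "example.h",
        options=cindex.TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD,
    )
    macros, failed = extract_macros(tu.cursor)
    for macro in macros:
        print("%s = %r" % (macro.name, macro.value))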

JohnGriffiths/nipype | nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py | Python | bsd-3-clause | 2,611 | 0.02298
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.freesurfer.preprocess import ApplyVolTransform
def test_ApplyVolTransform_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
fs_target=dict(argstr='--fstarg',
mandatory=True,
requires=['reg_file'],
xor=('target_file', 'tal', 'fs_target'),
),
fsl_reg_file=dict(argstr='--fsl %s',
mandatory=True,
xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'),
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
interp=dict(argstr='--interp %s',
),
inverse=dict(argstr='--inv',
),
invert_morph=dict(argstr='--inv-morph',
requires=['m3z_file'],
),
m3z_file=dict(argstr='--m3z %s',
),
no_ded_m3z_path=dict(argstr='--noDefM3zPath',
requires=['m3z_file'],
),
no_resample=dict(argstr='--no-resample',
),
reg_file=dict(argstr='--reg %s',
mandatory=True,
xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'),
),
reg_header=dict(argstr='--regheader',
mandatory=True,
xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'),
),
source_file=dict(argstr='--mov %s',
copyfile=False,
mandatory=True,
),
subject=dict(argstr='--s %s',
mandatory=True,
xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'),
),
subjects_dir=dict(),
tal=dict(argstr='--tal',
mandatory=True,
xor=('target_file', 'tal', 'fs_target'),
),
tal_resolution=dict(argstr='--talres %.10f',
),
target_file=dict(argstr='--targ %s',
mandatory=True,
xor=('target_file', 'tal', 'fs_target'),
),
terminal_output=dict(nohash=True,
),
transformed_file=dict(argstr='--o %s',
genfile=True,
),
xfm_reg_file=dict(argstr='--xfm %s',
mandatory=True,
xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'),
),
)
inputs = ApplyVolTransform.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_ApplyVolTransform_outputs():
output_map = dict(transformed_file=dict(),
)
outputs = ApplyVolTransform.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value

ClearCorp-dev/odoo-costa-rica | l10n_cr_account_banking_cr_bcr/__init__.py | Python | agpl-3.0 | 1,054 | 0
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 credativ Ltd (<http://www.credativ.co.uk>).
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import bcr_format
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:

sburnett/seattle | repy/tests/ut_repytests_global.py | Python | mit | 47 | 0.042553
#pragma error
#pragma repy
global foo
foo = 2

rec/echomesh | code/python/experiments/FixColors.py | Python | mit | 762 | 0.01706
import re
LINE_RE = re.compile(r'\s*namer.add\("(.*)", 0x(.*)\);.*')
with open('/tmp/colors.txt') as f:
data = {}
for line in f:
matches = LINE_RE.match(line)
if matches:
color, number = matches.groups()
if len(number) < 8:
number = 'ff%s' % number
data[color] = number
else:
print 'ERROR: don\'t understand:', line
inverse = {}
dupes = {}
for color, number in sorted(data.iteritems()):
if number in inverse:
dupes.setdefault(number, []).append(color)
else:
inverse[number] = color
print ' namer.add("%s", 0x%s);' % (color, number)
if dupes:
print dupes
for number, colors in dupes.iteritems():
print '%s -> %s (originally %s)' % (number, colors, inverse[number])

riolet/SAM | sam/models/details.py | Python | gpl-3.0 | 16,289 | 0.001719
import web
import sam.common
import sam.models.links
class Details:
def __init__(self, db, subscription, ds, address, timestamp_range=None, port=None, page_size=50):
self.db = db
self.sub = subscription
self.table_nodes = "s{acct}_Nodes".format(acct=self.sub)
self.table_links = "s{acct}_ds{id}_Links".format(acct=self.sub, id=ds)
self.table_links_in = "s{acct}_ds{id}_LinksIn".format(acct=self.sub, id=ds)
self.table_links_out = "s{acct}_ds{id}_LinksOut".format(acct=self.sub, id=ds)
self.ds = ds
self.ip_start, self.ip_end = sam.common.determine_range_string(address)
self.page_size = page_size
self.port = port
if timestamp_range:
self.time_range = timestamp_range
else:
linksModel = sam.models.links.Links(db, self.sub, self.ds)
tr = linksModel.get_timerange()
self.time_range = (tr['min'], tr['max'])
if self.db.dbname == 'mysql':
self.elapsed = '(UNIX_TIMESTAMP(MAX(timestamp)) - UNIX_TIMESTAMP(MIN(timestamp)))'
self.divop = 'DIV'
else:
self.elapsed = '(MAX(timestamp) - MIN(timestamp))'
self.divop = '/'
sam.common.sqlite_udf(self.db)
def get_metadata(self):
qvars = {"start": self.ip_start, "end": self.ip_end}
# TODO: seconds has a magic number 300 added to account for DB time quantization.
query = """
SELECT {address_q} AS 'address'
, COALESCE(n.hostname, '') AS 'hostname'
, COALESCE(l_out.unique_out_ip, 0) AS 'unique_out_ip'
, COALESCE(l_out.unique_out_conn, 0) AS 'unique_out_conn'
, COALESCE(l_out.total_out, 0) AS 'total_out'
, COALESCE(l_out.b_s, 0) AS 'out_bytes_sent'
, COALESCE(l_out.b_r, 0) AS 'out_bytes_received'
, COALESCE(l_out.max_bps, 0) AS 'out_max_bps'
, COALESCE(l_out.sum_b * 1.0 / l_out.sum_duration, 0) AS 'out_avg_bps'
, COALESCE(l_out.p_s, 0) AS 'out_packets_sent'
, COALESCE(l_out.p_r, 0) AS 'out_packets_received'
, COALESCE(l_out.sum_duration * 1.0 / l_out.total_out, 0) AS 'out_duration'
, COALESCE(l_in.unique_in_ip, 0) AS 'unique_in_ip'
, COALESCE(l_in.unique_in_conn, 0) AS 'unique_in_conn'
, COALESCE(l_in.total_in, 0) AS 'total_in'
, COALESCE(l_in.b_s, 0) AS 'in_bytes_sent'
, COALESCE(l_in.b_r, 0) AS 'in_bytes_received'
, COALESCE(l_in.max_bps, 0) AS 'in_max_bps'
, COALESCE(l_in.sum_b * 1.0 / l_in.sum_duration, 0) AS 'in_avg_bps'
, COALESCE(l_in.p_s, 0) AS 'in_packets_sent'
, COALESCE(l_in.p_r, 0) AS 'in_packets_received'
, COALESCE(l_in.sum_duration * 1.0 / l_in.total_in, 0) AS 'in_duration'
, COALESCE(l_in.ports_used, 0) AS 'ports_used'
, children.endpoints AS 'endpoints'
, COALESCE(t.seconds, 0) + 300 AS 'seconds'
, (COALESCE(l_in.sum_b, 0) + COALESCE(l_out.sum_b, 0)) / (COALESCE(t.seconds, 0) + 300) AS 'overall_bps'
, COALESCE(l_in.protocol, "") AS 'in_protocols'
, COALESCE(l_out.protocol, "") AS 'out_protocols'
FROM (
SELECT ipstart, subnet, alias AS 'hostname'
FROM {nodes_table}
WHERE ipstart = $start AND ipend = $end
) AS n
LEFT JOIN (
SELECT $start AS 's1'
, COUNT(DISTINCT dst) AS 'unique_out_ip'
, (SELECT COUNT(1) FROM (SELECT DISTINCT src, dst, port FROM {links_table} WHERE src BETWEEN $start AND $end) AS `temp1`) AS 'unique_out_conn'
, SUM(links) AS 'total_out'
, SUM(bytes_sent) AS 'b_s'
, SUM(bytes_received) AS 'b_r'
, MAX((bytes_sent + bytes_received) * 1.0 / duration) AS 'max_bps'
, SUM(bytes_sent + bytes_received) AS 'sum_b'
, SUM(packets_sent) AS 'p_s'
, SUM(packets_received) AS 'p_r'
, SUM(duration * links) AS 'sum_duration'
, GROUP_CONCAT(DISTINCT protocol) AS 'protocol'
FROM {links_table}
WHERE src BETWEEN $start AND $end
GROUP BY 's1'
) AS l_out
ON n.ipstart = l_out.s1
LEFT JOIN (
SELECT $start AS 's1'
, COUNT(DISTINCT src) AS 'unique_in_ip'
, (SELECT COUNT(1) FROM (SELECT DISTINCT src, dst, port FROM {links_table} WHERE dst BETWEEN $start AND $end) AS `temp2`) AS 'unique_in_conn'
, SUM(links) AS 'total_in'
, SUM(bytes_sent) AS 'b_s'
, SUM(bytes_received) AS 'b_r'
, MAX((bytes_sent + bytes_received) * 1.0 / duration) AS 'max_bps'
, SUM(bytes_sent + bytes_received) AS 'sum_b'
, SUM(packets_sent) AS 'p_s'
, SUM(packets_received) AS 'p_r'
, SUM(duration * links) AS 'sum_duration'
, COUNT(DISTINCT port) AS 'ports_used'
, GROUP_CONCAT(DISTINCT protocol) AS 'protocol'
FROM {links_table}
WHERE dst BETWEEN $start AND $end
GROUP BY 's1'
) AS l_in
ON n.ipstart = l_in.s1
LEFT JOIN (
SELECT $start AS 's1'
, COUNT(ipstart) AS 'endpoints'
FROM {nodes_table}
WHERE ipstart = ipend AND ipstart BETWEEN $start AND $end
) AS children
ON n.ipstart = children.s1
LEFT JOIN (
SELECT $start AS 's1'
, {elapsed} AS 'seconds'
FROM {links_table}
GROUP BY 's1'
) AS t
ON n.ipstart = t.s1
LIMIT 1;
""".format(
address_q=sam.common.db_concat(self.db, 'decodeIP(n.ipstart)', "'/'", 'n.subnet'),
elapsed=self.elapsed,
nodes_table=self.table_nodes,
links_table=self.table_links)
results = self.db.query(query, vars=qvars)
first = results.first()
if first:
return first
else:
return {}
def build_where_clause(self, timestamp_range=None, port=None, protocol=None, rounding=True):
"""
Build a WHERE SQL clause that covers basic timerange, port, and protocol filtering.
:param timestamp_range: start and end times as unix timestamps (integers). Default is all time.
:type timestamp_range: tuple[int, int]
:param port: exclusively report traffic destined for this port, if specified.
:type port: int or str
:param protocol: exclusively report traffic using this protocol
:type protocol: str
:param rounding: round each time stamp to the nearest quantization mark. (db records are quantized for conciseness)
:type rounding: bool
:return: String SQL clause
:rtype: str
"""
clauses = []
t_start = 0
t_end = 0
if timestamp_range:
t_start = timestamp_range[0]
t_end = timestamp_range[1]
if rounding:
# rounding to 5 minutes, for use with the Syslog table
if t_start > 150:
t_start -= 150
if t_end <= 2 ** 31 - 150:
t_end += 149
if self.db.dbname == 'sqlite':
clauses.append("timestamp BETWEEN $tstart AND $tend")
else:
clauses.append("timestamp BETWEEN FROM_UNIXTIME($tstart) AND FROM_UNIXTIME($tend)")
if port:
clauses.append("port = $port")
if protocol:
clauses.append("protocols LIKE $protocol")
protocol = "%{0}%".format(protocol)
qvars = {'tstart': t_start, 'tend': t_end, 'port': port, 'protocol': protocol}
where = str(web.d

felipedau/pyaxo | examples/transfer.py | Python | gpl-3.0 | 4,487 | 0.002452
#!/usr/bin/env python
"""
This file transfer example demonstrates a couple of things:
1) Transferring files using Axolotl to encrypt each block of the transfer
with a different ephemeral key.
2) Using a context manager with Axolotl.
The utility will prompt you for the location of the Axolotl key database
and the blocksize. The blocksize must be chosen so that the maximum number
of blocks is <= 255. Security is optimized by a larger number of blocks,
and transfer speed is optimized by a smaller number of blocks. If you
choose incorrectly, the utility will prompt you with a recommendation.
Key databases can be generated using e.g the init_conversation.py utility.
Syntax for receive is: ./transfer.py -r
Syntax for send is: ./transfer.py -s <filename> <target hostname or ip address>
The end of packet (EOP) and end of file (EOF) markers I use are pretty simple,
but unlikely to show up in ciphertext.
"""
from pyaxo import Axolotl
from contextlib import contextmanager
import sys
import socket
import os
try:
location = raw_input('Database directory (default ~/.bin)? ').strip()
if location == '': location = '~/.bin'
location = os.path.expanduser(location)
if sys.argv[1] == '-s':
file_name = sys.argv[2]
host = sys.argv[3]
size = int(raw_input('File transfer block size? '))
port = 50000
except IndexError:
print 'Usage: ' + sys.argv[0] + ' -(s,r) [<filename> <host>]'
exit()
backlog = 1
@contextmanager
def socketcontext(*args, **kwargs):
s = socket.socket(*args, **kwargs)
yield s
s.close()
@contextmanager
def axo(my_name, other_name, dbname, dbpassphrase):
a = Axolotl(my_name, dbname=dbname, dbpassphrase=dbpassphrase)
a.loadState(my_name, other_name)
yield a
a.saveState()
if sys.argv[1] == '-s':
# open socket and send data
with socketcontext(socket.AF_INET, socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.connect((host, port))
with axo('send', 'receive', dbname=location+'/send.db', dbpassphrase='1') as a:
with open(file_name, 'rb') as f:
plaintext = f.read()
plainlength = len(plaintext)
while plainlength/size > 253:
print 'File too large to transfer - increase size parameter'
print 'Recommended >= ' + str(plainlength/128) + ' bytes per block'
size = int(raw_input('File transfer block size? '))
plaintext = str(len(file_name)).zfill(2) + file_name + plaintext
while len(plaintext) > size:
msg = plaintext[:size]
if msg == '': break
plaintext = plaintext[size:]
ciphertext = a.encrypt(msg)
s.send(ciphertext + 'EOP')
if len(plaintext) != 0:
ciphertext = a.encrypt(plaintext)
s.send(ciphertext + 'EOF')
# receive confirmation
confirmation = s.recv(1024)
if a.decrypt(confirmation) == 'Got It!':
print 'Transfer confirmed!'
else:
print 'Transfer not confirmed...'
if sys.argv[1] == '-r':
# open socket and receive data
with socketcontext(socket.AF_INET, socket.SOCK_STREAM) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
host = ''
s.bind((host, port))
s.listen(backlog)
client, address = s.accept()
with axo('receive', 'send', dbname=location+'/receive.db', dbpassphrase='1') as a:
plaintext = ''
ciphertext = ''
while True:
newtext = client.recv(1024)
ciphertext += newtext
if ciphertext[-3:] == 'EOF': break
if ciphertext == '':
print 'nothing received'
exit()
cipherlist = ciphertext.split('EOP')
for item in cipherlist:
if item[-3:] == 'EOF':
item = item[:-3]
plaintext += a.decrypt(item)
filenamelength = int(plaintext[:2])
file_name = plaintext[2:2+filenamelength]
with open(file_name, 'wb') as f:
f.write(plaintext[2+filenamelength:])
# send confirmation
reply = a.encrypt('Got It!')
client.send(reply)
print file_name + ' received'
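# Illustrative helper (a sketch, not part of the original utility): the module
# docstring requires the whole transfer to fit in at most 255 blocks, and the
# send branch above re-prompts while plainlength / size > 253, so a workable
# minimum block size for a payload of `length` bytes is roughly length / 253.
def recommended_block_size(length):
    return max(1, length // 253 + 1)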

nrc/rustc-perf | collector/benchmarks/cranelift-codegen/cranelift-codegen/meta-python/cdsl/formats.py | Python | mit | 10,052 | 0
"""Classes for describing instruction formats."""
from __future__ import absolute_import
from .operands import OperandKind, VALUE, VARIABLE_ARGS
from .operands import Operand # noqa
# The typing module is only required by mypy, and we don't use these imports
# outside type comments.
try:
from typing import Dict, List, Tuple, Union, Any, Sequence, Iterable # noqa
except ImportError:
pass
class InstructionContext(object):
"""
Most instruction predicates refer to immediate fields of a specific
instruction format, so their `predicate_context()` method returns the
specific instruction format.
Predicates that only care about the types of SSA values are independent of
the instruction format. They can be evaluated in the context of any
instruction.
The singleton `InstructionContext` class serves as the predicate context
for these predicates.
"""
def __init__(self):
# type: () -> None
self.name = 'inst'
# Singleton instance.
instruction_context = InstructionContext()
class InstructionFormat(object):
"""
Every instruction opcode has a corresponding instruction format which
determines the number of operands and their kinds. Instruction formats are
identified structurally, i.e., the format of an instruction is derived from
the kinds of operands used in its declaration.
The instruction format stores two separate lists of operands: Immediates
and values. Immediate operands (including entity references) are
represented as explicit members in the `InstructionData` variants. The
value operands are stored differently, depending on how many there are.
Beyond a certain point, instruction formats switch to an external value
list for storing value arguments. Value lists can hold an arbitrary number
of values.
All instruction formats must be predefined in the
:py:mod:`cranelift.formats` module.
:param kinds: List of `OperandKind` objects describing the operands.
:param name: Instruction format name in CamelCase. This is used as a Rust
variant name in both the `InstructionData` and `InstructionFormat`
enums.
:param typevar_operand: Index of the value input operand that is used to
infer the controlling type variable. By default, this is `0`, the first
`value` operand. The index is relative to the values only, ignoring
immediate operands.
"""
# Map (imm_kinds, num_value_operands) -> format
_registry = dict() # type: Dict[Tuple[Tuple[OperandKind, ...], int, bool], InstructionFormat] # noqa
# All existing formats.
all_formats = list() # type: List[InstructionFormat]
def __init__(self, *kinds, **kwargs):
# type: (*Union[OperandKind, Tuple[str, OperandKind]], **Any) -> None # noqa
self.name = kwargs.get('name', None) # type: str
self.parent = instruction_context
# The number of value operands stored in the format, or `None` when
# `has_value_list` is set.
self.num_value_operands = 0
# Does this format use a value list for storing value operands?
self.has_value_list = False
# Operand fields for the immediate operands. All other instruction
# operands are values or variable argument lists. They are all handled
# specially.
self.imm_fields = tuple(self._process_member_names(kinds))
# The typevar_operand argument must point to a 'value' operand.
self.typevar_operand = kwargs.get('typevar_operand', None) # type: int
if self.typevar_operand is not None:
if not self.has_value_list:
assert self.typevar_operand < self.num_value_operands, \
"typevar_operand must indicate a 'value' operand"
elif self.has_value_list or self.num_value_operands > 0:
# Default to the first 'value' operand, if there is one.
self.typevar_operand = 0
# Compute a signature for the global registry.
imm_kinds = tuple(f.kind for f in self.imm_fields)
sig = (imm_kinds, self.num_value_operands, self.has_value_list)
if sig in InstructionFormat._registry:
raise RuntimeError(
"Format '{}' has the same signature as existing format '{}'"
.format(self.name, InstructionFormat._registry[sig]))
InstructionFormat._registry[sig] = self
InstructionFormat.all_formats.append(self)
def args(self):
# type: () -> FormatField
"""
Provides a ValueListField, which is derived from FormatField,
corresponding to the full ValueList of the instruction format. This
is useful for creating predicates for instructions which use variadic
arguments.
"""
if self.has_value_list:
return ValueListField(self)
return None
def _process_member_names(self, kinds):
# type: (Sequence[Union[OperandKind, Tuple[str, OperandKind]]]) -> Iterable[FormatField] # noqa
"""
Extract names of all the immediate operands in the kinds tuple.
Each entry is either an `OperandKind` instance, or a `(member, kind)`
pair. The member names correspond to members in the Rust
`InstructionData` data structure.
Updates the fields `self.num_value_operands` and `self.has_value_list`.
Yields the immediate operand fields.
"""
inum = 0
for arg in kinds:
if isinstance(arg, OperandKind):
member = arg.default_member
k = arg
else:
member, k = arg
# We define 'immediate' as anything that is not a value or a variable-argument list.
if k is VALUE:
self.num_value_operands += 1
elif k is VARIABLE_ARGS:
self.has_value_list = True
else:
yield FormatField(self, inum, k, member)
inum += 1
def __str__(self):
# type: () -> str
args = ', '.join(
'{}: {}'.format(f.member, f.kind) for f in self.imm_fields)
return '{}(imms=({}), vals={})'.format(
self.name, args, self.num_value_operands)
def __getattr__(self, attr):
# type: (str) -> FormatField
"""
Make immediate instruction format members available as attributes.
Each non-value format member becomes a corresponding `FormatField`
attribute.
"""
for f in self.imm_fields:
if f.member == attr:
# Cache this field attribute so we won't have to search again.
setattr(self, attr, f)
return f
raise AttributeError(
'{} is neither a {} member or a '
.format(attr, self.name) +
'normal InstructionFormat attribute')
@staticmethod
def lookup(ins, outs):
# type: (Sequence[Operand], Sequence[Operand]) -> InstructionFormat
"""
Find an existing instruction format that matches the given lists of
instruction inputs and outputs.
The `ins` and `outs` arguments correspond to the
:py:class:`Instruction` arguments of the same name, except they must be
tuples of :py:`Operand` objects.
"""
# Construct a signature.
imm_kinds = tuple(op.kind for op in ins if op.is_immediate())
num_values = sum(1 for op in ins if op.is_value())
has_varargs = (VARIABLE_ARGS in tuple(op.kind for op in ins))
sig = (imm_kinds, num_values, has_varargs)
if sig in InstructionFormat._registry:
return InstructionFormat._registry[sig]
# Try another value list format as an alternative.
sig = (imm_kinds, 0, True)
if sig in InstructionFormat._registry:
return InstructionFormat._registry[sig]
raise RuntimeError(
'No instruction format matches '
'imms={}, vals={}, varargs={}'.format(
imm_kinds, num_values, has_varargs))
@staticmethod
def extract_names(globs):
# t

jugovich/teresajugovich | config/test.py | Python | mit | 1,293 | 0.003867
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'tjtest', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '127.0.0.1', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '5432', # Set to empty string for default.
}
}
# Make this unique, and don't share it with anybody.
SECRET_KEY = ''
DEBUG = False
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['tjugovich.webfactional.com']
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/w
|
ww/example.com/media/"
MEDIA_ROOT = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = '/home/tjugovich/webapps/test_static'

genialis/resolwe | resolwe/flow/signals.py | Python | apache-2.0 | 2,030 | 0
""".. Ignore pydocstyle D400.
===============
Signal Handlers
===============
"""
from asgiref.sync import async_to_sync
from django.conf import settings
from django.db import transaction
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from resolwe.flow.managers import manager
from resolwe.flow.models import Data, Relation
from resolwe.flow.models.entity import RelationPartition
def commit_signal(data_id):
"""Nudge manager at
|
the end of every Data object save event."""
if not getattr(settings, "FLOW_MANAGER_DISABLE_AUTO_CALLS", False):
immediate = getattr(settings, "FLOW_MANAGER_SYNC_AUTO_CALLS", False)
async_to_sync(manager.communicate)(data_id=data_id, run_sync=immediate)
@receiver(post_save, sender=Data)
def manager_post_save_handler(sender, instance, created, **kwargs):
"""Run newly created (spawned) processes."""
if (
instance.status == Data.STATUS_DONE
or instance.status == Data.STATUS_ERROR
or created
):
# Run manager at the end of the potential transaction. Otherwise
# tasks are send to workers before transaction ends and therefore
# workers cannot access objects created inside transaction.
transaction.on_commit(lambda: commit_signal(instance.id))
# NOTE: m2m_changed signal cannot be used because of a bug:
# https://code.djangoproject.com/ticket/17688
@receiver(post_delete, sender=RelationPartition)
def delete_relation(sender, instance, **kwargs):
"""Delete the Relation object when the last Entity is removed."""
def process_signal(relation_id):
"""Get the relation and delete it if it has no entities left."""
try:
relation = Relation.objects.get(pk=relation_id)
except Relation.DoesNotExist:
return
if relation.entities.count() == 0:
relation.delete()
# Wait for partitions to be recreated.
transaction.on_commit(lambda: process_signal(instance.relation_id))

fccoelho/pypln.backend | pypln/backend/workers/tokenizer.py | Python | gpl-3.0 | 1,140 | 0.000877
# coding: utf-8
#
# Copyright 2012 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.
from mongodict import MongoDict
from nltk import word_tokenize, sent_tokenize
from pypln.backend.celery_task import PyPLNTask
class Tokenizer(PyPLNTask):
def process(self, document):
text = document['text']
tokens = word_tokenize(text)
sentences = [word_tokenize(sent) for sent in sent_tokenize(text)]
return {'tokens': tokens, 'sentences': sentences}

ctalbert/mozharness | configs/partner_repacks/release_mozilla-release_android.py | Python | mpl-2.0 | 1,585 | 0.002524
FTP_SERVER = "stage.mozilla.org"
FTP_USER = "ffxbld"
FTP_SSH_KEY = "~/.ssh/ffxbld_dsa"
FTP_UPLOAD_BASE_DIR = "/pub/mozilla.org/mobile/candidates/%(version)s-candidates/build%(buildnum)d"
DOWNLOAD_BASE_URL = "http://%s%s" % (FTP_SERVER, FTP_UPLOAD_BASE_DIR)
APK_BASE_NAME = "fennec-%(version)s.%(locale)s.android-arm.apk"
HG_SHARE_BASE_DIR = "/builds/hg-shared"
KEYSTORE = "/home/cltsign/.android/android-release.keystore"
KEY_ALIAS = "release"
config = {
"log_name": "partner_repack",
"locales_file": "buildbot-configs/mozilla/l10n-changesets_mobile-release.json",
"additional_locales": ['en-US'],
"platforms": ["android"],
"repos": [{
"repo": "http://hg.mozilla.org/build/buildbot-configs",
"revision": "default",
}],
'vcs_share_base': HG_SHARE_BASE_DIR,
"ftp_upload_base_dir": FTP_UPLOAD_BASE_DIR,
"ftp_ssh_key": FTP_SSH_KEY,
"ftp_user": FTP_USER,
"ftp_server": FTP_SERVER,
"installer_base_names": {
"android": APK_BASE_NAME,
},
"partner_config": {
"google-play": {},
},
"download_unsigned_base_subdir": "unsigned/%(platform)s/%(locale)s",
"download_base_url": DOWNLOAD_BASE_URL,
"release_config_file": "buildbot-configs/mozilla/release-fennec-mozilla-release.py",
"default_actions": ["clobber", "pull", "download", "repack", "upload-unsigned-bits"],
# signing (optional)
"keystore": KEYSTORE,
"key_a
|
lias": KEY_ALIAS,
"exes": {
"jarsigner": "/tools/jdk-1.6.0_17/bin/jarsigner",
"zipalign": "/tools/android-sdk-r8/tools/zipalign",
},
}

EPFL-LCN/neuronaldynamics-exercises | neurodynex3/cable_equation/passive_cable.py | Python | gpl-2.0 | 6,153 | 0.004063
"""
Implements compartmental model of a passive cable. See Neuronal Dynamics
`Chapter 3 Section 2 <http://neuronaldynamics.epfl.ch/online/Ch3.S2.html>`_
"""
# This file is part of the exercise code repository accompanying
# the book: Neuronal Dynamics (see http://neuronaldynamics.epfl.ch)
# located at http://github.com/EPFL-LCN/neuronaldynamics-exercises.
# This free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License 2.0 as published by the
# Free Software Foundation. You should have received a copy of the
# GNU General Public License along with the repository. If not,
# see http://www.gnu.org/licenses/.
# Should you reuse and publish the code for your own purposes,
# please cite the book or point to the webpage http://neuronaldynamics.epfl.ch.
# Wulfram Gerstner, Werner M. Kistler, Richard Naud, and Liam Paninski.
# Neuronal Dynamics: From Single Neurons to Networks and Models of Cognition.
# Cambridge University Press, 2014.
import brian2 as b2
from neurodynex3.tools import input_factory
import matplotlib.pyplot as plt
import numpy as np
# integration time step in milliseconds
b2.defaultclock.dt = 0.01 * b2.ms
# DEFAULT morphological and electrical parameters
CABLE_LENGTH = 500. * b2.um # length of dendrite
CABLE_DIAMETER = 2. * b2.um # diameter of dendrite
R_LONGITUDINAL = 0.5 * b2.kohm * b2.mm # Intracellular medium resistance
R_TRANSVERSAL = 1.25 * b2.Mohm * b2.mm ** 2 # cell membrane resistance (->leak current)
E_LEAK = -70. * b2.mV # reversal potential of the leak current (-> resting potential)
CAPACITANCE = 0.8 * b2.uF / b2.cm ** 2 # membrane capacitance
DEFAULT_INPUT_CURRENT = input_factory.get_step_current(2000, 3000, unit_time=b2.us, amplitude=0.2 * b2.namp)
DEFAULT_INPUT_LOCATION = [CABLE_LENGTH / 3] # provide an array of locations
# print("Membrane Timescale = {}".format(R_TRANSVERSAL*CAPACITANCE))
def simulate_passive_cable(current_injection_location=DEFAULT_INPUT_LOCATION, input_current=DEFAULT_INPUT_CURRENT,
length=CABLE_LENGTH, diameter=CABLE_DIAMETER,
r_longitudinal=R_LONGITUDINAL,
r_transversal=R_TRANSVERSAL, e_leak=E_LEAK, initial_voltage=E_LEAK,
capacitance=CAPACITANCE, nr_compartments=200, simulation_time=5 * b2.ms):
"""Builds a multicompartment cable and numerically approximates the cable equation.
Args:
current_injection_location (list): List [] of input locations (Quantity, Length): [123.*b2.um]
input_current (TimedArray): TimedArray of current amplitudes. One column per current_injection_location.
length (Quantity): Length of the cable: 0.8*b2.mm
diameter (Quantity): Diameter of the cable: 0.2*b2.um
r_longitudinal (Quantity): The longitudinal (axial) resistance of the cable: 0.5*b2.kohm*b2.mm
r_transversal (Quantity): The transversal resistance (=membrane resistance): 1.25*b2.Mohm*b2.mm**2
e_leak (Quantity): The reversal potential of the leak current (=resting potential): -70.*b2.mV
initial_voltage (Quantity): Value of the potential at t=0: -70.*b2.mV
capacitance (Quantity): Membrane capacitance: 0.8*b2.uF/b2.cm**2
nr_compartments (int): Number of compartments. Spatial discretization: 200
simulation_time (Quantity): Time for which the dynamics are simulated: 5*b2.ms
Returns:
(StateMonitor, SpatialNeuron): The state monitor contains the membrane voltage in a
Time x Location matrix. The SpatialNeuron object specifies the simulated neuron model
and gives access to the morphology. You may want to use those objects for
spatial indexing: myVoltageStateMonitor[mySpatialNeuron.morphology[0.123*b2.um]].v
"""
assert isinstance(input_current, b2.TimedArray), "input_current is not of type TimedArray"
assert input_current.values.shape[1] == len(current_injection_location),\
"number of injection_locations does not match nr of input currents"
cable_morphology = b2.Cylinder(diameter=diameter, length=length, n=nr_compartments)
# Im is transmembrane current
# Iext is injected current at a specific position on dendrite
EL = e_leak
RT = r_transversal
eqs = """
Iext = current(t, location_index): amp (point current)
location_index : integer (constant)
Im = (EL-v)/RT : amp/meter**2
"""
cable_model = b2.SpatialNeuron(morphology=cable_morphology, model=eqs, Cm=capacitance, Ri=r_longitudinal)
monitor_v = b2.StateMonitor(cable_model, "v", record=True)
# inject all input currents at the specified location:
nr_input_locations = len(current_injection_location)
input_current_0 = np.insert(input_current.values, 0, 0., axis=1) * b2.amp # insert default current: 0. [amp]
current = b2.TimedArray(input_current_0, dt=input_current.dt * b2.second)
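    # every compartment starts with location_index = 0, i.e. the all-zero column of the
    # padded TimedArray, so only the compartments selected below receive injected current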
for current_index in range(nr_input_locations):
        insert_location = current_injection_location[current_index]
        compartment_index = int(np.floor(insert_location / (length / nr_compartments)))
# next line: current_index+1 because 0 is the default current 0Amp
cable_model.location_index[compartment_index] = current_index + 1
    # set initial values and run the simulation for the requested simulation_time
cable_model.v = initial_voltage
b2.run(simulation_time)
return monitor_v, cable_model
def getting_started():
"""A simple code example to get started.
"""
current = input_factory.get_step_current(500, 510, unit_time=b2.us, amplitude=3. * b2.namp)
voltage_monitor, cable_model = simulate_passive_cable(
length=0.5 * b2.mm, current_injection_location=[0.1 * b2.mm], input_current=current,
nr_compartments=100, simulation_time=2 * b2.ms)
# provide a minimal plot
plt.figure()
plt.imshow(voltage_monitor.v / b2.volt)
plt.colorbar(label="voltage")
plt.xlabel("time index")
plt.ylabel("location index")
plt.title("vm at (t,x), raw data voltage_monitor.v")
plt.show()
if __name__ == "__main__":
getting_started()
|
vejmelkam/emotiv-reader
|
src/signal_renderer_widget.py
|
Python
|
gpl-3.0
| 7,269 | 0.007979 |
import numpy as np
import bisect
import pygame
import scipy.signal
from albow.widget import Widget, overridable_property
from albow.theme import ThemeProperty
class SignalRendererWidget(Widget):
def __init__(self, signal_list, dev, buf, rect, **kwds):
"""
        Initialize the renderer with the signal_name to index mapping
        (always all 14 signals), the measurement device, the signal
        buffer and the rectangle into which the signals are to be rendered.
To select shown signals, use select_channels.
"""
Widget.__init__(self, rect, **kwds)
self.sig_list = signal_list
self.dev = dev
self.buf = buf
self.font = pygame.font.SysFont("Ubuntu", 20, True)
self.cq_font = pygame.font.SysFont("Ubuntu", 16, True)
self.multiplier = 1.0
self.selected = range(14)
self.display_type = [0] * 14
def select_channels(self, which):
"""
Supply a new array of integers which indicate the signals to show.
"""
self.selected = which
def toggle_channel(self, ndx):
"""
Toggle the display of channel with index ndx (0..13).
"""
if ndx in self.selected:
# if self.display_type[ndx] == 1:
# self.selected.remove(ndx)
# else:
# self.display_type[ndx] = 1
self.selected.remove(ndx)
else:
# need to re-sort the list after the append
bisect.insort(self.selected, ndx)
self.display_type[ndx] = 0
def update_magnification(self, update):
"""
Set the magnification of the displayed signal.
"""
self.multiplier = max(0.2, self.multiplier + update)
def render_time_series(self, sig, color, frame, surf):
"""
Render a time series representation (given by pts) into rect.
"""
# draw the zero level
zero_ax_y = frame.top + frame.height // 2
pygame.draw.line(surf, (70, 70, 70),
(frame.left, zero_ax_y),
(frame.right, zero_ax_y))
pygame.draw.line(surf, (20, 60, 20, 30),
(frame.left, frame.bottom),
                         (frame.right, frame.bottom))
# draw the signal onto the screen (remove mean in buffer)
zero_lev = np.mean(sig)
sig_amp = max(np.max(sig) - zero_lev, zero_lev - np.min(sig))
if sig_amp == 0:
sig_amp = 1.0
# pixel_per_lsb = self.multiplier * frame.height / sig_amp / 2.0
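        # fixed scale instead of autoscaling: one Emotiv LSB presumably equals 0.51 uV,
        # so a swing of roughly 200 uV fills the frame height (times the user multiplier)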
pixel_per_lsb = self.multiplier * frame.height / (200.0 / 0.51)
        draw_pts_y = zero_ax_y - (sig - zero_lev) * pixel_per_lsb
draw_pts_y[draw_pts_y < frame.top] = frame.top
draw_pts_y[draw_pts_y > frame.bottom] = frame.bottom
draw_pts_x = np.linspace(0, frame.width, len(sig)) + frame.left
pygame.draw.lines(surf, color, False, zip(draw_pts_x, draw_pts_y))
# draw a bar that corresponds to 10uV
uV10_len = 10.0 / 0.51 * pixel_per_lsb
if uV10_len > frame.height:
uV10_len = frame.height * 3 // 4
uV10_col = (255, 0, 0)
else:
uV10_col = (0, 0, 0)
pygame.draw.line(surf, uV10_col,
(frame.right - 10, zero_ax_y - uV10_len // 2),
(frame.right - 10, zero_ax_y + uV10_len // 2), 2)
def render_spectrum(self, sig, color, frame, surf):
"""
Render a spectral representation of the signal.
"""
min_freq = 0.7
max_freq = 45.0
s2 = sig.copy()
# special check for all zeros (no data situation)
if np.all(s2 == 0.0):
sp = np.zeros(shape = (s2.shape[0] // 2, ))
else:
tm = np.arange(len(sig), dtype = np.float64) / 128.0
angular_freqs = np.linspace(2.0 * np.pi * min_freq,
2.0 * np.pi * max_freq, 100)
# pg = scipy.signal.lombscargle(tm, s2, angular_freqs)
# sp = np.sqrt(4 * (pg / tm.shape[0]))
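            # remove the DC offset before the FFT so the 0 Hz bin does not swamp the spectrum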
s2 = s2 - np.mean(s2)
sp = np.abs(np.fft.rfft(s2))
# if there are any non-finite values, replace buffer with zeros
if not np.all(np.isfinite(sp)):
sp[:] = 0.0
# autoscale the spectral display
# sp -= np.amin(sp)
sig_amp = np.amax(sp)
if sig_amp == 0:
sig_amp = 1.0
pixel_per_lsb = self.multiplier * frame.height / sig_amp / 2.0
draw_pts_y = frame.bottom - sp * pixel_per_lsb
draw_pts_x = np.linspace(0, frame.width, len(sp)) + frame.left
# draw line at bottom of frame
pygame.draw.line(surf, (20, 60, 20, 30), (frame.left, frame.bottom),
(frame.right, frame.bottom))
# draw the spectrum in dB
pygame.draw.lines(surf, color, False, zip(draw_pts_x, draw_pts_y))
# draw spectral bands
for f in [5.0, 10.0, 15.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0]:
x = (f - min_freq) / max_freq * frame.width + frame.left
pygame.draw.line(surf, (0, 0, 0), (x, frame.top), (x, frame.bottom))
# fixme: draw 20dB? yardstick
def render_name_and_contact_quality(self, chan_name, frame, surf):
# draw a bar indicating contact quality
cq = self.dev.cq[chan_name]
cr, cr_str = self.dev.contact_resistance(chan_name)
# map signal resistance to color
if cr is None or cr > 1000:
quality_color = (255, 0, 0)
elif cr > 50:
quality_color = (200, 100, 20)
elif cr > 20:
quality_color = (200, 100, 20)
else:
quality_color = (20, 150, 20)
zero_ax_y = frame.top + frame.height // 2
surf.blit(self.font.render(chan_name, 1, (0,0,0)), (frame.right - 150, zero_ax_y - 10))
surf.blit(self.cq_font.render('%d (%s)' % (cq, cr_str), 1, quality_color),
(frame.right - 150, zero_ax_y + 10))
def draw(self, surf):
"""
Draw the signals. Here we expect the signal buffer to be updated.
"""
frame = surf.get_rect()
pygame.draw.rect(surf, (255,255,255), frame)
# plot the signals
Nsig = len(self.selected)
if Nsig == 0:
return
gr_height = (frame.bottom - frame.top) // Nsig
gr_width = frame.width
# get a handle to the buffer
self.buf.pull_packets(self.dev)
buf = self.buf.buffer()
# for each signal repeat
for s, sndx in zip(self.selected, range(len(self.selected))):
# retrieve channel name
chan_name = self.sig_list[s]
# compute target rectangle
rect = pygame.Rect(frame.left, frame.top + gr_height * sndx, frame.width, gr_height)
# render a time series representation
color = (255, 0, 0) if sndx % 2 == 0 else (0, 0, 255)
if self.display_type[s] == 0:
self.render_time_series(buf[:,s], color, rect, surf)
else:
self.render_spectrum(buf[:,s], color, rect, surf)
# draw the signal name
self.render_name_and_contact_quality(chan_name, rect, surf)
|
twosigma/beaker-notebook
|
beakerx_tabledisplay/beakerx_tabledisplay/__init__.py
|
Python
|
apache-2.0
| 1,170 | 0.001709 |
# Copyright 2019 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .tableitems import *
from .tabledisplay import *
from ._version import version_info, __version__
from .handlers import load_jupyter_server_extension
from .commands import parse
def _jupyter_nbextension_paths():
return [{
'section': 'notebook',
'src': 'static',
'dest': 'beakerx_tabledisplay',
'require': 'beakerx_tabledisplay/index'
}
]
def _jupyter_server_extension_paths():
return [dict(module="beakerx_tabledisplay")]
def run():
try:
parse()
except KeyboardInterrupt:
return 130
return 0
|
opensciences/var
|
Import from googlecode/importAutomation.py
|
Python
|
mit
| 4,045 | 0.006922 |
"""
Exceptions:
mccabehalsted: there are triple curly brackets ({{{) in jm1 that jekyll doesn't like
spe: is in "other" directory in terapromise
reuse: double curly brackets in reuse
dump: links end like "/ABACUS2013" without closing slash
"""
relativePath = "defect/ck/"
import os, re, datetime
from types import NoneType
def extractSummary(fileContents):
return re.search("^#summary ([^\n]+)\n", fileContents).group(1)
def extractAuthor(fileContents):
results = re.search(r"\|\| Donated by (\[[^ ]* )?([^\]|]+)\]? \|\|", fileContents)
if type(results.group(2)) == NoneType:
return results.group(1)
else:
return results.group(2)
def genHeader(baseName, fileContents):
    summary = extractSummary(fileContents)
author = extractAuthor(fileContents)
return """---
title: """ + baseName + """
excerpt: """ + summary + """
layout: repo
author: """ + author + """
---
"""
def doDeletions(fileContents):
return re.sub(r"#summary [^\n]+\n#
|
labels [^\n]+\n\n<wiki:toc max_depth=\"2\" />", "", fileContents)
def changeHeaders(fileContents):
return re.sub(r"\n= ([^\n]+) =\n", r"\n#\1\n", fileContents)
def reformatLinks(fileContents):
sub = re.sub(r"[^\[]http([^\s]+)", r"[http\1 http\1]", fileContents)
return re.sub(r"\[([^ ]+) ([^\]]+)\]", r"[\2](\1)", sub)
def changeURLs(fileContents, relativePath):
hasHiddenParentQ = (type(re.search(r"\d$", baseName)) != NoneType) and (relativePath == "defect/mccabehalsted/")
teraPromiseRelativePath = relativePath + baseName
if hasHiddenParentQ:
teraPromiseRelativePath = relativePath + baseName[:-1] + "/" + baseName
sub = re.sub("http://promisedata.googlecode.com/svn/trunk/[^/]+/(" + baseName + "/)?", "https://terapromise.csc.ncsu.edu:8443/svn/repo/" + teraPromiseRelativePath + r"/", fileContents)
return re.sub("http://code.google.com/p/promisedata/source/browse/trunk/[^/]+/(" + baseName + "/)?", "https://terapromise.csc.ncsu.edu:8443/svn/repo/" + teraPromiseRelativePath + r"/", sub)
def removeExtraneousLinks(fileContents):
return fileContents
def reformatTables(fileContents):
sub = re.sub(r"\|\| When \|\| What \|\|", r"When | What\r---- | ----", fileContents)
return re.sub(r"\|\| ([^|]+) \|\| ([^|]+) \|\|", r"\1 | \2", sub)
def escapeCurlyBrackets(fileContents):
sub = re.sub(r"{", r"\{", fileContents)
return re.sub(r"}", r"\}", sub)
def extractDateString(fileContents):
result = re.search(r"\n\|\| *([^ |]+ [^ |]+ [^ |]+) *\|\| Donated by[^|]+\|\|", fileContents).group(1)
return result
def dateAddedString(fileContents):
dateString = extractDateString(fileContents)
date = datetime.datetime.strptime(dateString, "%B %d, %Y").date()
return date.strftime("%Y-%m-%d-")
directory = "/Users/Carter/Documents/OpenSciences/opensciences.github.io/repo/" + relativePath + "_posts/"
writeDirPath = "/Users/Carter/Documents/OpenSciences/opensciences.github.io/repo/" + relativePath + "_posts/"
for subdir, dirs, files in os.walk(directory):
for eachFileName in files:
print(eachFileName)
if eachFileName[-5:] != ".wiki":
continue
readFilePath = directory + eachFileName
baseName = os.path.basename(readFilePath)[:-5]
readObj = file(readFilePath, "r")
fileContents = readObj.read()
readObj.close()
newFileName = dateAddedString(fileContents) + os.path.basename(readFilePath)[:-5] + ".md"
newFilePath = directory + newFileName
header = genHeader(baseName, fileContents)
fileContents = doDeletions(fileContents)
fileContents = changeHeaders(fileContents)
fileContents = reformatLinks(fileContents)
fileContents = changeURLs(fileContents, relativePath)
fileContents = removeExtraneousLinks(fileContents)
fileContents = reformatTables(fileContents)
fileContents = escapeCurlyBrackets(fileContents)
writeObj = file(newFilePath, "w")
writeObj.write(header + fileContents)
writeObj.close()
|
rsnakamura/oldape
|
apetools/builders/subbuilders/teardownbuilder.py
|
Python
|
apache-2.0
| 1,942 | 0.000515 |
from apetools.baseclass import BaseClass
from apetools.tools import copyfiles
from apetools.log_setter import LOGNAME
from apetools.proletarians import teardown
class TearDownBuilder(BaseClass):
"""
A basic tear-down builder that just copies log and config files.
"""
def __init__(self, configfilename, storage, subdir="logs"):
"""
:param:
- `configfilename`: the name of the config file to copy
         - `storage`: A storage object aimed at the data folder.
"""
super(TearDownBuilder, self).__init__()
self.configfilename = configfilename
self.storage = storage
self.subdir = subdir
self._configcopier = None
self._logcopier = None
self._teardown = None
return
@property
def configcopier(self):
"""
:return: A file copier aimed at the config file
"""
if self._configcopier is None:
self._configcopier = copyfiles.CopyFiles((self.configfilename,),
self.storage,
self.subdir)
return self._configcopier
@property
def logcopier(self):
"""
:return: A file copier aimed at the log file
"""
if self._logcopier is None:
self._logcopier = copyfiles.CopyFiles((LOGNAME,),
self.storage,
self.subdir)
return self._logcopier
@property
def teardown(self):
"""
:return: A teardown object for the test-operator to run to cleanup
"""
if self._teardown is None:
self._teardown = teardown.TeardownSession((self.configcopier,
self.logcopier))
return self._teardown
# End class TearDownBuilder
|
belokop/indico_bare
|
indico/MaKaC/common/contextManager.py
|
Python
|
gpl-3.0
| 811 | 0 |
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
"""
Just for backwards-compatibility
"""
from indico.util.contextManager import *
|
ancho85/pylint-playero-plugin
|
tests/test_funcs.py
|
Python
|
gpl-2.0
| 3,850 | 0.012987 |
import unittest
from libs.funcs import *
class TestFuncs(unittest.TestCase):
def test_buildPaths(self):
recPaths, repPaths, rouPaths, corePaths = buildPaths()
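        # buildPaths() presumably maps each record/report/routine/core name to the
        # folder paths that define it (base, StdPy, standard, embedded)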
findTxt = lambda x, y: x.find(y) > -1
assert findTxt(recPaths["Task"][0], "base")
assert findTxt(recPaths["Department"][0], "StdPy")
assert findTxt(recPaths["Department"][1], "standard")
assert findTxt(repPaths["ListWindowReport"][0], "base")
assert findTxt(repPaths["ExpensesList"][0], "StdPy")
assert findTxt(repPaths["ExpensesList"][1], "standard")
assert findTxt(rouPaths["GenNLT"][0], "StdPy")
assert findTxt(rouPaths["GenNLT"][1], "standard")
assert findTxt(corePaths["Field"][0], "embedded")
self.assertFalse([k for (k, v) in rouPaths.iteritems() if findTxt(v[0], "base")]) #no routines in base
def test_recordInheritance(self):
recf, recd = getRecordInheritance("Invoice")
assert all([f1 in recf for f1 in ("SalesMan", "InvoiceDate", "CustCode", "Currency", "ShiftDate", "OriginNr", "SerNr", "attachFlag")])
assert all([d in recd for d in ("CompoundItemCosts", "Payments", "Items", "Taxes", "Installs")])
recf, recd = getRecordInheritance("AccessGroup")
assert all([f2 in recf for f2 in ("PurchaseItemsAccessType", "InitialModule", "Closed", "internalId")])
assert all([d in recd for d in ("PurchaseItems", "Customs", "Modules")])
def test_recordsInfo(self):
recf, recd = getRecordsInfo("Department", RECORD)
assert recf["Department"]["AutoCashCancel"] == "integer
|
" #From StdPy
assert recf["Department"]["DeptName"] == "string" #From standard
assert recf["Department"]["Closed"] == "Boolean" #From Master
assert recf["Department"]["internalId"] == "internalid" #From Record
assert recd["Department"]["OfficePayModes"] == "DepartmentOfficePayModeRow" #Recordname from detail
repf, repd = getRecordsInfo("Bala
|
nce", REPORT)
assert repf["Balance"]["LabelType"] == "string" #StdPy
assert repf["Balance"]["ExplodeByLabel"] == "boolean" #Standard
assert repf["Balance"]["internalId"] == "internalid" #Record
assert not repd["Balance"] #Empty dict, no detail
rouf, roud = getRecordsInfo("GenNLT", ROUTINE)
assert rouf["GenNLT"]["ExcludeInvalid"] == "boolean"
assert rouf["GenNLT"]["Table"] == "string"
assert not roud["GenNLT"]
rouf, roud = getRecordsInfo("LoginDialog", RECORD)
assert rouf["LoginDialog"]["Password"] == "string" #embedded
assert not roud["LoginDialog"]
def test_classInfo(self):
attr, meth = getClassInfo("Invoice")
assert attr["DEBITNOTE"] == 2
assert attr["ATTACH_NOTE"] == 3
assert attr["rowNr"] == 0
assert attr["ParentInvoice"] == "SuperClass"
assert isinstance(attr["DocTypes"], list)
assert isinstance(attr["Origin"], dict)
assert all([m in meth for m in ("getCardReader", "logTransactionAction", "updateCredLimit",
"generateTaxes", "roundValue", "getOriginType", "bring", "getXML", "createField")])
assert meth["fieldIsEditable"][0] == "self"
assert meth["fieldIsEditable"][1] == "fieldname"
assert meth["fieldIsEditable"][2] == {"rowfieldname":'None'}
assert meth["fieldIsEditable"][3] == {"rownr":'None'}
attr, meth = getClassInfo("User")
assert attr["buffer"] == "RecordBuffer"
assert all([m in meth for m in ("store", "save", "load", "hasField")])
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestFuncs))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
tdsmith/ponysay
|
src/balloon.py
|
Python
|
gpl-3.0
| 8,019 | 0.009993 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
ponysay - Ponysay, cowsay reimplementation for ponies
Copyright (C) 2012, 2013, 2014 Erkin Batu Altunbaş et al.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
    If you intend to redistribute ponysay or a fork of it commercially,
    note that it contains aggregated images, some of which may not be
    commercially redistributable; you would be required to remove those.
    To determine whether or not you may commercially redistribute an
    image, check that the line ‘FREE: yes’ is included inside the image
    between two ‘$$$’ lines, with ‘FREE’ in upper case and directly
    followed by the colon.
'''
from common import *
from ucs import *
class Balloon():
'''
Balloon format class
'''
def __init__(self, link, linkmirror, linkcross, ww, ee, nw, nnw, n, nne, ne, nee, e, see, se, sse, s, ssw, sw, sww, w, nww):
'''
Constructor
@param link:str The \-directional balloon line character
@param linkmirror:str The /-directional balloon line character
        @param  linkcross:str         The /-directional balloon crossing a \-directional balloon line character
@param ww:str See the info manual
@param ee:str See the info manual
@param nw:list<str> See the info manual
@param nnw:list<str> See the info manual
@param n:list<str> See the info manual
@param nne:list<str> See the info manual
@param ne:list<str> See the info manual
@param nee:str See the info manual
@param e:str See the info manual
@param see:str See the info manual
@param se:list<str> See the info manual
@param sse:list<str> See the info manual
        @param  s:list<str>           See the info manual
@param ssw:list<str> See the info manual
@param sw:list<str> See the info manual
@param sww:str See the info manual
@param w:str See the info manual
@param nww:str See the info manual
'''
(self.link, self.linkmirror, self.linkcross) = (link, linkmirror, linkcross)
(self.ww, self.ee) = (ww, ee)
(self.nw, self.ne, self.se, self.sw) = (nw, ne, se, sw)
        (self.nnw, self.n, self.nne) = (nnw, n, nne)
(self.nee, self.e, self.see) = (nee, e, see)
(self.sse, self.s, self.ssw) = (sse, s, ssw)
(self.sww, self.w, self.nww) = (sww, w, nww)
_ne = max(ne, key = UCS.dispLen)
_nw = max(nw, key = UCS.dispLen)
_se = max(se, key = UCS.dispLen)
_sw = max(sw, key = UCS.dispLen)
minE = UCS.dispLen(max([_ne, nee, e, see, _se, ee], key = UCS.dispLen))
minW = UCS.dispLen(max([_nw, nww, e, sww, _sw, ww], key = UCS.dispLen))
minN = len(max([ne, nne, n, nnw, nw], key = len))
minS = len(max([se, sse, s, ssw, sw], key = len))
self.minwidth = minE + minE
self.minheight = minN + minS
def get(self, minw, minh, lines, lencalc):
'''
Generates a balloon with a message
@param minw:int The minimum number of columns of the balloon
@param minh:int The minimum number of lines of the balloon
@param lines:list<str> The text lines to display
@param lencalc:int(str) Function used to compute the length of a text line
@return :str The balloon as a formated string
'''
## Get dimension
h = self.minheight + len(lines)
w = self.minwidth + lencalc(max(lines, key = lencalc))
if w < minw: w = minw
if h < minh: h = minh
## Create edges
if len(lines) > 1:
(ws, es) = ({0 : self.nww, len(lines) - 1 : self.sww}, {0 : self.nee, len(lines) - 1 : self.see})
for j in range(1, len(lines) - 1):
ws[j] = self.w
es[j] = self.e
else:
(ws, es) = ({0 : self.ww}, {0 : self.ee})
rc = []
## Create the upper part of the balloon
for j in range(0, len(self.n)):
outer = UCS.dispLen(self.nw[j]) + UCS.dispLen(self.ne[j])
inner = UCS.dispLen(self.nnw[j]) + UCS.dispLen(self.nne[j])
if outer + inner <= w:
rc.append(self.nw[j] + self.nnw[j] + self.n[j] * (w - outer - inner) + self.nne[j] + self.ne[j])
else:
rc.append(self.nw[j] + self.n[j] * (w - outer) + self.ne[j])
        ## Encapsulate the message inside the left and right edges of the balloon
for j in range(0, len(lines)):
rc.append(ws[j] + lines[j] + ' ' * (w - lencalc(lines[j]) - UCS.dispLen(self.w) - UCS.dispLen(self.e)) + es[j])
## Create the lower part of the balloon
for j in range(0, len(self.s)):
outer = UCS.dispLen(self.sw[j]) + UCS.dispLen(self.se[j])
inner = UCS.dispLen(self.ssw[j]) + UCS.dispLen(self.sse[j])
if outer + inner <= w:
rc.append(self.sw[j] + self.ssw[j] + self.s[j] * (w - outer - inner) + self.sse[j] + self.se[j])
else:
rc.append(self.sw[j] + self.s[j] * (w - outer) + self.se[j])
return '\n'.join(rc)
@staticmethod
def fromFile(balloonfile, isthink):
'''
Creates the balloon style object
@param balloonfile:str The file with the balloon style, may be `None`
@param isthink:bool Whether the ponythink command is used
@return :Balloon Instance describing the balloon's style
'''
## Use default balloon if none is specified
if balloonfile is None:
if isthink:
return Balloon('o', 'o', 'o', '( ', ' )', [' _'], ['_'], ['_'], ['_'], ['_ '], ' )', ' )', ' )', ['- '], ['-'], ['-'], ['-'], [' -'], '( ', '( ', '( ')
return Balloon('\\', '/', 'X', '< ', ' >', [' _'], ['_'], ['_'], ['_'], ['_ '], ' \\', ' |', ' /', ['- '], ['-'], ['-'], ['-'], [' -'], '\\ ', '| ', '/ ')
## Initialise map for balloon parts
map = {}
for elem in ('\\', '/', 'X', 'ww', 'ee', 'nw', 'nnw', 'n', 'nne', 'ne', 'nee', 'e', 'see', 'se', 'sse', 's', 'ssw', 'sw', 'sww', 'w', 'nww'):
map[elem] = []
## Read all lines in the balloon file
with open(balloonfile, 'rb') as balloonstream:
data = balloonstream.read().decode('utf8', 'replace')
data = [line.replace('\n', '') for line in data.split('\n')]
## Parse the balloon file, and fill the map
last = None
for line in data:
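            # a '<key>:<value>' line starts a new balloon part; a line beginning with ':'
            # appends a continuation row to the most recently seen key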
if len(line) > 0:
if line[0] == ':':
map[last].append(line[1:])
else:
last = line[:line.index(':')]
value = line[len(last) + 1:]
map[last].append(value)
## Return the balloon
return Balloon(map['\\'][0], map['/'][0], map['X'][0], map['ww'][0], map['ee'][0], map['nw'], map['nnw'], map['n'],
map['nne'], map['ne'], map['nee'][0], map['e'][0], map['see'][0], map['se'], map['sse'],
map['s'], map['ssw'], map['sw'], map['sww'][0], map['w'][0], map['nww'][0])
|
RudolfCardinal/crate
|
crate_anon/preprocess/postcodes.py
|
Python
|
gpl-3.0
| 52,340 | 0 |
#!/usr/bin/env python
"""
crate_anon/preprocess/postcodes.py
===============================================================================
Copyright (C) 2015-2021 Rudolf Cardinal (rudolf@pobox.com).
This file is part of CRATE.
CRATE is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CRATE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CRATE. If not, see <https://www.gnu.org/licenses/>.
===============================================================================
**Fetches UK postcode information and creates a database.**
Code-Point Open, CSV, GB
- https://www.ordnancesurvey.co.uk/business-and-government/products/opendata-products.html
- https://www.ordnancesurvey.co.uk/business-and-government/products/code-point-open.html
- https://www.ordnancesurvey.co.uk/opendatadownload/products.html
- http://www.nationalarchives.gov.uk/doc/open-government-licence/version/3/
Office for National Statistics Postcode Database (ONSPD):
- https://geoportal.statistics.gov.uk/geoportal/catalog/content/filelist.page
- e.g. ONSPD_MAY_2016_csv.zip
- http://www.ons.gov.uk/methodology/geography/licences
Background:
- OA = Output Area
- smallest: >=40 households, >=100 people
- 181,408 OAs in England & Wales
- LSOA = Lower Layer Super Output Area
- 34,753 LSOAs in England & Wales
- MSOA = Middle Layer Super Output Area
- 7,201 MSOAs in England & Wales
- WZ = Workplace Zone
- https://www.ons.gov.uk/methodology/geography/ukgeographies/censusgeography#workplace-zone-wz
- https://www.ons.gov.uk/methodology/geography/ukgeographies/censusgeography#output-area-oa
""" # noqa
from abc import ABC, ABCMeta, abstractmethod
import argparse
import csv
import datetime
import logging
import os
import sys
# import textwrap
from typing import (Any, Dict, Generator, Iterable, List, Optional, TextIO,
Tuple)
from cardinal_pythonlib.argparse_func import RawDescriptionArgumentDefaultsHelpFormatter # noqa
from cardinal_pythonlib.dicts import rename_key
from cardinal_pythonlib.extract_text import wordwrap
from cardinal_pythonlib.fileops import find_first
from cardinal_pythonlib.logs import configure_logger_for_colour
import openpyxl
from openpyxl.cell.cell import Cell
import prettytable
from sqlalchemy import (
Column,
create_engine,
Date,
Integer,
Numeric,
String,
)
from sqlalchemy.ext.declarative import declarative_base, DeclarativeMeta
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.session import Session
from sqlalchemy.sql.schema import MetaData, Table
# import xlrd
from crate_anon.anonymise.constants import CHARSET, TABLE_KWARGS
from crate_anon.common.constants import EnvVar
log = logging.getLogger(__name__)
metadata = MetaData()
if EnvVar.GENERATING_CRATE_DOCS in os.environ:
DEFAULT_ONSPD_DIR = "/path/to/unzipped/ONSPD/download"
else:
DEFAULT_ONSPD_DIR = os.path.join(
os.path.expanduser("~"), "dev", "ons", "ONSPD_Nov2019"
)
DEFAULT_REPORT_EVERY = 1000
DEFAULT_COMMIT_EVERY = 10000
YEAR_MONTH_FMT = "%Y%m"
CODE_LEN = 9 # many ONSPD codes have this length
NAME_LEN = 80 # seems about right; a bit more than the length of many
# =============================================================================
# Ancillary functions
# =============================================================================
def convert_date(d: Dict[str, Any], key: str) -> None:
"""
Modifies ``d[key]``, if it exists, to convert it to a
:class:`datetime.datetime` or ``None``.
Args:
d: dictionary
key: key
"""
if key not in d:
return
value = d[key]
if value:
d[key] = datetime.datetime.strptime(value,
YEAR_MONTH_FMT)
else:
d[key] = None
def convert_int(d: Dict[str, Any], key: str) -> None:
"""
Modifies ``d[key]``, if it exists, to convert it to an int or ``None``.
Args:
d: dictionary
key: key
"""
if key not in d:
return
value = d[key]
if value is None or (isinstance(value, str) and not value.strip()):
d[key] = None
else:
d[key] = int(value)
def convert_float(d: Dict[str, Any], key: str) -> None:
"""
Modifies ``d[key]``, if it exists, to convert it to a float or ``None``.
Args:
d: dictionary
key: key
"""
if key not in d:
return
value = d[key]
if value is None or (isinstance(value, str) and not value.strip()):
d[key] = None
else:
d[key] = float(value)
def values_from_row(row: Iterable[Cell]) -> List[Any]:
"""
Returns all values from a spreadsheet row.
For the ``openpyxl`` interface to XLSX files.
"""
values = [] # type: List[Any]
for cell in row:
values.append(cell.value)
return values
def commit_and_announce(session: Session) -> None:
"""
Commits an SQLAlchemy ORM session and says so.
"""
log.info("COMMIT")
session.commit()
# =============================================================================
# Extend SQLAlchemy Base class
# =============================================================================
class ExtendedBase(object):
"""
Mixin to extend the SQLAlchemy ORM Base class by specifying table creation
parameters (specifically, for MySQL, to set the character set and
MySQL engine).
Only used in the creation of Base; everything else then inherits from Base
as usual.
See
http://docs.sqlalchemy.org/en/latest/orm/extensions/declarative/mixins.html
"""
__table_args__ = TABLE_KWARGS
Base = declarative_base(metadata=metadata, cls=ExtendedBase)
# =============================================================================
# Go to considerable faff to provide type hints for lookup classes
# =============================================================================
class GenericLookupClassMeta(DeclarativeMeta, ABCMeta):
"""
To avoid: "TypeError: metaclass conflict: the metaclass of a derived class
must be a (non-strict) subclass of the metaclasses of all its bases".
We want a class that's a subclass of Base and ABC. So we can work out their
metaclasses:
.. code-block:: python
from abc import ABC
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql.schema import MetaData
class ExtendedBase(object):
__table_args__ = {'mysql_charset': 'utf8', 'mysql_engine': 'InnoDB'}
metadata = MetaData()
Base = declarative_base(metadata=metadata, cls=ExtendedBase)
type(Base) # metaclass of Base: <class: 'sqlalchemy.ext.declarative.api.DeclarativeMeta'>
type(ABC) # metaclass of ABC: <class 'abc.ABCMeta'>
and thus define this class to inherit from those two metaclasses, so it can
be the metaclass we want.
""" # noqa
pass
class GenericLookupClassType(Base, ABC, metaclass=GenericLookupClassMeta):
"""
Type hint for our various simple lookup classes.
Alternatives that don't work: Type[Base], Type[BASETYPE], type(Base).
"""
__abstract__ = True # abstract as seen by SQLAlchemy
# ... avoids SQLAlchemy error: "sqlalchemy.exc.InvalidRequestError: Class
# <class '__main__.GenericLookupClassType'> does not have a __table__ or
# __tablename__ specified and does not inherit from an existing
# table-mapped class."
@abstractmethod
def __call__(self, *args, **kwargs) -> None:
# Represents __init__... not sure I have this quite right, but it
# appeases PyCharm; see populate_generic_lookup_table()
pass
@property
@abstractmethod
def _
|
stlemme/python-dokuwiki-export
|
visitor/__init__.py
|
Python
|
mit
| 301 | 0 |
from .visitor import Visitor
from .metavisitor import MetaVisitor
from .experiments import ExperimentsVisitor
from .usedby import UsedByVisitor
from .testedscenarios import TestedScenariosVisitor
from .invalidentities import InvalidEntitiesVisitor
# from presenter.gesurvey import GESurveyPresenter
|
malmiron/incubator-airflow
|
tests/models.py
|
Python
|
apache-2.0
| 119,658 | 0.000911 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import inspect
import logging
import os
import re
import textwrap
import time
import unittest
import urllib
from tempfile import NamedTemporaryFile, mkdtemp
import pendulum
import six
from mock import ANY, Mock, mock_open, patch
from parameterized import parameterized
from airflow import AirflowException, configuration, models, settings
from airflow.exceptions import AirflowDagCycleException, AirflowSkipException
from airflow.jobs import BackfillJob
from airflow.models import Connection
from airflow.models import DAG, TaskInstance as TI
from airflow.models import DagModel, DagRun, DagStat
from airflow.models import KubeResourceVersion, KubeWorkerIdentifier
from airflow.models import SkipMixin
from airflow.models import State as ST
from airflow.models import XCom
from airflow.models import clear_task_instances
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.python_operator import ShortCircuitOperator
from airflow.operators.subdag_operator import SubDagOperator
from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep
from airflow.utils import timezone
from airflow.utils.dag_processing import SimpleTaskInstance
from airflow.utils.db import create_session
from airflow.utils.state import State
from airflow.utils.trigger_rule import TriggerRule
from airflow.utils.weight_rule import WeightRule
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
class DagTest(unittest.TestCase):
def test_params_not_passed_is_empty_dict(self):
"""
Test that when 'params' is _not_ passed to a new Dag, that the params
attribute is set to an empty dictionary.
"""
dag = models.DAG('test-dag')
self.assertEqual(dict, type(dag.params))
self.assertEqual(0, len(dag.params))
def test_params_passed_and_params_in_default_args_no_override(self):
"""
Test that when 'params' exists as a key passed to the default_args dict
in addition to params being passed explicitly as an argument to the
dag, that the 'params' key of the default_args dict is merged with the
dict of the params argument.
"""
params1 = {'parameter1': 1}
params2 = {'parameter2': 2}
dag = models.DAG('test-dag',
default_args={'params': params1},
params=params2)
params_combined = params1.copy()
params_combined.update(params2)
self.assertEqual(params_combined, dag.params)
def test_dag_as_context_manager(self):
"""
Test DAG as a context manager.
When used as a context manager, Operators are automatically added to
the DAG (unless they specify a different DAG)
"""
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
dag2 = DAG(
'dag2',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner2'})
with dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2', dag=dag2)
self.assertIs(op1.dag, dag)
self.assertEqual(op1.owner, 'owner1')
self.assertIs(op2.dag, dag2)
self.assertEqual(op2.owner, 'owner2')
with dag2:
op3 = DummyOperator(task_id='op3')
self.assertIs(op3.dag, dag2)
self.assertEqual(op3.owner, 'owner2')
with dag:
with dag2:
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
self.assertIs(op4.dag, dag2)
self.assertIs(op5.dag, dag)
self.assertEqual(op4.owner, 'owner2')
self.assertEqual(op5.owner, 'owner1')
with DAG('creating_dag_in_cm', start_date=DEFAULT_DATE) as dag:
DummyOperator(task_id='op6')
self.assertEqual(dag.dag_id, 'creating_dag_in_cm')
self.assertEqual(dag.tasks[0].task_id, 'op6')
with dag:
with dag:
op7 = DummyOperator(task_id='op7')
op8 = DummyOperator(task_id='op8')
op9 = DummyOperator(task_id='op8')
op9.dag = dag2
self.assertEqual(op7.dag, dag)
self.assertEqual(op8.dag, dag)
self.assertEqual(op9.dag, dag2)
def test_dag_topological_sort(self):
dag = DAG(
'dag',
            start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> B
# A -> C -> D
        # ordered: B, D, C, A or D, B, C, A or D, C, B, A
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op1.set_upstream([op2, op3])
op3.set_upstream(op4)
topological_list = dag.topological_sort()
logging.info(topological_list)
tasks = [op2, op3, op4]
self.assertTrue(topological_list[0] in tasks)
tasks.remove(topological_list[0])
self.assertTrue(topological_list[1] in tasks)
tasks.remove(topological_list[1])
self.assertTrue(topological_list[2] in tasks)
tasks.remove(topological_list[2])
self.assertTrue(topological_list[3] == op1)
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# C -> (A u B) -> D
# C -> E
# ordered: E | D, A | B, C
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op5 = DummyOperator(task_id='E')
op1.set_downstream(op3)
op2.set_downstream(op3)
op1.set_upstream(op4)
op2.set_upstream(op4)
op5.set_downstream(op3)
topological_list = dag.topological_sort()
logging.info(topological_list)
set1 = [op4, op5]
self.assertTrue(topological_list[0] in set1)
set1.remove(topological_list[0])
set2 = [op1, op2]
set2.extend(set1)
self.assertTrue(topological_list[1] in set2)
set2.remove(topological_list[1])
self.assertTrue(topological_list[2] in set2)
set2.remove(topological_list[2])
self.assertTrue(topological_list[3] in set2)
self.assertTrue(topological_list[4] == op3)
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
self.assertEquals(tuple(), dag.topological_sort())
def test_dag_naive_default_args_start_date(self):
dag = DAG('DAG', default_args={'start_date': datetime.datetime(2018, 1, 1)})
self.assertEqual(dag.timezone, settings.TIMEZONE)
dag = DAG('DAG', start_date=datetime.datetime(2018, 1, 1))
self.assertEqual(dag.timezone, settings.TIMEZONE)
def test_dag_none_default_args_start_da
|
samabhi/pstHealth
|
venv/lib/python2.7/site-packages/easy_thumbnails/sorl-tests/classes.py
|
Python
|
mit
| 8,008 | 0 |
# -*- coding: utf-8 -*-
import os
import time
from StringIO import StringIO
from PIL import Image
from django.conf import settings
from easy_thumbnails.base import Thumbnail
from easy_thumbnails.main import DjangoThumbnail, get_thumbnail_setting
from easy_thumbnails.processors import dynamic_import, get_valid_options
from easy_thumbnails.tests.base import BaseTest, RELATIVE_PIC_NAME, PIC_NAME,\
THUMB_NAME, PIC_SIZE
class ThumbnailTest(BaseTest):
def testThumbnails(self):
# Thumbnail
thumb = Thumbnail(source=PIC_NAME, dest=THUMB_NAME % 1,
requested_size=(240, 240))
self.verify_thumbnail((240, 180), thumb)
# Cropped thumbnail
thumb = Thumbnail(source=PIC_NAME, dest=THUMB_NAME % 2,
requested_size=(240, 240), opts=['crop'])
self.verify_thumbnail((240, 240), thumb)
# Thumbnail with altered JPEG quality
thumb = Thumbnail(source=PIC_NAME, dest=THUMB_NAME % 3,
requested_size=(240, 240), quality=95)
self.verify_thumbnail((240, 180), thumb)
def testRegeneration(self):
# Create thumbnail
thumb_name = THUMB_NAME % 4
thumb_size = (240, 240)
Thumbnail(source=PIC_NAME, dest=thumb_name, requested_size=thumb_size)
self.images_to_delete.add(thumb_name)
thumb_mtime = os.path.getmtime(thumb_name)
time.sleep(1)
# Create another instance, shouldn't generate a new thumb
Thumbnail(source=PIC_NAME, dest=thumb_name, requested_size=thumb_size)
self.assertEqual(os.path.getmtime(thumb_name), thumb_mtime)
# Recreate the source image, then see if a new thumb is generated
Image.new('RGB', PIC_SIZE).save(PIC_NAME, 'JPEG')
Thumbnail(source=PIC_NAME, dest=thumb_name, requested_size=thumb_size)
self.assertNotEqual(os.path.getmtime(thumb_name), thumb_mtime)
def testFilelikeDest(self):
# Thumbnail
filelike_dest = StringIO()
thumb = Thumbnail(source=PIC_NAME, dest=filelike_dest,
requested_size=(240, 240))
self.verify_thumbnail((240, 180), thumb)
def testRGBA(self):
# RGBA image
rgba_pic_name = os.path.join(settings.MEDIA_ROOT,
'easy-thumbnails-test_rgba_source.png')
Image.new('RGBA', PIC_SIZE).save(rgba_pic_name)
self.images_to_delete.add(rgba_pic_name)
# Create thumb and verify it's still RGBA
rgba_thumb_name = os.path.join(settings.MEDIA_ROOT,
'easy-thumbnails-test_rgba_dest.png')
thumb = Thumbnail(source=rgba_pic_name, dest=rgba_thumb_name,
requested_size=(240, 240))
self.verify_thumbnail((240, 180), thumb, expected_mode='RGBA')
class DjangoThumbnailTest(BaseTest):
def setUp(self):
super(DjangoThumbnailTest, self).setUp()
# Add another source image in a sub-directory for testing subdir and
# basedir.
self.sub_dir = os.path.join(settings.MEDIA_ROOT, 'test_thumbnail')
try:
os.mkdir(self.sub_dir)
except OSError:
pass
self.pic_subdir = os.path.join(self.sub_dir, RELATIVE_PIC_NAME)
Image.new('RGB', PIC_SIZE).save(self.pic_subdir, 'JPEG')
self.images_to_delete.add(self.pic_subdir)
def testFilenameGeneration(self):
basename = RELATIVE_PIC_NAME.replace('.', '_')
# Basic filename
thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME,
requested_size=(240, 120))
expected = os.path.join(settings.MEDIA_ROOT, basename)
expected += '_240x120_q85.jpg'
self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
# Changed quality and cropped
thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME,
requested_size=(240, 120), opts=['crop'],
quality=95)
expected = os.path.join(settings.MEDIA_ROOT, basename)
expected += '_240x120_crop_q95.jpg'
self.verify_thumbnail((240, 120), thumb, expected_filename=expected)
# All options on
processors = dynamic_import(get_thumbnail_setting('PROCESSORS'))
valid_options = get_valid_options(processors)
thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME,
requested_size=(240, 120), opts=valid_options)
expected = (os.path.join(settings.MEDIA_ROOT, basename) + '_240x120_'
'autocrop_bw_crop_detail_max_sharpen_upscale_q85.jpg')
self.verify_thumbnail((240, 120), thumb, expected_filename=expected)
# Different basedir
basedir = 'easy-thumbnails-test-basedir'
self.change_settings.change({'BASEDIR': basedir})
thumb = DjangoThumbnail(relative_source=self.pic_subdir,
requested_size=(240, 120))
expected = os.path.join(basedir, self.sub_dir, basename)
expected += '_240x120_q85.jpg'
self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
# Different subdir
self.change_settings.change({'BASEDIR': '', 'SUBDIR': 'subdir'})
thumb = DjangoThumbnail(relative_source=self.pic_subdir,
requested_size=(240, 120))
expected = os.path.join(settings.MEDIA_ROOT,
os.path.basename(self.sub_dir), 'subdir',
basename)
expected += '_240x120_q85.jpg'
self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
# Different prefix
self.change_settings.change({'SUBDIR': '', 'PREFIX': 'prefix-'})
thumb = DjangoThumbnail(relative_source=self.pic_subdir,
requested_size=(240, 120))
expected = os.path.join(self.sub_dir, 'prefix-' + basename)
expected += '_240x120_q85.jpg'
self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
def testAlternateExtension(self):
basename = RELATIVE_PIC_NAME.replace('.', '_')
# Control JPG
thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME,
requested_size=(240, 120))
expected = os.path.join(settings.MEDIA_ROOT, basename)
expected += '_240x120_q85.jpg'
expected_jpg = expected
self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
# Test PNG
thumb = DjangoThumbnail(relative_source=RELATIVE_PIC_NAME,
requested_size=(240, 120), extension='png')
expected = os.path.join(settings.MEDIA_ROOT, basename)
expected += '_240x120_q85.png'
self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
# Compare the file size to make sure it's not just saving as a JPG with
# a different extension.
self.assertNotEqual(os.path.getsize(expected_jpg),
os.path.getsize(expected))
def testUnicodeName(self):
unicode_name = 'easy-thumbnails-ążśź_source.jpg'
unicode_path = os.path.join(settings.MEDIA_ROOT, unicode_name)
Image.new('RGB', PIC_SIZE).save(unicode_path)
self.images_to_delete.add(unicode_path)
thumb = DjangoThumbnail(relative_source=unicode_name,
requested_size=(240, 120))
base_name = unicode_name.replace('.', '_')
expected = os.path.join(settings.MEDIA_ROOT,
base_name + '_240x120_q85.jpg')
        self.verify_thumbnail((160, 120), thumb, expected_filename=expected)
def tearDown(self):
super(DjangoThumbnailTest, self).tearDown()
        subdir = os.path.join(self.sub_dir, 'subdir')
if os.path.exists(subdir):
os.rmdir(subdir)
os.rmdir(self.sub_dir)
|
Makeystreet/makeystreet
|
woot/apps/catalog/migrations/0025_auto__add_topshops.py
|
Python
|
apache-2.0
| 24,810 | 0.006771 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TopShops'
db.create_table(u'catalog_topshops', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('shop', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalog.Shop'])),
('score', self.gf('django.db.models.fields.IntegerField')()),
('time', self.gf('django.db.models.fields.DateTimeField')()),
))
db.send_create_signal(u'catalog', ['TopShops'])
def backwards(self, orm):
# Deleting model 'TopShops'
db.delete_table(u'catalog_topshops')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'catalog.comment': {
'Meta': {'object_name': 'Comment'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
'body': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.documentation': {
'Meta': {'object_name': 'Documentation'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']", 'null': 'True', 'blank': 'True'})
},
u'catalog.emailcollect': {
'Meta': {'object_name': 'EmailCollect'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '30'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'catalog.image': {
'Meta': {'object_name': 'Image'},
'added_time': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'large_url': ('django.db.models.fields.URLField', [], {'max_length': '1000'}),
'small_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likemakey': {
'Meta': {'object_name': 'LikeMakey'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'makey': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Makey']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeproduct': {
'Meta': {'object_name': 'LikeProduct'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeproductdescription': {
'Meta': {'object_name': 'LikeProductDescription'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product_description': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.ProductDescription']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeproductimage': {
'Meta': {'object_name': 'LikeProductImage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.ProductImage']"}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.likeshop': {
'Meta': {'object_name': 'LikeShop'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'shop': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Shop']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.liketutorial': {
'Meta': {'object_name': 'LikeTutorial'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Product']"}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'tutorial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalog.Tutorial']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.list': {
'Meta': {'object_name': 'List'},
'access': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'access'", 'symmetrical': 'False', 'to': u"orm['django_facebook.FacebookCustomUser']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.ListItem']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'owner'", 'to': u"orm['django_facebook.FacebookCustomUser']"})
},
u'catalog.listgroup': {
'Meta': {'object_name': 'ListGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lists': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalog.List']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
|
Nihn/fuzzy_logic
|
fuzzy/utils/__init__.py
|
Python
|
apache-2.0
| 44 | 0 |
from functions import *
from utils import *
|
jlec/coot
|
rcrane/coot_utils_adapter.py
|
Python
|
gpl-3.0
| 1,561 | 0.012172 |
#!/usr/bin/env python
"""Allows functions from coot_utils to be imported"""
# Copyright 2011, 2012 Kevin Keating
#
# Licensed under the Educational Community License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.osedu.org/licenses/ECL-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
#"import coot_utils" results in an error, so this module is required to retrieve
#functions that are defined in coot_utils
import os, sys
from os.path import exists, join
from coot import *
use_gui_qm = False #coot_utils requires this variable to be defined
#search the Python path for coot_utils
for curpath in sys.path:
abspath = join(curpath, "coot_utils.py")
if exists(abspath):
#when we find it, exec it
#but first exec redefine_functions.py if it's in the same directory
        #redefine_functions.py renames func_py() to func(), which used to be done in coot_utils.py itself
#new versions of coot_utils.py requires this renaming to be done before being exec'ed
redefAbspath = join(curpath, "redefine_functions.py")
if exists(redefAbspath):
execfile(redefAbspath)
execfile(abspath)
break
|
sdpython/python3_module_template
|
_unittests/ut_module/test_convert_notebooks.py
|
Python
|
mit
| 1,224 | 0.000817 |
"""
@brief test log(time=0s)
"""
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.filehelper import explore_folder_iterfile
from pyquickhelper.ipythonhelper import upgrade_notebook, remove_execution_number
class TestConvertNotebooks(unittest.TestCase):
"""Converts notebooks from v3 to v4. Should not be needed anymore."""
def test_convert_notebooks(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
fold = os.path.abspath(os.path.dirname(__file__))
        fold2 = os.path.normpath(
os.path.join(fold, "..", "..", "_doc", "notebooks"))
        for nbf in explore_folder_iterfile(fold2, pattern=".*[.]ipynb"):
t = upgrade_notebook(nbf)
if t:
fLOG("modified", nbf)
# remove numbers
remove_execution_number(nbf, nbf)
fold2 = os.path.normpath(os.path.join(fold, "..", "..", "_unittests"))
for nbf in explore_folder_iterfile(fold2, pattern=".*[.]ipynb"):
t = upgrade_notebook(nbf)
if t:
fLOG("modified", nbf)
if __name__ == "__main__":
unittest.main()
|
paour/weblate
|
weblate/trans/tests/test_commands.py
|
Python
|
gpl-3.0
| 8,853 | 0 |
# -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2014 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Tests for management commands.
"""
from django.test import TestCase
from weblate.trans.tests.test_models import RepoTestCase
from weblate.trans.models import SubProject
from django.core.management import call_command
from django.core.management.base import CommandError
import django
# Django 1.5 changes behavior here
if django.VERSION >= (1, 5):
COMMAND_EXCEPTION = CommandError
else:
COMMAND_EXCEPTION = SystemExit
class ImportProjectTest(RepoTestCase):
def test_import(self):
project = self.create_project()
call_command(
'import_project',
'test',
self.repo_path,
'master',
'**/*.po',
)
# We should have loaded four subprojects
self.assertEqual(project.subproject_set.count(), 4)
def test_import_po(self):
project = self.create_project()
call_command(
'import_project',
'test',
self.repo_path,
'master',
'**/*.po',
file_format='po'
)
# We should have loaded four subprojects
self.assertEqual(project.subproject_set.count(), 4)
def test_import_invalid(self):
project = self.create_project()
self.assertRaises(
COMMAND_EXCEPTION,
call_command,
'import_project',
'test',
self.repo_path,
'master',
'**/*.po',
file_format='INVALID'
)
# We should have loaded none subprojects
self.assertEqual(project.subproject_set.count(), 0)
def test_import_aresource(self):
project = self.create_project()
call_command(
'import_project',
'test',
self.repo_path,
'master',
'**/values-*/strings.xml',
file_format='aresource',
base_file_template='android/values/strings.xml',
)
# We should have loaded one subproject
self.assertEqual(project.subproject_set.count(), 1)
def test_import_aresource_format(self):
project = self.create_project()
call_command(
'import_project',
'test',
self.repo_path,
'master',
'**/values-*/strings.xml',
file_format='aresource',
base_file_template='%s/values/strings.xml',
)
# We should have loaded one subproject
self.assertEqual(project.subproject_set.count(), 1)
def test_re_import(self):
project = self.create_project()
call_command(
'import_project',
'test',
self.repo_path,
'master',
'**/*.po',
)
# We should have loaded four subprojects
self.assertEqual(project.subproject_set.count(), 4)
call_command(
'import_project',
'test',
self.repo_path,
'master',
'**/*.po',
)
# We should load no more subprojects
self.assertEqual(project.subproject_set.count(), 4)
def test_import_against_existing(self):
'''
Test importing with a weblate:// URL
'''
android = self.create_android()
project = android.project
self.assertEqual(project.subproject_set.count(), 1)
call_command(
'import_project',
project.slug,
'weblate://%s/%s' % (project.slug, android.slug),
'master',
'**/*.po',
)
# We should have loaded five subprojects
self.assertEqual(project.subproject_set.count(), 5)
def test_import_missing_project(self):
'''
Test of correct handling of missing project.
'''
self.assertRaises(
COMMAND_EXCEPTION,
call_command,
'import_project',
'test',
self.repo_path,
'master',
'**/*.po',
)
def test_import_missing_wildcard(self):
'''
Test of correct handling of missing wildcard.
'''
self.create_project()
self.assertRaises(
COMMAND_EXCEPTION,
call_command,
'import_project',
'test',
self.repo_path,
'master',
'*/*.po',
)
class BasicCommandTest(TestCase):
def test_versions(self):
call_command('list_versions')
class PeriodicCommandTest(RepoTestCase):
def setUp(self):
super(PeriodicCommandTest, self).setUp()
self.create_subproject()
def test_cleanup(self):
call_command(
'cleanuptrans'
)
def test_update_index(self):
# Test the command
call_command(
'update_index'
)
def test_list_checks(self):
call_command(
'list_ignored_checks'
)
call_command(
'list_ignored_checks',
list_all=True
)
call_command(
'list_ignored_checks',
count=10
)
class CheckGitTest(RepoTestCase):
'''
Base class for handling tests of WeblateCommand
based commands.
'''
command_name = 'checkgit'
def setUp(self):
super(CheckGitTest, self).setUp()
self.create_subproject()
def do_test(self, *args, **kwargs):
call_command(
self.command_name,
*args,
**kwargs
)
def test_all(self):
self.do_test(
all=True,
)
def test_project(self):
self.do_test(
'test',
)
def test_subproject(self):
self.do_test(
'test/test',
)
def test_nonexisting_project(self):
self.assertRaises(
COMMAND_EXCEPTION,
self.do_test,
'notest',
)
def test_nonexisting_subproject(self):
self.assertRaises(
COMMAND_EXCEPTION,
self.do_test,
'test/notest',
)
class CommitPendingTest(CheckGitTest):
command_name = 'commit_pending'
class CommitGitTest(CheckGitTest):
command_name = 'commitgit'
class PushGitTest(CheckGitTest):
command_name = 'pushgit'
class LoadTest(CheckGitTest):
command_name = 'loadpo'
class UpdateChecksTest(CheckGitTest):
command_name = 'updatechecks'
class UpdateGitTest(CheckGitTest):
command_name = 'updategit'
class RebuildIndexTest(CheckGitTest):
command_name = 'rebuild_index'
def test_all_clean(self):
self.do_test(
all=True,
clean=True,
)
class LockTranslationTest(CheckGitTest):
command_name = 'lock_translation'
class UnLockTranslationTest(CheckGitTest):
command_name = 'unlock_translation'
class LockingCommandTest(RepoTestCase):
'''
Test locking and unlocking.
'''
def setUp(self):
super(LockingCommandTest, self).setUp()
self.create_subproject()
def test_locking(self):
subproject = SubProject.objects.all()[0]
self.assertFalse(
SubProject.objects.filter(locked=True).exists()
)
call_command(
'lock_translation',
'{0}/{1}'.format(
subproject.project.slug,
subproject.slug,
)
)
self.assertTr
|
askogvold/jtk
|
src/demo/jython/edu/mines/jtk/awt/PerceptualColorSpaceDemo.py
|
Python
|
apache-2.0
| 5,858 | 0.022704 |
#/****************************************************************************
# Copyright 2015, Colorado School of Mines and others.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#****************************************************************************/
import os,sys
from java.awt.image import *
from java.awt import *
from java.lang import *
from javax.swing import *
import math
from jarray import *
from edu.mines.jtk.awt import ColorMap
from edu.mines.jtk.mosaic import *
from edu.mines.jtk.dsp import LocalSmoothingFilter
from edu.mines.jtk.dsp import Sampling
from edu.mines.jtk.util.ArrayMath import *
from edu.mines.jtk.sgl import *
##############################################################################
# Perceptual Color Map Demo Using CIE L*a*b* Color Space
#
# Humans are terrible at differentiating colors. We can't help it -
# it's biology! The human eye has four types of receptors: the rods which are
# sensitive only to black, white and shades of gray, and cones of which there
# are three types, each responding to a different range of colors. In fact,
# those ranges have some degree of overlap, and not every wavelength range is
# adequately covered.
#
# Because of this, there exist two main sets of colors that are always
# competing for dominance and can not be perceived together: the Red-Green
# pair, and the Yellow-Blue pair. These are known as "color opponents".
#
# Conventional color models such as RGB and CMYK do not adequately reflect
# this physiological bias.
#
# The CIE L*a*b* (or CIELAB) color space addresses this by describing the
# colors visible to the human eye. It is a three-dimensional color space
# where L* represents the lightness of a color, a* represents a color's
# position between the red and green color opponents, and b* represents a
# color's position between blue and yellow.
#
# When we convert color maps and observe the lightness (L*), we immediately
# see distinct inflection points which are observed to be bands
# or contours in the original color map. This can create biases when applied
# to scientific visualization by unnecessarily leading our eyes or creating
# false topography.
#
# There are two ways this demo addresses this. The first method smooths the
# lightness graph thereby reducing the inflection points, which essentially
# "smooths" the sharp bands of color when transitioning hues.
# The second method assigns a new monotonically increasing lightness graph,
# which attempts to approximate that each value change is represented by a
# change in perception.
#
# Author: Chris Engelsma
# Version: 2015.09.27
##############################################################################
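# --- Added illustration (not part of the original demo) ---------------------
# A minimal, pure-Python sketch of the "smooth the lightness" idea described
# above: replace each L* value by the mean of a small window around it, so
# sharp inflection points (the perceived bands) are flattened.  The
# smoothLightness() function further below applies the same idea to the
# interleaved L*a*b* array; the window size of 10 mirrors its default.
def smoothLightnessSketch(values, w=10):
  out = []
  for i in range(len(values)):
    lo, hi = max(0, i - w), min(len(values), i + w)
    out.append(sum(values[lo:hi]) / float(hi - lo))
  return out
# -----------------------------------------------------------------------------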
def main(args):
pp1 = test1()
pp2 = test2()
# pp3 = test3()
pf = PlotFrame(pp1,pp2,PlotFrame.Split.HORIZONTAL)
pf.setDefaultCloseOperation(PlotFrame.EXIT_ON_CLOSE)
pf.setVisible(True)
return
def test1():
rgb,Lab = getRgbAndLab()
L = getLightnessFromLab(Lab)
return plot(L,icm)
def test2():
rgb,Lab = getRgbAndLab()
Lab = smoothLightness(Lab)
L = getLightnessFromLab(Lab)
icm2 = getNewColorModel(Lab)
return plot(L,icm2)
def test3():
rgb,Lab = getRgbAndLab()
Lab = setMonotonicallyIncreasingLightness(Lab)
L = getLightnessFromLab(Lab)
icm2 = getNewColorModel(Lab)
return plot(L,icm2)
def plot(L,icm):
pp = PlotPanel(2,1)
pv = pp.addPixels(0,0,f)
pv.setColorModel(icm)
pv.setOrientation(PixelsView.Orientation.X1DOWN_X2RIGHT)
pv.setInterpolation(PixelsView.Interpolation.LINEAR)
pov = pp.addPoints(1,0,L)
pov.setMarkStyle(PointsView.Mark.FILLED_CIRCLE)
pov.setMarkSize(2)
pov.setLineStyle(PointsView.Line.NONE)
pp.setHLabel(0,"Color value")
pp.setVLabel(1,"Lightness (L*)")
pp.setVLimits(1,0,100)
return pp
def getNewColorModel(Lab):
col = zeros(len(x),Color)
for i in range(len(x)):
j = 3*i
rgb = ColorMap.cieLabToRgb(Lab[j+0],Lab[j+1],Lab[j+2])
col[i] = Color(rgb[0],rgb[1],rgb[2]);
cm = ColorMap(0,1,col)
return cm.getColorModel()
def getRgbAndLab():
cm = ColorMap(icm)
Lab = zerofloat(n*3)
rgb = zerofloat(n*3)
color = zerofloat(3)
for i in range(len(x)):
cieLab = cm.getCieLabFloats(f[i])
color = cm.getRgbFloats(f[i])
rgb[3*i+0] = color[0]
rgb[3*i+1] = color[1]
rgb[3*i+2] = color[2]
Lab[3*i+0] = cieLab[0]
Lab[3*i+1] = cieLab[1]
Lab[3*i+2] = cieLab[2]
return rgb,Lab
def getLightnessFromLab(Lab):
L = zerofloat(len(Lab)/3)
for i in range(len(L)):
L[i] = Lab[3*i]
return L
def setUniformLightness(Lab,v):
for i in range(len(Lab)/3):
Lab[3*i] = v
return Lab
def setMonotonicallyIncreasingLightness(Lab):
for i in range(len(Lab)/3):
Lab[3*i] = i * (50.0/256.0) + 25
return Lab
def smoothLightness(Lab):
w = 10;
n = len(Lab)/3
for k in range(5):
for i in range(n):
lw = max(0,i-w)
rw = min(n,i+w)
val = 0.0
for j in range(lw,rw):
val += Lab[3*j]
val /= rw-lw
Lab[3*i] = val
return Lab
n = 256; d1 = .0039; f1 = 0.0;
x = rampfloat(f1,d1,n)
f = zerofloat(1,n)
for i in range(n):
f[i][0] = x[i]
s1 = Sampling(n,d1,f1)
icm = ColorMap.HUE
##############################################################################
class RunMain(Runnable):
def run(self):
main(sys.argv)
SwingUtilities.invokeLater(RunMain())
|
ncos/lisa
|
src/lisa_drive/scripts/venv/lib/python3.5/site-packages/pip-10.0.1-py3.5.egg/pip/_vendor/chardet/euctwprober.py
|
Python
|
mit
| 1,793 | 0.001673 |
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTW_SM_MODEL
class EUCTWProber(MultiByteCharSetProber):
def __init__(self):
super(EUCTWProber, self).__init__()
self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL)
self.distribution_analyzer = EUCTWDistributionAnalysis()
self.reset()
@property
    def charset_name(self):
return "EUC-TW"
@property
def language(self):
return "Taiwan"
|
srpeiter/ChipDesignCad
|
testing_scripts/mario_drum.py
|
Python
|
gpl-3.0
| 674 | 0.093472 |
import numpy as np
from stcad.source_dev.chip import Base_Chip
from stcad.source_dev.objects import Drum
import gdsCAD as cad
chipsize = 50
chip = Base_Chip('drum', chipsize, chipsize,label=False)
inductor = Drum(base_layer = 1,
sacrificial_layer = 2 ,
top_layer = 3,
outer_radius = 9,
head_radius = 7,
electrode_radius = 6,
cable_width = 0.5,
sacrificial_tail_width = 3,
sacrificial_tail_length = 3,
opening_width = 4,
N_holes = 3,
hole_angle = 45,
hole_distance_to_center = 4.5,
hole_distance_to_edge = 0.5,
name = '')
chip.add_component(inductor, (0,0))
chip.save_to_gds(show=False, save=True,loc='')
|
nnscr/nnscr.de
|
pages/admin.py
|
Python
|
mit
| 430 | 0 |
from django.contrib import admin
from django import forms
from . import models
from nnmarkdown.form import MarkdownWidget
from nnscr.admin import site
class PageAdminForm(forms.ModelForm):
class Meta:
model = models.Page
exclude = ("slug",)
widgets = {
"text": MarkdownWidget
}
class PageAdmin(admin.ModelAdmin):
form = PageAdminForm
site.register(models.Page, PageAdmin)
| |
tstirrat15/exercism-python-responses
|
kindergarten-garden/python/kindergarten-garden/garden.py
|
Python
|
gpl-2.0
| 1,032 | 0.002907 |
class Garden(object):
"""An object implementing a Kindergarten
Garden."""
def __init__(self, cup_string, students=None):
self.garden_rows = cup_string.split('\n')
|
if students:
self.class_list = sorted(students)
else:
self.class_list = [
"Alice", "Bob", "Charlie", "David",
"Eve", "Fred", "Ginny", "Harriet",
|
"Ileana", "Joseph", "Kincaid", "Larry"
]
self.plants_dict = {
"R": "Radishes",
"C": "Clover",
"G": "Grass",
"V": "Violets"
}
self.cups_per_child = 2
def plants(self, child_name):
index = self.cups_per_child * self.class_list.index(child_name)
child_plant_label_lists = [row[index:index + self.cups_per_child] for row in self.garden_rows]
child_plant_labels = ''.join(child_plant_label_lists)
child_plants = [self.plants_dict[label] for label in child_plant_labels]
return child_plants
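# Worked example (added for illustration, using the default class list):
# Alice is first alphabetically, so she owns the first two cups of each row:
#   Garden("VC\nRC").plants("Alice")  ->  ['Violets', 'Clover', 'Radishes', 'Clover']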
|
andialbrecht/django-hvad
|
nani/tests/forms_inline.py
|
Python
|
bsd-3-clause
| 1,563 | 0.008317 |
# -*- coding: utf-8 -*-
from nani.admin import TranslatableModelAdminMixin
from nani.forms import translatable_inlineformset_factory
from nani.forms import TranslatableModelForm, TranslatableModelFormMetaclass
from nani.test_utils.context_managers import LanguageOverride
from nani.test_utils.testcase import NaniTestCase
from nani.test_utils.request_factory import RequestFactory
from testproject.app.models import Normal, Related
from django.db import models
class TestBasicInline(NaniTestCase):
def setUp(self):
|
with LanguageOverride("en"):
self.object = Normal.objects.language().create(shared_field="test", translated_field="translated test")
rf = RequestFactory()
self.request = rf.post('/url/')
def test_create_fields_inline(self):
with LanguageOverride("en"):
# Fixtures (should eventually be shared with other tests)
            translate_mixin = TranslatableModelAdminMixin()
formset = translatable_inlineformset_factory(translate_mixin._language(self.request),
Normal, Related)(#self.request.POST,
instance=self.object)
self.assertTrue(formset.forms[0].fields.has_key("normal"))
self.assertTrue(formset.forms[0].fields.has_key("translated"))
self.assertTrue(formset.forms[0].fields.has_key("translated_to_translated"))
self.assertFalse(formset.forms[0].fields.has_key("language_code"))
|
google-research/rigl
|
rigl/experimental/jax/datasets/cifar10_test.py
|
Python
|
apache-2.0
| 4,140 | 0.006522 |
# coding=utf-8
# Copyright 2022 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for weight_symmetry.datasets.cifar10."""
from absl.testing import absltest
import numpy as np
from rigl.experimental.jax.datasets import cifar10
class CIFAR10DatasetTest(absltest.TestCase):
"""Test cases for CIFAR10 Dataset."""
def setUp(self):
"""Common setup routines/variables for test cases."""
super().setUp()
self._batch_size = 16
self._batch_size_test = 10
self._shuffle_buffer_size = 8
self._dataset = cifar10.CIFAR10Dataset(
self._batch_size,
batch_size_test=self._batch_size_test,
shuffle_buffer_size=self._shuffle_buffer_size)
def test_create_dataset(self):
"""Tests creation of dataset."""
self.assertIsInstance(self._dataset, cifar10.CIFAR10Dataset)
def test_train_image_dims_content(self):
"""Tests dimensions and contents of test data."""
iterator = self._dataset.get_train()
sample = next(iterator)
image, label = sample['image'], sample['label']
with self.subTest(name='DataShape'):
self.assertTupleEqual(image.shape, (self._batch_size, 32, 32, 3))
with self.subTest(name='DataType'):
self.assertTrue(np.issubdtype(image.dtype, np.float))
with self.subTest(name='DataValues'):
# Normalized by stddev., expect nothing to fall outside 3 stddev.
self.assertTrue((image >= -3.).all() and (image <= 3.).all())
with self.subTest(name='LabelShape'):
self.assertLen(label, self._batch_size)
with self.subTest(name='LabelType'):
self.assertTrue(np.issubdtype(label.dtype, np.int))
with self.subTest(name='LabelValues'):
self.assertTrue((label >= 0).all() and
(label <= self._dataset.num_classes).all())
def test_test_image_dims_content(self):
"""Tests dimensions and contents of train data."""
iterator = self._dataset.get_test()
sample = next(iterator)
image, label = sample['image'], sample['label']
with self.subTest(name='DataShape'):
self.assertTupleEqual(image.shape, (self._batch_size_test, 32, 32, 3))
with self.subTest(name='DataType'):
self.assertTrue(np.issubdtype(image.dtype, np.float))
with self.subTest(name='DataValues'):
|
# Normalized by stddev., expect nothing to fall outside 3 stddev.
self.assertTrue((image >= -3.).all() and (image <= 3.).all())
with self.subTest(name='LabelShape'):
|
self.assertLen(label, self._batch_size_test)
with self.subTest(name='LabelType'):
self.assertTrue(np.issubdtype(label.dtype, np.int))
with self.subTest(name='LabelValues'):
self.assertTrue((label >= 0).all() and
(label <= self._dataset.num_classes).all())
def test_train_data_length(self):
"""Tests length of training dataset."""
total_count = 0
for batch in self._dataset.get_train():
total_count += len(batch['label'])
self.assertEqual(total_count, self._dataset.get_train_len())
def test_test_data_length(self):
"""Tests length of test dataset."""
total_count = 0
for batch in self._dataset.get_test():
total_count += len(batch['label'])
self.assertEqual(total_count, self._dataset.get_test_len())
def test_dataset_nonevenly_divisible_batch_size(self):
"""Tests non-evenly divisible test batch size."""
with self.assertRaisesRegex(
ValueError, 'Test data not evenly divisible by batch size: .*'):
self._dataset = cifar10.CIFAR10Dataset(
self._batch_size, batch_size_test=101)
if __name__ == '__main__':
absltest.main()
|
WhySoGeeky/DroidPot
|
venv/lib/python2.7/site-packages/sphinx/util/parallel.py
|
Python
|
mit
| 4,139 | 0 |
# -*- coding: utf-8 -*-
"""
sphinx.util.parallel
~~~~~~~~~~~~~~~~~~~~
Parallel building utilities.
:copyright: Copyright 2007-2015 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import traceback
try:
import multiprocessing
import threading
except ImportError:
multiprocessing = threading = None
from six.moves import queue
from sphinx.errors import SphinxParallelError
# our parallel functionality only works for the forking Process
parallel_available = multiprocessing and (os.name == 'posix')
class SerialTasks(object):
"""Has the same interface as ParallelTasks, but executes tasks directly."""
def __init__(self, nproc=1):
pass
def add_task(self, task_func, arg=None, result_func=None):
if arg is not None:
res = task_func(arg)
else:
res = task_func()
if result_func:
result_func(res)
def join(self):
pass
class ParallelTasks(object):
"""Executes *nproc* tasks in parallel after forking."""
def __init__(self, nproc):
self.nproc = nproc
# list of threads to join when waiting for completion
|
self._taskid = 0
self._threads = {}
self._nthreads = 0
# queue of result objects to process
self.result_queue = queue.Queue()
self._nprocessed = 0
# maps tasks to result functions
self._result_funcs = {}
# allow only "nproc" worker processes at once
self._semaphore = threading.Semaphore(self.nproc)
def _process(self, pipe, func, arg):
try:
if arg is None:
ret = func()
else:
ret = func(arg)
pipe.send((False, ret))
except BaseException as err:
pipe.send((True, (err, traceback.format_exc())))
def _process_thread(self, tid, func, arg):
precv, psend = multiprocessing.Pipe(False)
proc = multiprocessing.Process(target=self._process,
args=(psend, func, arg))
proc.start()
result = precv.recv()
self.result_queue.put((tid, arg) + result)
proc.join()
self._semaphore.release()
def add_task(self, task_func, arg=None, result_func=None):
tid = self._taskid
self._taskid += 1
self._semaphore.acquire()
thread = threading.Thread(target=self._process_thread,
args=(tid, task_func, arg))
thread.setDaemon(True)
thread.start()
self._nthreads += 1
self._threads[tid] = thread
self._result_funcs[tid] = result_func or (lambda *x: None)
# try processing results already in parallel
try:
tid, arg, exc, result = self.result_queue.get(False)
except queue.Empty:
pass
else:
del self._threads[tid]
if exc:
raise SphinxParallelError(*result)
result_func = self._result_funcs.pop(tid)(arg, result)
if result_func:
result_func(result)
self._nprocessed += 1
def join(self):
while self._nprocessed < self._nthreads:
tid, arg, exc, result = self.result_queue.get()
del self._threads[tid]
if exc:
raise SphinxParallelError(*result)
result_func = self._result_funcs.pop(tid)(arg, result)
if result_func:
result_func(result)
self._nprocessed += 1
# there shouldn't be any threads left...
for t in self._threads.values():
t.join()
def make_chunks(arguments, nproc, maxbatch=10):
# determine how many documents to read in one go
nargs = len(arguments)
chunksize = min(nargs // nproc, maxbatch)
if chunksize == 0:
chunksize = 1
nchunks, rest = divmod(nargs, chunksize)
if rest:
nchunks += 1
# partition documents in "chunks" that will be written by one Process
return [arguments[i*chunksize:(i+1)*chunksize] for i in range(nchunks)]
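# Worked example (added for illustration): for 25 arguments and nproc=4,
# chunksize = min(25 // 4, 10) = 6, so make_chunks() yields 5 chunks of
# sizes 6, 6, 6, 6 and 1.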
|
JuPeg/tools-artbio
|
unstable/local_tools/clustering4.py
|
Python
|
mit
| 6,082 | 0.024005 |
#!/usr/bin/python
# script find clusters of small RNA reads in the genome
# version 3 - 24-12-2013 evolution to multiprocessing
# Usage clustering.py <bowtie input> <output> <bowtie index> <clustering_distance> <minimum read number per cluster to be outputed> <collapse option> <extention value> <average_cluster_size>
# <folding> <output format>
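# Example invocation (hypothetical file names; note that the script reads eight
# positional arguments below: bowtie output, result file, reference fasta,
# clustering distance, minimum reads, minimum median read size, folding switch
# and output format):
#   python clustering4.py aligned.bowtie clusters.gff genome.fa 500 5 20 yes GFF3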
import sys, subprocess, time
from collections import defaultdict # required for some SmRNAwindow attributes (readDic)
#from numpy import mean, std # required for some SmRNAwindow methods
#from scipy import stats
from smRtools import *
import multiprocessing
def clustering (Instance):
def clustermining (cluster, Instance): # cluster argument is a list
if Instance.readDict[-cluster[0]]: # test whether the first position in the cluster was reverse reads
shift = max(Instance.readDict[-cluster[0]])
upstream_coord = cluster[0] - shift + 1
else:
upstream_coord = cluster[0]
if Instance.readDict[cluster[-1]]: # test whether the last position in the cluster was forward reads
shift = max(Instance.readDict[cluster[-1]])
downstream_coord = cluster[-1] + shift -1
else:
downstream_coord = cluster[-1]
readcount = Instance.readcount(upstream_coord=upstream_coord, downstream_coord=downstream_coord)
mean_size, median_size, stdv_size = Instance.statsizes(upstream_coord=upstream_coord, downstream_coord=downstream_coord)
if readcount >= minimum_reads and median_size >= min_median_size:
location = [Instance.gene.split()[0], upstream_coord, downstream_coord]
if output_format == "intervals":
return "%s\t%s\t%s\t%s" % (location[0], location[1], location[2], readcount)
cluster_size = downstream_coord - upstream_coord + 1
if folding == "yes" and cluster_size < 151:
foldEnergy = Instance.foldEnergy(upstream_coord=upstream_coord, downstream_coord=downstream_coord) ## be careful, test !
else:
foldEnergy = "."
forwardReadcount = Instance.forwardreadcount(upstream_coord=upstream_coord, downstream_coord=downstream_coord) #
reverseReadcount = Instance.reversereadcount(upstream_coord=upstream_coord, downstream_coord=downstream_coord) #
density = readcount / float(cluster_size) #
if output_format == "GFF3":
if forwardReadcount >= reverseReadcount:
GFFstrand = "+"
else:
GFFstrand = "-"
Attributes = "ID=RC %s : FR %s : RR %s : Dens %s : Med %s : FE %s" % (readcount, forwardReadcount, reverseReadcount, density, median_size, foldEnergy)
return "%s\tGalaxy\tRead_Cluster\t%s\t%s\t%s\t%s\t.\t%s" % (location[0], location[1], location[2], readcount, GFFstrand, Attributes)
else:
Forward_Barycenter, Reverse_Barycenter = Instance.barycenter(upstream_coord=upstream_coord, downstream_coord=downstream_coord)
Zsignature = Instance.signature(24,29,24,29,range(1,27), zscore="yes", upstream_coord=upstream_coord, downstream_coord=downstream_coord)[10] #
Hsignature = Instance.hannon_signature(24,29,24,29, range(1,27), upstream_coord=upstream_coord, downstream_coord=downstream_coord )[10] * 100
UpiFreq = Instance.Ufreq(range(24,29), upstream_coord=upstream_coord, downstream_coord=downstream_coord)
UsiFreq = Instance.Ufreq(range(20,22), upstream_coord=upstream_coord, downstream_coord=downstream_coord)
return "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (location[0], location[1], location[2], cluster_size, readcount, forwardReadcount, reverseReadcount, density, median_size, foldEnergy, Forward_Barycenter, Reverse_Barycenter, Zsignature, Hsignature, UpiFreq, UsiFreq)
return False
l = Instance.readDict.keys()
l=[abs(i) for i in l]
l=list(set(l))
l.sort()
upstream = 0
cluster_list = []
for i, element in enumerate (l[1:]):
if abs(element-l[i]) > dist or i+2==len(l): # the 2nd part of the logical test is to capture the last cluster if it overlaps the end of the list
cluster = l[upstream:i+1]
upstream = i+1
cluster_list.append(cluster)
result_list = []
for i in cluster_list:
totestresult = clustermining (i, Instance)
if totestresult: result_list.append(totestresult)
del Instance #
return result_list
def logtask (results):
global number_of_clusters
if results:
number_of_clusters += len(results)
LOG.append(results)
return
if __name__ == '__main__':
start_time = time.time()
fasta_dic = get_fasta (sys.argv[3])
objDic = {}
number_of_reads = 0
F = open (sys.argv[1], "r") # F is the bowtie output taken as input
for line in F:
number_of_reads += 1
fields = line.split()
polarity = fields[1]
gene = fields[2]
offset = int(fields[3])
size = len (fields[4])
try:
objDic[gene].addread (polarity, offset, size)
except KeyError:
objDic[gene] = SmRNAwindow(gene, fasta_dic[gene])
objDic[gene].addread (polarity, offset, size)
F.close()
OUT = open (sys.argv[2], "w")
output_format=sys.argv[8]
if output_format == "intervals":
print >> OUT, "#chrom\tStart\tEnd\tReadCount"
elif output_format == "GFF3":
print >> OUT, "##gff-version 3"
else:
print >> OUT, "#ID\t#chrom\tStart\tEnd\tLength\tReadCount\tForwardReads\tReverseReads\tDensity\tMedian\tFoldEnergy\tForBar\tRevBar\tz-score_signature\tHannon_signature\tUfreq_in_24-28RNAs\tUfreq_in_20-21RNs"
dist = int(sys.argv[4])
min_median_size = int(sys.argv[6])
minimum_reads = int(sys.argv[5])
number_of_clusters = 0
Instance_ID = 0
folding=sys.argv[7]
pool = multiprocessing.Pool(4)
LOG = []
instance_list = []
for instance in objDic.keys():
instance_list.append(objDic[instance])
del objDic
    pool.map_async(clustering, instance_list, callback=logtask)
pool.close()
pool.join()
for lines in LOG:
for line in lines:
            print >> OUT, line
OUT.close()
elapsed_time = time.time() - start_time
print "number of reads: %s\nnumber of clusters: %s\ntime: %s" % (number_of_reads, number_of_clusters, elapsed_time)
|
midonet/mcp
|
deimos/config.py
|
Python
|
apache-2.0
| 5,937 | 0.002358 |
from ConfigParser import SafeConfigParser, NoSectionError
import json
import logging
import os
import sys
import deimos.argv
import deimos.docker
from deimos.logger import log
import deimos.logger
from deimos._struct import _Struct
def load_configuration(f=None, interactive=sys.stdout.isatty()):
error = None
defaults = _Struct(docker=Docker(),
index=DockerIndex(),
containers=Containers(),
uris=URIs(),
state=State(),
log=Log(
console=(logging.DEBUG if interactive else None),
syslog=(logging.INFO if not interactive else None)
))
parsed = None
try:
f = f if f else path()
if f:
parsed = parse(f)
except Exception as e:
error = e
finally:
confs = defaults.merge(parsed) if parsed else defaults
deimos.logger.initialize(**dict(confs.log.items()))
if error:
pre = ("Error loading %s: " % f) if f else ""
log.exception(pre + str(error))
sys.exit(16)
if parsed:
log.info("Loaded configuration from %s" % f)
for _, conf in parsed.items():
log.debug("Found: %r", conf)
return confs
def coercearray(array):
if type(array) in deimos.argv.strings:
if array[0:1] != "[":
return [array]
try:
arr = json.loads(array)
if type(arr) is not list:
raise ValueError()
return arr
except:
raise ValueError("Not an array: %s" % array)
return list(array)
def coerceloglevel(level):
if not level:
return
if type(level) is int:
return level
levels = {"DEBUG": logging.DEBUG,
"INFO": logging.INFO,
"WARNING": logging.WARNING,
"ERROR": logging.ERROR,
"CRITICAL": logging.CRITICAL,
"NOTSET": logging.NOTSET}
try:
return levels[level]
except:
raise ValueError("Not a log level: %s" % level)
def coercebool(b):
if type(b) is bool:
return b
try:
bl = json.loads(b)
if type(bl) is not bool:
raise ValueError()
return bl
except:
raise ValueError("Not a bool: %s" % b)
def coerceoption(val):
try:
return coercearray(val)
except:
return coercebool(val)
class Image(_Struct):
def __init__(self, default=None, ignore=False):
_Struct.__init__(self, default=default, ignore=coercebool(ignore))
def override(self, image=None):
return image if (image and not self.ignore) else self.default
class Options(_Struct):
def __init__(self, default=[], append=[], ignore=False):
_Struct.__init__(self, default=coercearray(default),
                         append=coercearray(append),
ignore=coercebool(ignore))
def override(self, options=[]):
a = options if (len(options) > 0 and not self.ignore) else self.default
return a + self.append
class Containers(_Struct):
def __init__(self, image=Image(), options=Options()):
_Struct.__init__(self, image=image, options=options)
|
def override(self, image=None, options=[]):
return self.image.override(image), self.options.override(options)
class URIs(_Struct):
def __init__(self, unpack=True):
_Struct.__init__(self, unpack=coercebool(unpack))
class Log(_Struct):
def __init__(self, console=None, syslog=None):
_Struct.__init__(self, console=coerceloglevel(console),
syslog=coerceloglevel(syslog))
class Docker(_Struct):
def __init__(self, **properties):
for k in properties.keys():
properties[k] = coerceoption(properties[k])
_Struct.__init__(self, **properties)
def argv(self):
return deimos.argv.argv(**dict(self.items()))
class DockerIndex(_Struct):
def __init__(self, index=None, account_libmesos="libmesos",
account=None,
dockercfg=None):
_Struct.__init__(self, index=index,
account_libmesos=account_libmesos,
account=account,
dockercfg=dockercfg)
class State(_Struct):
def __init__(self, root="/tmp/deimos"):
if ":" in root:
raise ValueError("Deimos root storage path must not contain ':'")
_Struct.__init__(self, root=root)
def parse(f):
config = SafeConfigParser()
config.read(f)
parsed = {}
sections = [("log", Log), ("state", State), ("uris", URIs),
("docker", Docker),
("docker.index", DockerIndex),
("containers.image", Image),
("containers.options", Options)]
for key, cls in sections:
try:
parsed[key] = cls(**dict(config.items(key)))
except:
continue
containers = {}
if "containers.image" in parsed:
containers["image"] = parsed["containers.image"]
del parsed["containers.image"]
if "containers.options" in parsed:
containers["options"] = parsed["containers.options"]
del parsed["containers.options"]
if len(containers) > 0:
parsed["containers"] = Containers(**containers)
if "docker.index" in parsed:
parsed["index"] = parsed["docker.index"]
del parsed["docker.index"]
return _Struct(**parsed)
def path():
for p in search_path:
if os.path.exists(p):
return p
search_path = ["./deimos.cfg",
os.path.expanduser("~/.deimos"),
"/etc/deimos.cfg",
"/usr/etc/deimos.cfg",
"/usr/local/etc/deimos.cfg"]
|
usmansher/hangman
|
main.py
|
Python
|
apache-2.0
| 4,488 | 0.011809 |
#!/usr/bin/python
'''
Title: Hangman
Description: A Simple Hangman Game
Author: Usman Sher (@usmansher)
Disclaimer: It's Just A Small Guessing Game made By Me (Beginning Of Coding).
'''
# Imports
import pygame, sys
from pygame.locals import *
from random import choice
# Color Variables
RED = (255, 0, 0)
GREEN = (0, 255, 0)
ORANGE = (255, 100, 0)
BLUE = (0, 0, 255)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
# Get The Words From a Text File
def getWords():
f = open('wordlist.txt')
words = []
getLines = f.readline()
while getLines:
words.append(getLines.strip())
getLines = f.readline()
return words
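# wordlist.txt is expected to contain one candidate word per line, for example
# (made-up sample):
#   python
#   hangman
#   gallows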
# Word Spaces
def drawWordSpaces(screen, spaces):
x = 10
for i in range(spaces):
pygame.draw.line(screen, ORANGE, (x, 350), (x+20, 350), 3)
x += 30
# Letters
def drawLetter(screen, font, word, guess):
x = 10
for letter in word:
if letter == guess:
letter = font.render(letter, 3, BLACK)
screen.blit(letter, (x, 300))
x += 30
# Gallows
def drawGallows(screen):
pygame.draw.rect(screen, BLUE, (450, 350, 100, 10))
pygame.draw.rect(screen, BLUE, (495, 250, 10, 100))
pygame.draw.rect(screen, BLUE, (450, 250, 50, 10))
pygame.draw.rect(screen, BLUE, (450, 250, 10, 25))
# Body Parts
def drawMan(screen, bodyPart):
if bodyPart == 'head':
pygame.draw.circle(screen, RED, (455, 285), 10)
if bodyPart == 'body':
pygame.draw.rect(screen, RED, (453, 285, 4, 50))
if bodyPart == 'lArm':
pygame.draw.line(screen, RED, (455, 310), (445, 295), 3)
if bodyPart == 'rArm':
pygame.draw.line(screen, RED, (455, 310), (465, 295), 3)
if bodyPart == 'lLeg':
pygame.draw.line(screen, RED, (455, 335), (445, 345), 3)
if bodyPart == 'rLeg':
pygame.draw.line(screen, RED, (455, 335), (465, 345), 3)
# The Main Function
def main():
x = 800
y = 500
pygame.init() # Initialize Pygame
screen = pygame.display.set_mode((x, y)) # Set The Screen Size
pygame.display.set_caption('Hangman By Usman Sher')
screen.fill(WHITE) # Fill The Background
font = pygame.font.SysFont('Courier New', 40) # Set Font & Size
drawGallows(screen) # Draw The Gallows
guessed = ''
words = getWords() # Get Words
word = choice(words) # Get one word from words
drawWordSpaces(screen, len(word)) # Draw The Word Spaces
print word
body = ['rLeg', 'lLeg', 'rArm', 'lArm', 'body', 'head'] # Body Parts
correct = ''
unique = set(word)# Get Unique Words from the Word
pygame.display.update()# Update The Display
while body and len(correct) < len(unique): # While Bodyparts or Correct Guess is less than Unique Words
# Keyboard Events
for event in pygame.event.get():
# Enable the Quit Button
if event.type == QUIT:
sys.exit()
# If Key is pressed
if event.type == KEYDOWN:
# Check Whether Its a Alphabet or not
if event.unicode.isalpha():
guess = event.unicode #Store Alphabet in variable guess
# Check Whether Guessed Word is Right Or Wrong
if guess in word and guess not in correct:
#if it is
drawLetter(screen, font, word, guess) #Print The Letter on Screen
pygame.display.update() # Update The Display
correct += guess # Add Guessed Letter to Correct
elif guess not in guessed:
# If Its Wrong
                    bodyPart = body.pop() # Delete a Bodypart and add it to the variable bodyPart
                    drawMan(screen, bodyPart) # Draw the Man with the Popped Bodypart
pygame.display.update() # Update the Display
guessed += guess # Add it to variable guessed
if body: # Check Whether theres a part left in variable body
text = 'You Won!'# If True
else:
text = 'You Lose! The word was '+ word # If False
# print the Text
endMessage = font.render(text, 3, BLACK)
screen.blit(endMessage, (0, 0))
pygame.display.update()
# Enable Quit Button
    while True:
for event in pygame.event.get():
if event.type == QUIT:
sys.exit()
# Run The Program
if __name__ == '__main__':
main()
|
vprusso/youtube_tutorials
|
twitter_python/part_1_streaming_tweets/tweepy_streamer.py
|
Python
|
gpl-3.0
| 1,932 | 0.006729 |
# YouTube Video: https://www.youtube.com/watch?v=wlnx-7cm4Gg
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import twitter_credentials
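# twitter_credentials.py is expected to define the four string constants used
# below (placeholder values shown):
#   CONSUMER_KEY = "..."
#   CONSUMER_SECRET = "..."
#   ACCESS_TOKEN = "..."
#   ACCESS_TOKEN_SECRET = "..."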
# # # # TWITTER STREAMER # # # #
class TwitterStreamer():
"""
Class for streaming and processing live tweets.
"""
def __init__(self):
pass
def stream_tweets(self, fetched_tweets_filename, hash_tag_list):
        # This handles Twitter authentication and the connection to Twitter Streaming API
listener = StdOutListener(fetched_tweets_filename)
auth = OAuthHandler(twitter_credentials.CONSUMER_KEY, twitter_credentials.CONSUMER_SECRET)
auth.set_access_token(twitter_credentials.ACCESS_TOKEN, twitter_credentials.ACCESS_TOKEN_SECRET)
stream = Stream(auth, listener)
        # This line filters Twitter Streams to capture data by the keywords:
stream.filter(track=hash_tag_list)
# # # # TWITTER STREAM LISTENER # # # #
class StdOutListener(StreamListener):
"""
This is a basic listener that just prints received tweets to stdout.
"""
    def __init__(self, fetched_tweets_filename):
self.fetched_tweets_filename = fetched_tweets_filename
def on_data(self, data):
try:
print(data)
with open(self.fetched_tweets_filename, 'a') as tf:
tf.write(data)
return True
except BaseException as e:
print("Error on_data %s" % str(e))
return True
    def on_error(self, status):
print(status)
if __name__ == '__main__':
    # Authenticate using twitter_credentials.py and connect to Twitter Streaming API.
hash_tag_list = ["donal trump", "hillary clinton", "barack obama", "bernie sanders"]
fetched_tweets_filename = "tweets.txt"
twitter_streamer = TwitterStreamer()
twitter_streamer.stream_tweets(fetched_tweets_filename, hash_tag_list)
|
wiliamsouza/mandriva-control-center
|
bin/services-mechanism.py
|
Python
|
gpl-2.0
| 169 | 0.011834 |
#!/usr/bin/python
import sys
sys.path.append('/usr/share/mandriva/')
from mcc2.backends.services.service import Services
if __name__ == '__main__':
    Services.main()
|
jiobert/python
|
Smith_Ben/Assignments/email_validation/emai.py
|
Python
|
mit
| 1,552 | 0.027706 |
from flask import Flask, request, redirect, render_template, session, flash
from mysqlconnection import MySQLConnector
import re
app = Flask(__name__)
mysql = MySQLConnector(app, 'emailval')
app.secret_key = 'secret'
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
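# For example, "user.name+tag@example.com" matches this pattern, while
# "user@@example" and "user@example" (no dot in the domain) do not.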
@app.route('/')
def validation():
return render_template('validation.html')
@app.route('/emails', methods=['POST'])
def email():
    if not EMAIL_REGEX.match(request.form['buttonbox']):
flash('invalid emale')
return redirect('/')
else:
flash ('Great Job!');
query = "INSERT INTO email (email,updated_at,created_at) VALUES (:email,NOW(),NOW())"
data = {'email':request.form['buttonbox']}
mysql.query_db(query,data)
query = "SELECT created_at FROM email"
query = "SELECT * FROM email"
email = mysql.query_db(query)
# if len(request.form['buttonbox']) < 1:
    # flash('need a proper emale')
return render_template('email.html', email = email)
# @app.route('/emails')
# def show(email_id):
# query = "SELECT * FROM email WHERE id = :specific_id"
# data = {'specific_id': email_id}
# emails = mysql.query_db(query, data)
# return render_template('email.html', email = email)
@app.route('/delete/<id>')
def delete(id):
query = "DELETE FROM email WHERE id = :id"
data = {'id': id}
mysql.query_db(query, data)
flash("The email address ID {} has been deleted".format(id))
query = "SELECT * FROM email"
email = mysql.query_db(query)
return render_template('email.html', email = email)
app.run(debug=True)
|
widdowquinn/find_differential_primers
|
diagnostic_primers/nucmer.py
|
Python
|
mit
| 17,605 | 0.001477 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""nucmer.py
Provides helper functions to run nucmer from pdp
(c) The James Hutton Institute 2018
Author: Leighton Pritchard
Contact: leighton.pritchard@hutton.ac.uk
Leighton Pritchard,
Information and Computing Sciences,
James Hutton Institute,
Errol Road,
Invergowrie,
Dundee,
DD2 5DA,
Scotland,
UK
The MIT License
Copyright (c) 2018 The James Hutton Institute
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
from collections import namedtuple
from itertools import permutations
from diagnostic_primers import PDPException
from diagnostic_primers.sge_jobs import Job
# convenience factories for nucmer results
# Information useful for context of nucmer comparison output
NucmerOutput = namedtuple(
"NucmerOutput", "query subject out_delta out_filter cmd_nucmer cmd_delta"
)
# Parsed nucmer .delta file data
NucmerDelta = namedtuple(
"NucmerDelta", "deltafile queryfile subjectfile query_intervals"
)
# Exceptions for nucmer processing
class PDPNucmerException(PDPException):
"""Exception thrown during nucmer run or processing"""
def __init__(self, msg="Error in `nucmer` processing"):
PDPException.__init__(self, msg)
# Class to describe a nucmer command, for consistency with Biopython commands
class NucmerCommand(object):
"""Command-line for nucmer"""
def __init__(self, cline, infile, outfile):
self.cline = cline
self.infile = infile
self.outfile = outfile
def __str__(self):
return " ".join(self.cline)
# Class to describe a delta-filter command, for consistency with Biopython commands
class DeltaFilterCommand(object):
"""Command-line for delta-filter"""
def __init__(self, cline, infile, outfile):
self.cline = cline
self.infile = infile
self.outfile = outfile
def __str__(self):
return " ".join(self.cline)
class DeltaData(object):
"""Class to hold MUMmer/nucmer output "delta" data
This is required because the ordering within files differs depending on MUMmer
build, for the same version (as evidenced by differences between OSX and Linux
builds), and a means of testing for equality of outputs is necessary.
The output file structure and format is described at
http://mummer.sourceforge.net/manual/#nucmeroutput
Each file is represented as:
- header: first line of the .delta file, naming the two input comparison files; stored
as a tuple (path1, path2), returned as the combined string; the individual
files are stored as self._query and self._subject
- program: name of the MUMmer program that produced the output
- query: path to the query sequence file
- subject: path to the subject sequence file
"""
def __init__(self, name, handle=None):
self.name = name
self._metadata = None
self._comparisons = []
if handle is not None:
self.from_delta(handle)
def from_delta(self, handle):
"""Populate the object from the passed .delta or .filter filehandle"""
parser = DeltaIterator(handle)
for element in parser:
if isinstance(element, DeltaMetadata):
self._metadata = element
if isinstance(element, DeltaComparison):
self._comparisons.append(element)
@property
def comparisons(self):
"""Comparisons in the .delta file"""
return self._comparisons
@property
def metadata(self):
"""Metadata from the .delta file"""
return self._metadata
@property
def reference(self):
"""The reference file for the MUMmer comparison"""
return self._metadata.reference
@property
def program(self):
"""The MUMmer program used for the comparison"""
return self._metadata.program
@property
def query(self):
"""The query file for the MUMmer comparison"""
return self._metadata.query
def __eq__(self, other):
# We do not enforce equality of metadata, as the full path to both query and reference is
# written in the .delta file, and we care only about the alignment data, and the program
# that was used.
if not isinstance(other, DeltaData):
return False
return (self.program == other.program) and (
self._comparisons == other._comparisons
)
def __len__(self):
return len(self._comparisons)
def __str__(self):
"""Return the object in .delta format output"""
outstr = os.linesep.join(
[str(self._metadata)] + [str(_) for _ in self._comparisons]
)
return outstr
class DeltaMetadata(object):
"""Represents the metadata header for a MUMmer .delta file"""
def __init__(self):
self.reference = None
self.query = None
self.program = None
def __eq__(self, other):
if not isinstance(other, DeltaMetadata):
return False
return (self.reference, self.query, self.program) == (
other.reference,
other.query,
other.program,
)
def __str__(self):
return "{} {}{}{}".format(self.reference, self.query, os.linesep, self.program)
class DeltaComparison(object):
"""Represents a comparison between two sequences in a .delta file"""
def __init__(self, header, alignments):
self.header = header
self.alignments = alignments
def add_alignment(self, aln):
"""Add passed alignment to this object
:param aln: DeltaAlignment object
"""
self.alignments.append(aln)
def __eq__(self, other):
if not isinstance(other, DeltaComparison):
return False
return (self.header == other.header) and (
sorted(self.alignments) == sorted(other.alignments)
)
def __len__(self):
return len(self.alignments)
def __str__(self):
outstr = os.linesep.join([str(self.header)] + [str(_) for _ in self.alignments])
return outstr
class DeltaHeader(object):
"""Represents a single sequence comparison header from a MUMmer .delta file"""
def __init__(self, reference, query, reflen, querylen):
self.reference = reference
self.query = query
self.referencelen = int(reflen)
self.querylen = int(querylen)
def __eq__(self, other):
if not isinstance(other, DeltaHeader):
return False
return (self.reference, self.query, self.referencelen, self.querylen) == (
|
other.reference,
other.query,
other.referencelen,
other.querylen,
)
def __str__(self):
return ">{} {} {} {}".format(
            self.reference, self.query, self.referencelen, self.querylen
)
class DeltaAlignment(object):
"""Represents a single alignment region and scores for a pairwise comparison"""
def __init__(self, refstart, refend, qrystart, qryend, errs, simerrs, stops):
self.refstart = int(refstart)
self.refend = int(refend)
self.querystart = int(qrystart)
self
|
lampwins/netbox
|
netbox/secrets/api/urls.py
|
Python
|
apache-2.0
| 763 | 0.003932 |
from rest_framework import routers
from . import views
class SecretsRootView(routers.APIRootView):
"""
Secrets API root view
"""
def get_view_name(self):
return 'Secrets'
router = routers.DefaultRouter()
router.APIRootView = SecretsRootView
# Field choices
router.register(r'_choices', views.SecretsFieldChoicesViewSet, basename='field-choice')
# Secrets
router.register(r'secret-roles', views.SecretRoleViewSet)
router.register(r'secrets', views.SecretViewSet)
# Miscellaneous
router.register(r'get-session-key', views.GetSessionKeyViewSet, basename='get-session-key')
router.register(r'generate-rsa-key-pair', views.GenerateRSAKeyPairViewSet, basename='generate-rsa-key-pair')
app_name = 'secrets-api'
urlpatterns = router.urls
|
cfossace/test
|
pyscript/pyscript2.py
|
Python
|
mit
| 1,181 | 0.018628 |
import requests
import hashlib
import os
import json
USERNAME = 'christine'
API_KEY = 'd0e4164c2bd99f1f888477fc25cf8c5c104a5cd1'
#Read in the path with user input (or navigate to the directory in the GUI)
#path = '/home/wildcat/Lockheed/laikaboss/malware/'
os.chdir("/home/wildcat/Lockheed/laikaboss")
print("Hint: /home/wildcat/Lockheed/laikaboss/malware/")
path = raw_input("Enter the path of your file: ")
for f in os.listdir(path):
os.system("sudo python laika.py {} | jq '.scan_result[]' > /home/wildcat/Lockheed/crits/pyscript/mal3/{}.out".forma
|
t(os.path.join(path,f), f))
os.chdir("/home/wildcat/Lockheed/crits/pyscript/mal3/")
path2 = "/home/wildcat/Lockheed/crits/pyscript/mal3/"
for f in os.listdir(path2):
read_data = open(f,'r')
md5_data = json.load(read_data)
file_data = open(f, 'r').read()
md5 = md5_data['moduleMetadata']['META_HASH']['HASHES']['md5']
data = {'upload_type': 'metadata',
'filename': f,
'md5': md5,
'source': 'Christine'}
files = {'filedata': open(f, 'rb')}
url = 'http://localhost:8080/api/v1/samples/?username={0}&api_key={1}'.format(USERNAME, API_KEY)
r = requests.post(url, data=data, files=files)
|
claytondaley/mongo-async-python-driver
|
examples/deferreds/insert.py
|
Python
|
apache-2.0
| 1,602 | 0.001248 |
#!/usr/bin/env python
# coding: utf-8
import sys
import time
from twisted.internet import defer, reactor
from twisted.python import log
import txmongo
def getConnection():
print "getting connection..."
return txmongo.MongoConnectionPool()
def getDatabase(conn, dbName):
print "getting database..."
return getattr(conn, dbName)
def getCollection(db, collName):
print "getting collection..."
return getattr(db, collName)
def insertData(coll):
print "inserting data..."
# insert some data, building a deferred list so that we can later check
# the succes or failure of each deferred result
deferreds = []
for x in xrange(10000):
d = coll.insert({"
|
something":x*time.time()}, safe=True)
deferreds.append(d)
    return defer.DeferredList(deferreds)
def processResults(results):
print "processing results..."
failures = 0
successes = 0
for success, result in results:
if success:
successes += 1
else:
failures += 1
print "There were %s successful inserts and %s failed inserts." % (
successes, failures)
def finish(ignore):
print "finishing up..."
reactor.stop()
def example():
d = getConnection()
d.addErrback(log.err)
d.addCallback(getDatabase, "foo")
d.addCallback(getCollection, "test")
d.addCallback(insertData)
d.addErrback(log.err)
d.addCallback(processResults)
d.addErrback(log.err)
d.addCallback(finish)
return d
if __name__ == '__main__':
log.startLogging(sys.stdout)
example()
reactor.run()
|
Cadair/pysac
|
examples/mhs_atmosphere/spruit_atmosphere.py
|
Python
|
bsd-2-clause
| 12,258 | 0.007016 |
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 11 13:55:17 2014
@author: sm1fg
This is the main module to construct a magnetohydrostatic solar atmosphere,
given a specified magnetic network of self-similar magnetic flux tubes and
save the output to gdf format.
To select an existing configuration change the import as model_pars, set Nxyz,
xyz_SI and any other special parameters, then execute mhs_atmopshere.
To add new configurations:
add the model options to set_options in parameters/options.py;
add options required in parameters/model_pars.py;
add alternative empirical data sets to hs_model/;
add alternativ table than interploate_atmosphere in hs_model/hs_atmosphere.py;
add option to get_flux_tubes in mhs_model/flux_tubes.py
If an alternative formulation of the flux tube is required add options to
construct_magnetic_field and construct_pairwise_field in
mhs_model/flux_tubes.py
Plotting options are included in plot/mhs_plot.py
"""
import os
import numpy as np
import pysac.mhs_atmosphere as atm
import astropy.units as u
from pysac.mhs_atmosphere.parameters.model_pars import spruit as model_pars
#==============================================================================
#check whether mpi is required and the number of procs = size
#==============================================================================
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
l_mpi = True
l_mpi = l_mpi and (size != 1)
except ImportError:
l_mpi = False
rank = 0
size = 1
#==============================================================================
#set up model parameters
#==============================================================================
local_procs=1
#optional coordinate - resolution
model_pars['Nxyz'] = [64,64,128] # 3D grid
model_pars['xyz'] = [-0.63*u.Mm,0.63*u.Mm,-0.63*u.Mm,0.63*u.Mm,0.0*u.Mm,12.7*u.Mm] #grid size
#standard set of logical switches
option_pars = atm.set_options(model_pars, l_mpi, l_gdf=True)
#standard conversion to dimensionless units and physical constants
scales, physical_constants = \
atm.get_parameters()
# select the option in the next line
option_pars['l_linear'] = True
# Alfven speed constant along the axis of the flux tube
if option_pars['l_const']:
option_pars['l_B0_quadz'] = True
model_pars['chrom_scale'] *= 5e1
model_pars['p0'] *= 1.5e1
physical_constants['gravity'] *= 1.
model_pars['radial_scale'] *= 1.
# Alfven speed proportional to sqrt(Z) along the axis of the flux tube
elif option_pars['l_sqrt']:
option_pars['l_B0_rootz'] = True
model_pars['chrom_scale'] *= 5.65e-3
model_pars['p0'] *= 1.
physical_constants['gravity'] *= 7.5e3
model_pars['radial_scale'] *= 0.7
# Alfven speed proportional to Z along the axis of the flux tube
elif option_pars['l_linear']:
option_pars['l_B0_rootz'] = True
model_pars['chrom_scale'] *= 0.062
model_pars['p0'] *= 3e2
physical_constants['gravity'] *= 8e3
model_pars['radial_scale'] *= 1.
# Alfven speed proportional to Z^2 along the axis of the flux tube
elif option_pars['l_square']:
option_pars['l_B0_rootz'] = True
model_pars['chrom_scale'] *= 1.65
model_pars['p0'] *= 2e4
physical_constants['gravity'] *= 5e4
model_pars['radial_scale'] *= 1.
# Alfven speed not defined along the axis of the flux tube
else:
option_pars['l_B0_rootz'] = True
model_pars['chrom_scale'] *= 1.
model_pars['p0'] *= 1.
#obtain code coordinates and model parameters in astropy units
coords = atm.get_coords(model_pars['Nxyz'], u.Quantity(model_pars['xyz']))
#==============================================================================
#calculate 1d hydrostatic balance from empirical density profile
#==============================================================================
pressure_Z, rho_Z, Rgas_Z = atm.get_spruit_hs(coords['Z'],
model_pars,
physical_constants,
option_pars
)
#==============================================================================
# load flux tube footpoint parameters
#==============================================================================
# axial location and value of Bz at each footpoint
xi, yi, Si = atm.get_flux_tubes(
model_pars,
coords,
option_pars
)
#==============================================================================
# split domain into processes if mpi
#==============================================================================
ax, ay, az = np.mgrid[coords['xmin']:coords['xmax']:1j*model_pars['Nxyz'][0],
coords['ymin']:coords['ymax']:1j*model_pars['Nxyz'][1],
coords['zmin']:coords['zmax']:1j*model_pars['Nxyz'][2]]
# split the grid between processes for mpi
if l_mpi:
x_chunks = np.array_split(ax, size, axis=0)
y_chunks = np.array_split(ay, size, axis=0)
z_chunks = np.array_split(az, size, axis=0)
x = comm.scatter(x_chunks, root=0)
y = comm.scatter(y_chunks, root=0)
z = comm.scatter(z_chunks, root=0)
else:
x, y, z = ax, ay, az
x = u.Quantity(x, unit=coords['xmin'].unit)
y = u.Quantity(y, unit=coords['ymin'].unit)
z = u.Quantity(z, unit=coords['zmin'].unit)
#==============================================================================
# initialize zero arrays in which to add magnetic field and mhs adjustments
#==============================================================================
Bx = u.Quantity(np.zeros(x.shape), unit=u.T) # magnetic x-component
By = u.Quantity(np.zeros(x.shape), unit=u.T) # magnetic y-component
Bz = u.Quantity(np.zeros(x.shape), unit=u.T) # magnetic z-component
pressure_m = u.Quantity(np.zeros(x.shape), unit=u.Pa) # magneto-hydrostatic adjustment to pressure
rho_m = u.Quantity(np.zeros(x.shape), unit=u.kg/u.m**3) # magneto-hydrostatic adjustment to density
# initialize zero arrays in which to add balancing forces and magnetic tension
Fx = u.Quantity(np.zeros(x.shape), unit=u.N/u.m**3) # balancing force x-component
Fy = u.Quantity(np.zeros(x.shape), unit=u.N/u.m**3) # balancing force y-component
# total tension force for comparison with residual balancing force
Btensx = u.Quantity(np.zeros(x.shape), unit=u.N/u.m**3)
Btensy = u.Quantity(np.zeros(x.shape), unit=u.N/u.m**3)
#==============================================================================
#calculate the magnetic field and pressure/density balancing expressions
#==============================================================================
for i in range(0,model_pars['nftubes']):
for j in range(i,model_pars['nftubes']):
if rank == 0:
            print 'calculating ij-pair:', i, j
if i == j:
pressure_mi, rho_mi, Bxi, Byi ,Bzi, B2x, B2y =\
atm.construct_magnetic_field(
x, y, z,
xi[i], yi[i], Si[i],
model_pars, option_pars,
physical_constants,
scales
)
Bx, By, Bz = Bxi+Bx, Byi+By ,Bzi+Bz
Btensx += B2x
Btensy += B2y
pressure_m += pressure_mi
rho_m += rho_mi
else:
pressure_mi, rho_mi, Fxi, Fyi, B2x, B2y =\
atm.construct_pairwise_field(
x, y, z,
xi[i], yi[i],
xi[j], yi[j], Si[i], Si[j],
model_pars,
option_pars,
physical_constants,
scales
|
ChrisTimperley/rostrace
|
rostrace/service.py
|
Python
|
mit
| 5,361 | 0.002611 |
#!/usr/bin/env python
#
# Limitations:
# - doesn't work if another node is using a persistent connection
# - all names MUST be fully qualified, else rosservice will fail
#
# TODO:
# - watch out for new services and tap them when they come online
# - stop broadcasting a service when the original host dies?
#
# http://docs.ros.org/diamondback/api/rosservice/html/index.html
import sys
import inspect
import rospy
import std_srvs.srv
import std_msgs.msg
import rosgraph
import rosservice
import rospy.core
import json
from pprint import pprint as pp
from rospy.impl.tcpros_base import TCPROSTransport
# we use the most accurate timer available to the system
from timeit import default_timer as timer
"""
All (tapped) service calls are broadcast to the /rec/srvs topic in a JSON
format. The +queue_size+ parameter creates an asynchronous publisher, which
is better suited to our needs (higher throughput)
"""
class ServiceTapper(object):
"""
Acts a proxy, forwarding a given service call onto its intended recepient,
whilst logging details of the service call to the appropriate topic
"""
def __handler(self, server, service_name, proxy, req):
time_start = timer()
client = req._connection_header['callerid']
# generate a JSON-encodable description of the parameters for this request
# TODO: will fail with complex, embedded objects
params = {p: getattr(req, p) for p in req.__slots__}
# send the request and wait for a response
success = False
try:
ret = proxy(req)
success = True
response = {p: getattr(ret, p) for p in ret.__slots__}
except rospy.ServiceException, e:
success = False
response = {'reason': e}
# log the service call
finally:
time_end = timer()
time_duration = time_end - time_start
log = {
'service': service_name,
'server': server,
'client': client,
'time_start': time_start,
'time_end': time_end,
'time_duration': time_duration,
'params': params,
'response': response,
'success': success
}
            self.publisher.publish(json.dumps(log))
return ret
"""
Listens to all activity on a given service
"""
def listen_to(self, service_name):
rospy.loginfo("Tapping service: {}".format(service_name))
# block until the service is available
rospy.wait_for_service(service_name)
# determine which node provides the given service
server = rosservice.get_service_node(service_name)
assert not server is None
# get the class used by this service
service_cls = rosservice.get_service_class_by_name(service_name)
# create a persistent proxy to that service
# inject a persistent connection into the proxy, so that when we replace
# the original service, we can still forward messages onto the old one
proxy = rospy.ServiceProxy(service_name, service_cls, persistent=True)
# TODO: listen for failures
# http://docs.ros.org/jade/api/rospy/html/rospy.impl.tcpros_service-pysrc.html#ServiceProxy
service_uri = self.master.lookupService(proxy.resolved_name)
(dest_addr, dest_port) = rospy.core.parse_rosrpc_uri(service_uri)
proxy.transport = TCPROSTransport(proxy.protocol, proxy.resolved_name)
proxy.transport.buff_size = proxy.buff_size
proxy.transport.connect(dest_addr, dest_port, service_uri)
# record the URI of the original service, so we can restore it later
self.tapped[service_name] = service_uri
# create a new, tapped service, with the same name
tap = lambda r: self.__handler(server, service_name, proxy, r)
rospy.Service(service_name, service_cls, tap)
rospy.loginfo("Tapped service: {}".format(service_name))
"""
Listens to all activity on all specified services
"""
def listen(self, services):
rospy.loginfo("Tapping services...")
services = rosservice.get_service_list(include_nodes=True)
for (service, node) in services:
# ignore irrelevant services
if node == 'rostrace' or service.endswith('/get_loggers') or service.endswith('/set_logger_level'):
continue
self.listen_to(service)
rospy.loginfo("Tapped services")
"""
Restores all tapped services to their original form. Must be called before
the program is closed, otherwise those services will become unavailable.
"""
def restore(self):
rospy.loginfo("Restoring services...")
for (service_name, uri) in self.tapped.items():
rospy.loginfo("Restoring service: {}".format(service_name))
self.master.registerService(service_name, uri, uri)
rospy.loginfo("Restored service: {}".format(service_name))
rospy.loginfo("Restored services")
"""
Constructs a new service tapper
"""
def __init__(self):
self.master = rosgraph.Master('/roscore')
self.publisher = \
rospy.Publisher('rec/srvs', std_msgs.msg.String, queue_size=10)
self.tapped = {}
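# Illustrative wiring sketch (added for clarity, not part of the original
# module); the node name is hypothetical:
#
#   rospy.init_node('rostrace')
#   tapper = ServiceTapper()
#   tapper.listen([])                  # re-queries the master for services
#   rospy.on_shutdown(tapper.restore)  # restore original URIs before exit
#   rospy.spin()
#
# Any other node can then consume the JSON call logs published on /rec/srvs:
#
#   rospy.Subscriber('rec/srvs', std_msgs.msg.String,
#                    lambda msg: pp(json.loads(msg.data)))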
|
DIRACGrid/DIRAC
|
src/DIRAC/DataManagementSystem/DB/FileCatalogComponents/FileManager/FileManagerBase.py
|
Python
|
gpl-3.0
| 54,505 | 0.001431 |
""" FileManagerBase is a base class for all the specific File Managers
"""
# pylint: disable=protected-access
import six
import os
import stat
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.Core.Utilities.List import intListToString
from DIRAC.Core.Utilities.Pfn import pfnunparse
class FileManagerBase(object):
"""Base class for all the specific File Managers"""
def __init__(self, database=None):
self.db = database
self.statusDict = {}
def _getConnection(self, connection):
if connection:
return connection
res = self.db._getConnection()
if res["OK"]:
return res["Value"]
gLogger.warn("Failed to get MySQL connection", res["Message"])
return connection
def setDatabase(self, database):
self.db = database
def getFileCounters(self, connection=False):
"""Get a number of counters to verify the sanity of the Files in the catalog"""
connection = self._getConnection(connection)
resultDict = {}
req = "SELECT COUNT(*) FROM FC_Files;"
res = self.db._query(req, connection)
if not res["OK"]:
return res
resultDict["Files"] = res["Value"][0][0]
req = "SELECT COUNT(FileID) FROM FC_Files WHERE FileID NOT IN ( SELECT FileID FROM FC_Replicas )"
res = self.db._query(req, connection)
if not res["OK"]:
return res
resultDict["Files w/o Replicas"] = res["Value"][0][0]
req = "SELECT COUNT(RepID) FROM FC_Replicas WHERE FileID NOT IN ( SELECT FileID FROM FC_Files )"
res = self.db._query(req, connection)
if not res["OK"]:
return res
resultDict["Replicas w/o Files"] = res["Value"][0][0]
treeTable = self.db.dtree.getTreeTable()
req = "SELECT COUNT(FileID) FROM FC_Files WHERE DirID NOT IN ( SELECT DirID FROM %s)" % treeTable
res = self.db._query(req, connection)
if not res["OK"]:
return res
resultDict["Orphan Files"] = res["Value"][0][0]
req = "SELECT COUNT(FileID) FROM FC_Files WHERE FileID NOT IN ( SELECT FileID FROM FC_FileInfo)"
res = self.db._query(req, connection)
if not res["OK"]:
resultDict["Files w/o FileInfo"] = 0
else:
resultDict["Files w/o FileInfo"] = res["Value"][0][0]
req = "SELECT COUNT(FileID) FROM FC_FileInfo WHERE FileID NOT IN ( SELECT FileID FROM FC_Files)"
res = self.db._query(req, connection)
if not res["OK"]:
resultDict["FileInfo w/o Files"] = 0
else:
resultDict["FileInfo w/o Files"] = res["Value"][0][0]
return S_OK(resultDict)
def getReplicaCounters(self, connection=False):
"""Get a number of counters to verify the sanity of the Replicas in the catalog"""
connection = self._getConnection(connection)
req = "SELECT COUNT(*) FROM FC_Replicas;"
res = self.db._query(req, connection)
if not res["OK"]:
return res
return S_OK({"Replicas": res["Value"][0][0]})
######################################################
#
# File write methods
#
def _insertFiles(self, lfns, uid, gid, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _deleteFiles(self, toPurge, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _insertReplicas(self, lfns, master=False, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _findFiles(self, lfns, metadata=["FileID"], allStatus=False, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _getFileReplicas(self, fileIDs, fields_input=["PFN"], allStatus=False, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _getFileIDFromGUID(self, guid, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def getLFNForGUID(self, guids, connection=False):
"""Returns the LFN matching a given GUID"""
return S_ERROR("To be implemented on derived class")
def _setFileParameter(self, fileID, paramName, paramValue, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _deleteReplicas(self, lfns, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _setReplicaStatus(self, fileID, se, status, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _setReplicaHost(self, fileID, se, newSE, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _getDirectoryFiles(self, dirID, fileNames, metadata, allStatus=False, connection=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _getDirectoryFileIDs(self, dirID, requestString=False):
"""To be implemented on derived class"""
return S_ERROR("To be implemented on derived class")
def _findFileIDs(self, lfns, connection=False):
"""To be implemented on derived class
Should return following the successful/failed convention
Successful is a dictionary with keys the lfn, and values the FileID"""
return S_ERROR("To be implemented on derived class")
def _getDirectoryReplicas(self, dirID, allStatus=False, connection=False):
"""To be implemented on derived class
Should return with only one value, being a list of all the replicas (FileName,FileID,SEID,PFN)
"""
return S_ERROR("To be implemented on derived class")
def countFilesInDir(self, dirId):
"""Count how many files there is in a given Directory
:param int dirID: directory id
:returns: S_OK(value) or S_ERROR
"""
return S_ERROR("To be implemented on derived class")
def _getFileLFNs(self, fileIDs):
"""Get the file LFNs for a given list of file IDs"""
stringIDs = intListToString(fileIDs)
treeTable = self.db.dtree.getTreeTable()
req = (
"SELECT F.FileID, CONCAT(D.DirName,'/',F.FileName) from FC_Files as F,\
%s as D WHERE F.FileID IN ( %s ) AND F.DirID=D.DirID"
% (treeTable, stringIDs)
)
result = self.db._query(req)
if not result["OK"]:
return result
fileNameDict = {}
for row in result["Value"]:
fileNameDict[row[0]] = row[1]
failed = {}
successful = fileNameDict
if len(fileNameDict) != len(fileIDs):
for id_ in fileIDs:
if id_ not in fileNameDict:
failed[id_] = "File ID not found"
return S_OK({"Successful": successful, "Failed": failed})
def addFile(self, lfns, credDict, connection=False):
"""Add files to the catalog
:param dict lfns: dict{ lfn : info}. 'info' is a dict containing PFN, SE, Size and Checksum
                          the SE parameter can be a list if we have several replicas to register
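        Illustrative shape of `lfns` (hypothetical values, added for clarity):
            {'/vo/user/a.dat': {'PFN': 'proto://some-se/path/a.dat',
                                'SE': 'SOME-SE-disk', 'Size': 1024,
                                'Checksum': 'ad32f8c1'}}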
"""
connection = self._getConnection(connection)
successful = {}
failed = {}
for lfn, info in list(lfns.items()):
            res = self._checkInfo(info, ["PFN", "SE", "Size", "Checksum"])
if not res["OK"]:
failed[lfn] = res["Message"]
lfns.pop(lfn)
res = self._addFiles(lfns, credDict, connection=connection)
if not res["OK"]:
for lfn in lfns.keys():
failed[lfn] = res["Message"]
else:
failed.update(res["
|
elainenaomi/sciwonc-dataflow-examples
|
sbbd2016/experiments/4-mongodb-rp-3sh/9_workflow_full_10files_primary_3sh_noannot_with_proj_9s/averageratio_0/AverageRatioEvent_0.py
|
Python
|
gpl-3.0
| 1,463 | 0.002051 |
#!/usr/bin/env python
"""
This activity will calculate the average of ratios between CPU request and Memory request by each event type.
These fields are optional and could be null.
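For example (hypothetical numbers): ratios [0.5, 0.25, None] give
sum ratio = 0.75 over 2 valid tasks out of 3, so the mean is 0.75 / 2 = 0.375.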
"""
# It will connect to DataStoreClient
from sciwonc.dataflow.DataStoreClient import DataStoreClient
import ConfigDB_Average_0
# connector and config
client = DataStoreClient("mongodb", ConfigDB_Average_0)
config = ConfigDB_Average_0
# according to config
dataList = client.getData() # return an array of docs (like a csv reader)
output = []
if(dataList):
for i in dataList:
        sum_ratio = 0
total_valid_tasks = 0
total_tasks = 0
event_type = i[config.COLUMN]
|
while True:
doc = i['data'].next()
if doc is None:
break;
total_tasks += 1
if(doc['ratio cpu memory']):
sum_ratio = sum_ratio + float(doc['ratio cpu memory'])
total_valid_tasks += 1
newline = {}
newline['event type'] = event_type
newline['sum ratio cpu memory'] = sum_ratio
newline['total valid tasks'] = total_valid_tasks
newline['total tasks'] = total_tasks
if((sum_ratio > 0) and (total_valid_tasks > 0)):
newline['mean ratio cpu memory'] = sum_ratio / total_valid_tasks
else:
newline['mean ratio cpu memory'] = None
output.append(newline)
# save
client.saveData(output)
|
paksu/django-cassandra-engine
|
setup.py
|
Python
|
bsd-2-clause
| 1,839 | 0.000544 |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import django_cassandra_engine as meta
DESCRIPTION = 'Django Cassandra Engine - the Cassandra backend for Django'
try:
with open('README.rst', 'rb') as f:
LONG_DESCRIPTION = f.read().decode('utf-8')
except IOError:
with open('README.md', 'rb') as f:
LONG_DESCRIPTION = f.read().decode('utf-8')
with open('requirements.txt', 'r') as f:
DEPENDENCIES = f.read().splitlines()
setup(
name='django-cassandra-engine',
version='.'.join(map(str, meta.__version__)),
author=meta.__author__,
author_email=meta.__contact__,
url=meta.__homepage__,
keywords='django cassandra engine backend driver wrapper database nonrel '
'cqlengine',
download_url='https://github.com/r4fek/django-cassandra-engine/tarball/master',
license='2-clause BSD',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
install_requires=DEPENDENCIES,
packages=find_packages(
exclude=['tests', 'tests.*', 'testproject', 'testproject.*']),
test_suite='testproject.runtests.main',
tests_require=['mock==1.0.1', 'django-nose'],
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
|
'Environment :: Plugins',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Database',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
simonenkong/python_training_mantis
|
test/test_add_project.py
|
Python
|
gpl-2.0
| 726 | 0.004132 |
__author__ = 'Nataly'
from model.project import Project
import string
import random
def random_string(prefix, maxlen):
symbols = string.ascii_letters
return prefix + "".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
def test_add_project(app):
project = Project(random_string("name_", 10), random_string("description_", 10))
old_list = app.soap.get_project_list()
|
if project in old_list:
app.project.delete_project(project)
old_list = app.soap.get_project_list()
app.project.add_project(project)
new_list = app.soap.get_project_list()
old_list.append(project)
assert sorted(old_list, key=Project.id_or_max) == sorted(new_list, key=Project.id_or_max)
|
AkioNak/bitcoin
|
test/functional/p2p_ibd_txrelay.py
|
Python
|
mit
| 1,575 | 0.00254 |
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test fee filters during and after IBD."""
from decimal import Decimal
from test_framework.messages import COIN
from test_framework.test_framework import BitcoinTestFramework
MAX_FEE_FILTER = Decimal(9170997) / COIN
NORMAL_FEE_FILTER = Decimal(100) / COIN
class P2PIBDTxRelayTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [
            ["-minrelaytxfee={}".format(NORMAL_FEE_FILTER)],
["-minrelaytxfee={}".format(NORMAL_FEE_FILTER)],
]
def run_test(self):
self.log.info("Check that nodes set minfilter to MAX_MONEY while still in IBD")
for node in self.nodes:
assert node.getblockchaininfo()['initialblockdownload']
            self.wait_until(lambda: all(peer['minfeefilter'] == MAX_FEE_FILTER for peer in node.getpeerinfo()))
# Come out of IBD by generating a block
self.generate(self.nodes[0], 1)
self.sync_all()
self.log.info("Check that nodes reset minfilter after coming out of IBD")
for node in self.nodes:
assert not node.getblockchaininfo()['initialblockdownload']
self.wait_until(lambda: all(peer['minfeefilter'] == NORMAL_FEE_FILTER for peer in node.getpeerinfo()))
if __name__ == '__main__':
P2PIBDTxRelayTest().main()
|
VincentMelia/PythonBasketball
|
Configuration.py
|
Python
|
mit
| 125 | 0 |
import os
BasketballPlayerDatabase = 'BasketballPlayerDatabase.p'
Root_URL = 'https://' + os.getenv('basketball_root_url')
|
jangsutsr/tower-cli
|
lib/tower_cli/utils/command.py
|
Python
|
apache-2.0
| 1,677 | 0 |
# Copyright 2015, Ansible, Inc.
# Luke Sneeringer <lsneeringer@ansible.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
|
class Command(click.Command):
"""A Command subclass that adds support for the concept that invocation
without arguments assumes `--help`.
This code is adapted by taking code from click.MultiCommand and placing
it here, to get just the --help functionality and nothing else.
"""
def __init__(self, name=None, no_args_is_help=True, **kwargs):
self.no_args_is_help = no_args_is_help
super(Command, self).__init__(name=name, **kwargs)
def parse_args(self, ctx, args):
"""Parse arguments sent to this command.
The code for this method is taken from MultiCommand:
https://github.com/mitsuhiko/click/blob/master/click/core.py
It is Copyright (c) 2014 by Armin Ronacher.
See the license:
https://github.com/mitsuhiko/click/blob/master/LICENSE
"""
if not args and self.no_args_is_help and not ctx.resilient_parsing:
click.echo(ctx.get_help())
ctx.exit()
return super(Command, self).parse_args(ctx, args)
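# Illustrative usage sketch (added for clarity, not part of the original
# module); the command name and option are hypothetical:
#
#   @click.command(cls=Command)
#   @click.option('--name', default='world')
#   def greet(name):
#       click.echo('Hello %s' % name)
#
# Calling `greet` with no arguments then prints the --help text and exits
# instead of running the command body.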
|
Svolcano/python_exercise
|
dianhua/worker/crawler/china_telecom/heilongjiang/main.py
|
Python
|
mit
| 16,977 | 0.003178 |
# -*- coding: utf-8 -*-
import hashlib
from lxml import etree
from Crypto.Cipher import AES
import base64
import time
import traceback
import re
import sys
import random
reload(sys)
sys.setdefaultencoding("utf8")
if __name__ == '__main__':
import sys
sys.path.append('../..')
sys.path.append('../../..')
sys.path.append('../../../..')
from crawler.base_crawler import BaseCrawler
from crawler.china_telecom_tool import login_unity
else:
from worker.crawler.base_crawler import BaseCrawler
from worker.crawler.china_telecom_tool import login_unity
from datetime import date
from dateutil.relativedelta import relativedelta
class Crawler(BaseCrawler):
def __init__(self, **kwargs):
super(Crawler,self).__init__(**kwargs)
def need_parameters(self, **kwargs):
# return ['pin_pwd', 'captcha_verify']
return ['pin_pwd']
def login(self, **kwargs):
ProvinceID = '10'
code, key = login_unity(self, ProvinceID, **kwargs)
if code != 0:
return code, key
url = 'http://www.189.cn/dqmh/my189/checkMy189Session.do'
data = {
'fastcode': '20000846'
}
code, key, resp = self.post(url, data=data)
if code != 0:
return code, key
headers = {
'Referer': 'http://www.189.cn/dqmh/my189/initMy189home.do',
}
url = 'http://www.189.cn/dqmh/ssoLink.do?method=linkTo&platNo=10010&toStUrl=http://hl.189.cn/service/zzfw.do?method=ywsl&id=10&fastcode=20000846&cityCode=hl'
code, key, resp = self.get(url, headers=headers)
if code != 0:
return code, key
final_url = 'http://hl.189.cn/service/zzfw.do?method=ywsl&id=10&fastcode=20000846&cityCode=hl'
for retry in xrange(self.max_retry):
code, key, resp = self.get(final_url)
if code != 0:
return code, key
if u'发送随机短信密码' in resp.text:
return 0, "success"
else:
pass
else:
self.log('crawler', 'request_error', resp)
return 9, 'website_busy_error'
def get_verify_type(self, **kwargs):
return 'SMS'
def send_verify_request(self, **kwargs):
"""
請求發送短信,或是下載圖片,或是同時發送請求
return
status_key: str, 狀態碼金鑰,參考status_code
level: int, 錯誤等級
message: unicode, 詳細的錯誤信息
image_str: str, Captcha圖片的base64字串, SMS則回空
"""
send_sms_url = 'http://hl.189.cn/service/userCheck.do'
params = {'method': 'sendMsg'}
code, key, resp = self.post(send_sms_url, params=params)
if code != 0:
return code, key, ''
if resp.text == '1':
return 0, "success", ""
else:
self.log('crawler', 'request_error', resp)
return 9, "request_error", ""
def verify(self, **kwargs):
"""
        Perform the second-step verification.
        return
            status_key: str, status-code key, see status_code
            level: int, error level
            message: unicode, detailed error message
"""
check_sms_url = 'http://hl.189.cn/service/zzfw.do'
check_sms_data = {
'method': 'checkDX',
'yzm': kwargs['sms_code']
}
code, key, resp = self.post(check_sms_url, data=check_sms_data)
|
if code != 0:
return code, key
if u'点击取消弹出' in resp.text:
return 0, "success"
elif u'验证码错误' in resp.text:
self.log('crawler', 'verify_error', resp)
return 2, 'verify_error'
else:
self.log("crawler", "unknown_error", resp)
return 9, "unknown_error"
def crawl_info(self, **kwargs):
result = {}
|
tel_info_url = 'http://hl.189.cn/service/crm_cust_info_show.do?funcName=custSupport&canAdd2Tool=canAdd2Tool'
code, key, resp = self.get(tel_info_url)
if code != 0:
return code, key, {}
try:
selector = etree.HTML(resp.text)
full_name = selector.xpath('//div[@class="fe-yu-ku"]/table/tr[2]/td[2]/text()')
if full_name:
result['full_name'] = full_name[0]
else:
result['full_name'] = ""
result['id_card'] = ''
address = selector.xpath('//div[@class="fe-yu-ku"]/table/tr[8]/td[2]/span[1]/input/@value')
if address:
result['address'] = address[0]
else:
result['address'] = ""
result['open_date'] = ''
except:
error = traceback.format_exc()
self.log('crawler', 'html_error %s' % error, resp)
return 9, "html_error", {}
return 0, "success", result
def crawl_call_log(self, **kwargs):
"""
        Crawl the itemized call records.
        return
            status_key: str, status-code key, see status_code
            level: int, error level
            message: unicode, detailed error message
            call_log: list, call detail records, see the call-log format
"""
missing_list = []
possibly_missing_list = []
website_num = 0
crawler_num = 0
call_log = []
today = date.today()
search_month = [x for x in range(-1, -6, -1)]
this_month = "%d%02d" % (today.year, today.month)
st_time = time.time()
et_time = st_time + 3
end_time = st_time+ 12
first_page_retry_times = self.max_retry
rand_time = random.randint(2,3)
while True:
first_page_retry_times -=1
            # query the current month's call records
key, level, call_log_month, wrong_flag = self.deal_call_log('2', kwargs['tel'], this_month)
now_time = time.time()
if level == -1:
possibly_missing_list.append(this_month)
break
elif level != 0:
if first_page_retry_times >0 :
continue
elif now_time < et_time:
time.sleep(random.randint(1,2))
else:
missing_list.append(this_month)
if wrong_flag == 'website':
website_num += 1
elif wrong_flag == 'crawler':
crawler_num += 1
break
else:
call_log.extend(call_log_month)
break
        # query the call records of previous months
for each_month in search_month:
month_missing = 0
query_date = today + relativedelta(months=each_month)
query_month = "%d%02d" % (query_date.year, query_date.month)
senc_page_retry_times = self.max_retry
while True:
senc_page_retry_times -= 1
key, level, call_log_history, wrong_flag = self.deal_call_log('1', kwargs['tel'], query_month)
if level == -1:
month_missing += 1
possibly_missing_list.append(query_month)
break
elif level != 0:
now_time = time.time()
if senc_page_retry_times >0:
continue
elif now_time<end_time:
time.sleep(rand_time)
else:
missing_list.append(query_month)
if wrong_flag == 'website':
website_num += 1
elif wrong_flag == 'crawler':
crawler_num += 1
break
else:
call_log.extend(call_log_history)
break
missing_list = list(set(missing_list))
if len(possibly_missing_list + missing_list) == 6:
if crawler_num > 0:
return 9, 'crawl_error', call_log, missing_list, possibly_missing_list
            return 9, 'website_busy_error', call_log, missing_list, possibly_missing_list
|
jrwdunham/lingsync2old
|
delete-empty.py
|
Python
|
apache-2.0
| 947 | 0.001056 |
"""Simple script to delete all forms with "PLACEHOLDER" as their transcription
and translation value.
"""
import sys
import json
from old_client import OLDClient
url = 'URL'
username = 'USERNAME'
password = 'PASSWORD'
c = OLDClient(url)
logged_in = c.login(username, password)
if not logged_in:
    sys.exit('Could not log in')
search = {
"query": {
"filter": ['and', [
['Form', 'transcription', '=', 'PLACEHOLDER'],
['Form', 'translations', 'transcription', '=', 'PLACEHOLDER']
]]
}
}
empty_forms = c.search('forms', search)
print 'Deleting %d forms.' % len(empty_forms)
deleted_count = 0
for form in empty_forms:
delete_path = 'forms/%d' % form['id']
resp = c.delete(delete_path)
if (type(resp) is not dict) or resp['id'] != form['id']:
print 'Failed to delete form %d' % form['id']
else:
deleted_count += 1
print 'Deleted %d forms.' % deleted_count
|
jackfirth/pyramda
|
pyramda/private/curry_spec/__init__.py
|
Python
|
mit
| 242 | 0 |
from .curry_spec import CurrySpec, ArgValues
from .arg_values_fulfill_curry_spec import arg_values_fulfill_curry_spec
from .make_func_curry_spec import make_func_curry_spec
from .remove_args_from_curry_spec import remove_args_from_curry_spec
|
unaguil/hyperion-ns2
|
experiments/measures/generic/Overhead.py
|
Python
|
apache-2.0
| 1,273 | 0.01414 |
import re
from measures.periodicValues.PeriodicValues import PeriodicValues
from measures.generic.GenericMeasure import GenericMeasure as GenericMeasure
import measures.generic.Units as Units
class Overhead(GenericMeasure):
    def __init__(self, period, simulationTime):
GenericMeasure.__init__(self, '', period, simulationTime, Units.MESSAGE_OVERHEAD)
self.__measures = []
self.__initializePattern = re.compile('INFO peer.BasicPeer - Peer ([0-9]+) initializing ([0-9]+\,[0-9]+).*?')
self.__neighbors = 0
def addMeasure(self, measure):
self.__measures.append(measure)
|
def parseLine(self, line):
m = self.__initializePattern.match(line)
if m is not None:
self.__neighbors += 1
return
for measure in self.__measures:
measure.parseLine(line)
def getValues(self):
return PeriodicValues(0, self.getPeriod(), self.getSimulationTime())
def getTotalValue(self):
total = 0
for measure in self.__measures:
total += measure.getTotalValue()
return total / float(self.__neighbors) / self.getSimulationTime()
|
gajim/python-nbxmpp
|
nbxmpp/modules/bookmarks/pep_bookmarks.py
|
Python
|
gpl-3.0
| 3,379 | 0 |
# Copyright (C) 2018 Philipp Hörist <philipp AT hoerist.com>
#
# This file is part of nbxmpp.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; If not, see <http://www.gnu.org/licenses/>.
from nbxmpp.namespaces import Namespace
from nbxmpp.protocol import NodeProcessed
from nbxmpp.structs import StanzaHandler
from nbxmpp.task import iq_request_task
from nbxmpp.errors import MalformedStanzaError
from nbxmpp.modules.base import BaseModule
from nbxmpp.modules.util import raise_if_error
from nbxmpp.modules.bookmarks.util import parse_bookmarks
from nbxmpp.modules.bookmarks.util import build_storage_node
BOOKMARK_OPTIONS = {
'pubsub#persist_items': 'true',
'pubsub#access_model': 'whitelist',
}
class PEPBookmarks(BaseModule):
_depends = {
'publish': 'PubSub',
'request_items': 'PubSub',
}
def __init__(self, client):
BaseModule.__init__(self, client)
self._client = client
self.handlers = [
StanzaHandler(name='message',
callback=self._process_pubsub_bookmarks,
ns=Namespace.PUBSUB_EVENT,
priority=16),
]
def _process_pubsub_bookmarks(self, _client, stanza, properties):
if not properties.is_pubsub_event:
return
if properties.pubsub_event.node != Namespace.BOOKMARKS:
return
item = properties.pubsub_event.item
if item is None:
# Retract, Deleted or Purged
return
try:
bookmarks = parse_bookmarks(item, self._log)
except MalformedStanzaError as error:
self._log.warning(error)
self._log.warning(stanza)
raise NodeProcessed
if not bookmarks:
self._log.info('Bookmarks removed')
return
pubsub_event = properties.pubsub_event._replace(data=bookmarks)
self._log.info('Received bookmarks from: %s', properties.jid)
for bookmark in bookmarks:
self._log.info(bookmark)
properties.pubsub_event = pubsub_event
@iq_request_task
def request_bookmarks(self):
_task = yield
items = yield self.request_items(Namespace.BOOKMARKS, max_items=1)
raise_if_error(items)
if not items:
yield []
bookmarks = parse_bookmarks(items[0], self._log)
for bookmark in bookmarks:
self._log.info(bookmark)
yield bookmarks
@iq_request_task
def store_bookmarks(self, bookmarks):
_task = yield
self._log.info('Store Bookmarks')
self.publish(Namespace.BOOKMARKS,
build_storage_node(bookmarks),
id_='current',
options=BOOKMARK_OPTIONS,
force_node_options=True)
|
xenolog/fuel-utils
|
fuel_utils/fdb_cleaner/config.py
|
Python
|
apache-2.0
| 2,221 | 0.002251 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import eventlet
eventlet.monkey_patch()
import re
import sys
import errno
import logging
from settings import LOG_NAME
class BaseAuthConfig(object):
"""
read auth config and store it.
Try be a singletone
"""
def __init__(self):
self._configs = {}
@staticmethod
def _read_config(cfg_file):
"""
Read OS auth config file
        cfg_file -- the path to config file
"""
auth_conf_errors = {
'OS_TENANT_NAME': 'Missing tenant name.',
'OS_USERNAME': 'Missing username.',
'OS_PASSWORD': 'Missing password.',
'OS_AUTH_URL': 'Missing API url.',
}
rv = {}
stripchars = " \'\""
LOG = logging.getLogger(LOG_NAME)
try:
with open(cfg_file) as f:
for line in f:
rg = re.match(r'\s*export\s+(\w+)\s*=\s*(.*)', line)
if rg:
rv[rg.group(1).strip(stripchars)] = \
rg.group(2).strip(stripchars)
except IOError:
LOG.error("Can't open file '{path}'".format(path=cfg_file))
sys.exit(errno.ENOENT)
# error detection
exit_msg = []
for i, e in auth_conf_errors.iteritems():
if rv.get(i) is None:
exit_msg.append(e)
if len(exit_msg) > 0:
for msg in exit_msg:
LOG.error("AUTH-config error: '{msg}'".format(msg=msg))
sys.exit(errno.EPROTO)
return rv
def read(self, cfg_filename='/root/openrc'):
"""
Read or get from cache OS auth config file
Args:
cfg_filename (str) -- the path to config file
Returns:
Dict of auth params.
Raises:
IOError: if file can't readable or not wound.
"""
rv = self._configs.get(cfg_filename)
if rv:
return rv
rv = self._read_config(cfg_filename)
self._configs[cfg_filename] = rv
return self._configs.get(cfg_filename)
AuthConfig = BaseAuthConfig()
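# Illustrative usage (added for clarity, not part of the original module);
# '/root/openrc' is the default path accepted by read():
#
#   creds = AuthConfig.read('/root/openrc')
#   auth_url = creds['OS_AUTH_URL']   # value parsed from `export OS_AUTH_URL=...`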
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
h4wkmoon/shinken
|
shinken/objects/timeperiod.py
|
Python
|
agpl-3.0
| 31,954 | 0.005195 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
#     Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
# Calendar date
# -------------
# '(\d{4})-(\d{2})-(\d{2}) - (\d{4})-(\d{2})-(\d{2}) / (\d+) ([0-9:, -]+)'
# => len = 8 => CALENDAR_DATE
#
# '(\d{4})-(\d{2})-(\d{2}) / (\d+) ([0-9:, -]+)'
# => len = 5 => CALENDAR_DATE
#
# '(\d{4})-(\d{2})-(\d{2}) - (\d{4})-(\d{2})-(\d{2}) ([0-9:, -]+)'
# => len = 7 => CALENDAR_DATE
#
# '(\d{4})-(\d{2})-(\d{2}) ([0-9:, -]+)'
# => len = 4 => CALENDAR_DATE
#
# Month week day
# --------------
# '([a-z]*) (\d+) ([a-z]*) - ([a-z]*) (\d+) ([a-z]*) / (\d+) ([0-9:, -]+)'
# => len = 8 => MONTH WEEK DAY
# e.g.: wednesday 1 january - thursday 2 july / 3
#
# '([a-z]*) (\d+) - ([a-z]*) (\d+) / (\d+) ([0-9:, -]+)' => len = 6
# e.g.: february 1 - march 15 / 3 => MONTH DATE
# e.g.: monday 2 - thusday 3 / 2 => WEEK DAY
# e.g.: day 2 - day 6 / 3 => MONTH DAY
#
# '([a-z]*) (\d+) - (\d+) / (\d+) ([0-9:, -]+)' => len = 6
# e.g.: february 1 - 15 / 3 => MONTH DATE
# e.g.: thursday 2 - 4 => WEEK DAY
# e.g.: day 1 - 4 => MONTH DAY
#
# '([a-z]*) (\d+) ([a-z]*) - ([a-z]*) (\d+) ([a-z]*) ([0-9:, -]+)' => len = 7
# e.g.: wednesday 1 january - thursday 2 july => MONTH WEEK DAY
#
# '([a-z]*) (\d+) - (\d+) ([0-9:, -]+)' => len = 7
# e.g.: thursday 2 - 4 => WEEK DAY
# e.g.: february 1 - 15 / 3 => MONTH DATE
# e.g.: day 1 - 4 => MONTH DAY
#
# '([a-z]*) (\d+) - ([a-z]*) (\d+) ([0-9:, -]+)' => len = 5
# e.g.: february 1 - march 15 => MONTH DATE
# e.g.: monday 2 - thusday 3 => WEEK DAY
# e.g.: day 2 - day 6 => MONTH DAY
#
# '([a-z]*) (\d+) ([0-9:, -]+)' => len = 3
# e.g.: february 3 => MONTH DATE
# e.g.: thursday 2 => WEEK DAY
# e.g.: day 3 => MONTH DAY
#
# '([a-z]*) (\d+) ([a-z]*) ([0-9:, -]+)' => len = 4
# e.g.: thusday 3 february => MONTH WEEK DAY
#
# '([a-z]*) ([0-9:, -]+)' => len = 6
# e.g.: thusday => normal values
#
# Types: CALENDAR_DATE
# MONTH WEEK DAY
# WEEK DAY
# MONTH DATE
# MONTH DAY
#
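# Example object definition (hypothetical values, added for clarity), using
# entry styles matched by the patterns above:
#
#   define timeperiod{
#       timeperiod_name      misc-single-days
#       alias                Misc Single Days
#       1999-01-28           00:00-24:00     ; CALENDAR_DATE
#       day 2                00:00-24:00     ; MONTH DAY
#       february 10          00:00-24:00     ; MONTH DATE
#       thursday 1 november  00:00-24:00     ; MONTH WEEK DAY
#       monday               09:00-17:00     ; plain weekday entry
#       }
#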
import time
import re
from item import Item, Items
from shinken.daterange import Daterange, CalendarDaterange
from shinken.daterange import StandardDaterange, MonthWeekDayDaterange
from shinken.daterange import MonthDateDaterange, WeekDayDaterange
from shinken.daterange import MonthDayDaterange
from shinken.brok import Brok
from shinken.property import IntegerProp, StringProp, ListProp, BoolProp
from shinken.log import logger, naglog_result
class Timeperiod(Item):
id = 1
my_type = 'timeperiod'
properties = Item.properties.copy()
properties.update({
'timeperiod_name': StringProp(fill_brok=['full_status']),
'alias': StringProp(default='', fill_brok=['full_status']),
'use': StringProp(default=''),
'register': IntegerProp(default='1'),
# These are needed if a broker module calls methods on timeperiod objects
'dateranges': ListProp(fill_brok=['full_status'], default=[]),
'exclude': ListProp(fill_brok=['full_status'], default=[]),
'is_active': BoolProp(default='0')
})
running_properties = Item.running_properties.copy()
def __init__(self, params={}):
self.id = Timeperiod.id
Timeperiod.id = Timeperiod.id + 1
self.unresolved = []
self.dateranges = []
self.exclude = ''
self.customs = {}
self.plus = {}
self.invalid_entries = []
for key in params:
# timeperiod objects are too complicated to support multi valued
# attributes. we do as usual, last set value wins.
if isinstance(params[key], list):
if params[key]:
params[key] = params[key][-1]
else:
params[key] = ''
if key in ['name', 'alias', 'timeperiod_name', 'exclude', 'use', 'register', 'imported_from', 'is_active', 'dateranges']:
setattr(self, key, params[key])
else:
self.unresolved.append(key + ' ' + params[key])
self.cache = {} # For tunning purpose only
self.invalid_cache = {} # same but for invalid search
self.configuration_errors = []
self.configuration_warnings = []
# By default the tp is None so we know we just start
self.is_active = None
self.tags = set()
def get_name(self):
return getattr(self, 'timeperiod_name', 'unknown_timeperiod')
# We fillfull properties with template ones if need
# for the unresolved values (like sunday ETCETC)
def get_unresolved_properties_by_inheritance(self, items):
# Ok, I do not have prop, Maybe my templates do?
# Same story for plus
for i in self.templates:
self.unresolved.extend(i.unresolved)
# Ok timeperiods are a bit different from classic items, because we do not have a real list
# of our raw properties, like if we got february 1 - 15 / 3 for example
def get_raw_import_values(self):
properties = ['timeperiod_name', 'alias', 'use', 'register']
r = {}
for prop in properties:
if hasattr(self, prop):
v = getattr(self, prop)
print prop, ":", v
r[prop] = v
# Now the unresolved one. The only way to get ride of same key things is to put
# directly the full value as the key
for other in self.unresolved:
r[other] = ''
return r
def is_time_valid(self, t):
if self.has('exclude'):
for dr in self.exclude:
if dr.is_time_valid(t):
return False
for dr in self.dateranges:
if dr.is_time_valid(t):
return True
return False
# will give the first time > t which is valid
def get_min_from_t(self, t):
mins_incl = []
for dr in self.dateranges:
mins_incl.append(dr.get_min_from_t(t))
return min(mins_incl)
# will give the first time > t which is not valid
def get_not_in_min_from_t(self, f):
pass
def find_next_valid_time_from_cache(self, t):
try:
return self.cache[t]
except KeyError:
return None
def find_next_invalid_time_from_cache(self, t):
try:
return self.invalid_cache[t]
except KeyError:
return None
# will look for active/un-active change. And log it
# [1327392000] TIMEPERIOD TRANSITION: <name>;<from>;<to>
# from is -1 on startup. to is 1 if the timeperiod starts
# and 0 if it ends.
def check_and_log_activation_change(self):
now = int(time.time())
was_active = self.is_active
self.is_active = self.is_time_valid(now)
# If we got a change, log it!
if self.is_active != was_active:
_from = 0
_to = 0
# If it's the start, get a special value for was
if was_active is None:
_from = -1
if was_active:
_from = 1
if self.is_active:
_to = 1
# Now raise the log
naglog_result('info', 'TIMEPERIOD TRANSITION: %s;%d;%d'
% (self.get_name(), _from, _to))
# clean the get_next_valid_time_fr
|
kolanos/dialtone
|
dialtone/blueprints/message/__init__.py
|
Python
|
mit
| 68 | 0 |
from dialtone.blueprints.message.views import bp as message # noqa
|
bverdu/onDemand
|
onDemand/plugins/iot/example/nest/base.py
|
Python
|
agpl-3.0
| 22,923 | 0.000305 |
# encoding: utf-8
'''
Created on 18 August 2015
@author: Bertrand Verdu
'''
from collections import OrderedDict
from lxml import etree as et
from onDemand.plugins import Client
from onDemand.protocols.rest import Rest
from onDemand.plugins.nest.structure import Structure
from onDemand.plugins.nest.thermostat import Thermostat
from onDemand.plugins.nest.smoke_co_alarm import SmokeAlarm
datamodel = '''<?xml version="1.0" encoding="UTF-8"?>
<cms:SupportedDataModels xmlns:cms="urn:schemas-upnp-org:dm:cms" ''' +\
'''xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ''' +\
'''xsi:schemaLocation="urn: schemas-upnp-org:dm:cms ''' +\
'''http://www.upnp.org/schemas/dm/cms.xsd">
<SubTree>
<URI>
urn:upnp-org:smgt:1
</URI>
<Location>
/UPnP/SensorMgt
</Location>
<URL>
http://www.upnp.org/specs/smgt/UPnP-smgt-SensorDataModel-v1-Service.pdf
</URL>
<Description>
Nest© sensors model
</Description>
</SubTree>
</SupportedDataModels>'''
xmllist = '''<?xml version="1.0" encoding="UTF-8"?>
<cms:{pt}List xmlns:cms="urn: schemas-upnp-org:dm:cms" ''' +\
'''xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ''' +\
'''xsi:schemaLocation="urn: schemas-upnp-org:dm:cms ''' +\
'''http://www.upnp.org/schemas/dm/cms.xsd">
<!-- The document contains a list of zero or more elements. -->
{val}
</cms:{pt}List>'''
sensor_events = '''<?xml version="1.0" encoding="utf-8"?>
<SensorEvents xmlns="urn:schemas-upnp-org:smgt:sdmevent" ''' +\
'''xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ''' +\
'''xsi:schemaLocation="urn:schemas-upnp-org:smgt:sdmevent ''' +\
'''http://www.upnp.org/schemas/smgt/sdmevent-v1.xsd">
{sensor_event}
</SensorEvents>'''
sensor_event = '''<sensorevent collectionID="{col_id}" ''' +\
'''sensorID="{sensor_id}" event="{name}"/>'''
xmlpath = '<{pt}Path>{val}</{pt}Path>'
parameter = '''<Parameter>
<ParameterPath>{resp[0]}</ParameterPath>
<Value>{resp[1]}</Value>
</Parameter>'''
class Nest(Rest, Client):
col_parameters = ('CollectionID', 'CollectionType',
'CollectionFriendlyName', 'CollectionInformation',
'CollectionUniqueIdentifier', 'CollectionSpecific/')
sensors_parameters = ('SensorID', 'SensorType', 'SensorEventEnable',
'SensorSpecific/', 'SensorURNsNumberOfEntries ',
'SensorURNs')
data_items_parameters = ('ClientID',
'ReceiveTimestamp',
'Name', 'Type', 'Encoding', 'Description')
path = '/UPNP/SensorMgt'
pathlevels = {}
pathlevels.update({1: ['/UPNP/SensorMgt/SensorEvents',
'/UPNP/SensorMgt/SensorCollectionsNumberOfEntries',
'/UPNP/SensorMgt/SensorCollections/#/']})
pathlevels.update(
{2: pathlevels[1] +
['/UPNP/SensorMgt/SensorCollections/SensorsNumberOfEntries']})
pathlevels.update(
{3: [p for p in pathlevels[2] if
p != '/UPNP/SensorMgt/SensorCollections/#/'] +
[''.join(('/UPNP/SensorMgt/SensorCollections/#/', p))
for p in col_parameters] +
['/UPnP/SensorMgt/SensorCollections/#/Sensors/#/']})
pathlevels.update({4: pathlevels[3]})
pathlevels.update(
{5: [p for p in pathlevels[4] if
p != '/UPnP/SensorMgt/SensorCollections/#/Sensors/#/'] +
[''.join(('/UPnP/SensorMgt/SensorCollections/#/Sensors/#/', p))
for p in sensors_parameters] +
['/UPnP/SensorMgt/SensorCollections/#/Sensors/#/SensorURNs/#/']})
pathlevels.update(
{6: pathlevels[5] +
['/UPnP/SensorMgt/SensorCollections/#/Sensors/#/SensorURNs/' +
'DataItemsNumberOfEntries']})
pathlevels.update(
{7: [p for p in pathlevels[6] if
p != '/UPnP/SensorMgt/SensorCollections/#/Sensors/#/'] +
['/UPnP/SensorMgt/SensorCollections/#/Sensors/#/SensorURNs'] +
['/UPnP/SensorMgt/SensorCollections/#/Sensors/#/SensorURNs/' +
'DataItems/#/']})
pathlevels.update({8: pathlevels[7]})
pathlevels.update(
{9: [p for p in pathlevels[8] if
p != '/UPnP/SensorMgt/SensorCollections/#/Sensors/#/SensorURNs/' +
'DataItems/#/'] +
[''.join(('/UPnP/SensorMgt/SensorCollections/#/Sensors/#/' +
'SensorURNs/DataItems/#/', p))
for p in data_items_parameters]})
pathlevels.update({0: pathlevels[9]})
def __init__(self, *args, **kwargs):
kwargs.update({'event_handler': self.got_data})
self._structures = OrderedDict()
self._devices = OrderedDict()
# self._sensors = []
self._data = {}
self.events = {}
super(Nest, self).__init__(*args, **kwargs)
def got_data(self, data):
# print('1')
if isinstance(data, dict):
evt = dictdiffupdate(self._data, data)
# print('3')
if len(evt) > 0 and 'data' in evt:
self.update(evt['data'])
self._data = data
else:
print(data)
def parse_tree(self, starting_node, depth):
if depth == 0:
ad = 8
else:
ad = len(starting_node.split(self.path)[1].split('/')) - 1 + depth
if ad > 1:
            r = [xmlpath.format(pt='Instance', val='/'.join((
self.path,
'SensorCollections',
str(self._structures.keys().index(p)), '')))
for p in self._structures]
if ad > 3:
|
s = []
u = []
for key, structure in self._structures.iteritems():
i = 0
id_ = str(self._structures.keys().index(key))
s.append('/'.join((self.path,
'SensorCollections',
id_,
'Sensors',
str(i),
'')))
u.append('/'.join((self.path,
'SensorCollections',
id_,
'Sensors',
str(i),
'SensorsURNs',
'0',
'')))
for dev_id in structure.thermostats:
i += 1
s.append('/'.join((
self.path, 'SensorCollections', id_, 'Sensors', str(i),
'')))
device = self._devices[dev_id]
for j in range(len(device.sensors.keys())):
u.append('/'.join((
self.path, 'SensorCollections', id_, 'Sensors',
str(i), 'SensorsURNs', str(j), '')))
for dev_id in structure.smoke_co_alarms:
i += 1
s.append('/'.join((
self.path, 'SensorCollections', id_, 'Sensors', str(i),
'')))
device = self._devices[dev_id]
for j in range(len(device.sensors.keys())):
u.append('/'.join((
self.path, 'SensorCollections', id_, 'Sensors',
str(i), 'SensorsURNs', str(j), '')))
r += [xmlpath.format(pt='Instance', val=p) for p in s]
if ad > 5:
r += [xmlpath.format(pt='Instance', val=p) for p in u]
if ad > 7:
pp = []
for p in u:
for i in range(6):
pp.append(''.join((p, 'DataItems/', str(i) + '/')))
r += [xmlpath.format(pt='Instance', val=p) for p in pp]
return xmllist.format(pt='InstancePath', val='\n'.join(r))
def update(self, event):
# print('4')
if 'structures' in event:
# print('structure update')
for id_, value in event['structures'].iteritems():
# print('%s -- %s' % (id_, value))
if id_ in sel
|
patillacode/patilloid
|
app/patilloid.py
|
Python
|
gpl-3.0
| 565 | 0.00177 |
import traceback
from flask import (
Blueprint,
current_app,
jsonify,
render_template,
)
bp = Blueprint('patilloid', __name__)
@bp.route('/', methods=('GET',))
def index():
try:
current_app.logger.info("Let's show them Patilloid!")
return render_template('patilloid.html')
except Exception as err:
current_app.logger.error(err)
current_app.logger.error(traceback.format_exc())
return (
            jsonify({"error": "Sorry, something bad happened with your request."}),
400,
)
|
the-packet-thrower/pynet
|
Week07/ex2_vlan.py
|
Python
|
gpl-3.0
| 3,369 | 0.002078 |
#!/usr/bin/env python
'''
Using Arista's pyeapi, create a script that allows you to add a VLAN (both the
VLAN ID and the VLAN name). Your script should first check that the VLAN ID is
available and only add the VLAN if it doesn't already exist. Use VLAN IDs
between 100 and 999. You should be able to call the script from the command
line as follows:
python eapi_vlan.py --name blue 100 # add VLAN100, name blue
If you call the script with the --remove option, the VLAN will be removed.
python eapi_vlan.py --remove 100 # remove VLAN100
Once again only remove the VLAN if it exists on the switch. You will probably
want to use Python's argparse to accomplish the argument processing.
'''
import pyeapi
import argparse
def pyeapi_result(output):
'''
Return the 'result' value from the pyeapi output
'''
return output[0]['result']
def check_vlan_exists(eapi_conn, vlan_id):
'''
Check if the given VLAN exists
Return either vlan_name or False
'''
vlan_id = str(vlan_id)
cmd = 'show vlan id {}'.format(vlan_id)
try:
response = eapi_conn.enable(cmd)
check_vlan = pyeapi_result(response)['vlans']
return check_vlan[vlan_id]['name']
except (pyeapi.eapilib.CommandError, KeyError):
pass
return False
def configure_vlan(eapi_conn, vlan_id, vlan_name=None):
'''
Add the given vlan_id to the switch
Set the vlan_name (if provided)
Note, if the vlan already exists, then this will just set the vlan_name
'''
command_str1 = 'vlan {}'.format(vlan_id)
cmd = [command_str1]
if vlan_name is not None:
command_str2 = 'name {}'.format(vlan_name)
cmd.append(command_str2)
return eapi_conn.config(cmd)
def main():
'''
Add/remove vlans from Arista switch in an idempotent manner
|
'''
eapi_conn = pyeapi.connect_to("pynet-sw2")
# Argument parsing
parser = argparse.ArgumentParser(
description="Idempotent addition/removal of VLAN to Arista switch"
)
parser.add_argument("vlan_id", help="VLAN number to create or remove", action="store", type=int)
parser.add_argument(
"--name",
help="Specify VLAN name",
action="store",
dest="vlan_name",
type=str
)
    parser.add_argument("--remove", help="Remove the given VLAN ID", action="store_true")
cli_args = parser.parse_args()
vlan_id = cli_args.vlan_id
remove = cli_args.remove
vlan_name = cli_args.vlan_name
# Check if VLAN already exists
check_vlan = check_vlan_exists(eapi_conn, vlan_id)
# check if action is remove or add
if remove:
if check_vlan:
print "VLAN exists, removing it"
command_str = 'no vlan {}'.format(vlan_id)
eapi_conn.config([command_str])
else:
print "VLAN does not exist, no action required"
else:
if check_vlan:
if vlan_name is not None and check_vlan != vlan_name:
print "VLAN already exists, setting VLAN name"
configure_vlan(eapi_conn, vlan_id, vlan_name)
else:
print "VLAN already exists, no action required"
else:
print "Adding VLAN including vlan_name (if present)"
configure_vlan(eapi_conn, vlan_id, vlan_name)
if __name__ == "__main__":
main()
|
z23han/Wrangling-MongoDB
|
Lesson_3_Problem_Set/03-Fixing_the_Area/area.py
|
Python
|
agpl-3.0
| 2,143 | 0.0056 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
In this problem set you work with cities infobox data, audit it, come up with a cleaning idea and then clean it up.
Since in the previous quiz you made a decision on which value to keep for the "areaLand" field,
you now know what has to be done.
Finish the function fix_area(). It will receive a string as an input, and it has to return a float
representing the value of the area or None.
You have to change the function fix_area. You can use extra functions if you like, but changes to process_file
will not be taken into account.
The rest of the code is just an example on how this function can be used.
"""
import codecs
import csv
import json
import pprint
CITIES = 'cities.csv'
def fix_area(area):
# YOUR CODE HERE
if area == 'NULL':
return None
elif area.startswith('{'):
area = area.replace('{', '')
if area.endswith('}'):
area = area.replace('}', '')
dataList = area.split('|')
retArea = ''
for data in dataList:
if len(data) > len(retArea):
retArea = str(data)
return float(retArea)
else:
return float(area)
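# Illustrative behaviour of fix_area (hypothetical inputs, added for clarity);
# when several values were merged into one field, the longest string is kept,
# which here stands for the more precise measurement:
#
#   fix_area('NULL')                   # -> None
#   fix_area('55166700.0')             # -> 55166700.0
#   fix_area('{4.5e+07|45000000.0}')   # -> 45000000.0 (longer string wins)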
global_name = ['areaLand', 'name', 'areaMetro', 'populationTotal', 'postalCode']
def process_file(filename, key):
# CHANGES TO THIS FUNCTION WILL BE IGNORED WHEN YOU SUBMIT THE EXERCISE
data = []
with open(filename, "r") as f:
reader = csv.DictReader(f)
#skipping the extra matadata
for i in range(3):
l = reader.next()
# processing file
for line in reader:
# calling your function to fix the area value
if key in line:
line[key] = fix_area(line[key])
data.append(line)
return data
def test():
nameNum = 0
data = process_file(CITIES, global_name[nameNum])
print "Printing three example results:"
for n in range(5,8):
pprint.pprint(data[n][global_name[nameNum]])
#assert data[8][global_name[1]] == 55166700.0
#assert data[3][global_name[1]] == None
if __name__ == "__main__":
test()
|
mathLab/RBniCS
|
rbnics/utils/io/performance_table.py
|
Python
|
lgpl-3.0
| 12,306 | 0.003169 |
# Copyright (C) 2015-2022 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later
import os
import sys
import collections
from numpy import exp, isnan, log, max, mean, min, nan, zeros as Content
from rbnics.utils.io.csv_io import CSVIO
from rbnics.utils.io.folders import Folders
class PerformanceTable(object):
# Storage for class methods
_suppressed_groups = list()
_preprocessor_setitem = dict()
def __init__(self, testing_set):
self._columns = dict() # string to Content matrix
self._columns_operations = dict() # string to tuple
self._columns_not_implemented = dict() # string to bool
self._rows_not_implemented = dict() # string to dict of bool
self._groups = dict() # string to list
self._group_names_sorted = list()
self._len_testing_set = len(testing_set)
self._Nmin = 1
self._Nmax = 0
def set_Nmin(self, Nmin):
self._Nmin = Nmin
def set_Nmax(self, Nmax):
self._Nmax = Nmax
def add_column(self, column_name, group_name, operations):
assert self._Nmax > 0
assert self._Nmax >= self._Nmin
assert column_name not in self._columns and column_name not in self._columns_operations
self._columns[column_name] = Content((self._Nmax - self._Nmin + 1, self._len_testing_set))
self._columns_not_implemented[column_name] = None # will be set to a bool
self._rows_not_implemented[column_name] = {
n: None for n in range(self._Nmax - self._Nmin + 1)} # will be set to a bool
if group_name not in self._groups:
self._groups[group_name] = list()
self._group_names_sorted.append(group_name) # preserve the ordering provided by the user
self._groups[group_name].append(column_name)
if isinstance(operations, str):
self._columns_operations[column_name] = (operations,)
elif isinstance(operations, tuple):
self._columns_operations[column_name] = operations
else:
raise ValueError("Invalid operation in PerformanceTable")
@classmethod
def suppress_group(cls, group_name):
cls._suppressed_groups.append(group_name)
@classmethod
def clear_suppressed_groups(cls):
cls._suppressed_groups = list()
@classmethod
def preprocess_setitem(cls, group_name, function):
cls._preprocessor_setitem[group_name] = function
@classmethod
def clear_setitem_preprocessing(cls):
cls._preprocessor_setitem.clear()
def __getitem__(self, args):
assert len(args) == 3
column_name = args[0]
N = args[1]
mu_index = args[2]
assert self._columns_not_implemented[column_name] in (True, False)
assert self._rows_not_implemented[column_name][N - self._Nmin] in (True, False)
if (not self._columns_not_implemented[column_name]
and not self._rows_not_implemented[column_name][N - self._Nmin]):
return self._columns[column_name][N - self._Nmin, mu_index]
else:
return CustomNotImplementedAfterDiv
def __setitem__(self, args, value):
assert len(args) == 3
column_name = args[0]
N = args[1]
mu_index = args[2]
if is_not_implemented(value):
assert self._columns_not_implemented[column_name] in (None, True, False)
if self._columns_not_implemented[column_name] is None:
self._columns_not_implemented[column_name] = True
assert self._rows_not_implemented[column_name][N - self._Nmin] in (None, True)
if self._rows_not_implemented[column_name][N - self._Nmin] is None:
self._rows_not_implemented[column_name][N - self._Nmin] = True
else:
assert self._columns_not_implemented[column_name] in (None, True, False)
if self._columns_not_implemented[column_name] in (None, True):
self._columns_not_implemented[column_name] = False
assert self._rows_not_implemented[column_name][N - self._Nmin] in (None, False)
if self._rows_not_implemented[column_name][N - self._Nmin] is None:
self._rows_not_implemented[column_name][N - self._Nmin] = False
if column_name not in self._preprocessor_setitem:
self._columns[column_name][N - self._Nmin, mu_index] = value
else:
self._columns[column_name][N - self._Nmin, mu_index] = self._preprocessor_setitem[column_name](value)
def _process(self):
groups_content = collections.OrderedDict()
for group in self._group_names_sorted:
            # Skip suppressed groups
if group in self._suppressed_groups:
continue
# Populate all columns
columns = list()
for column in self._groups[group]:
assert self._columns_not_implemented[column] in (True, False)
if self._columns_not_implemented[column] is False:
columns.append(column)
if len(columns) == 0:
continue
# Storage for print
table_index = list() # of strings
table_header = dict() # from string to string
table_content = dict() # from string to Content array
column_size = dict() # from string to int
# First column should be the reduced space dimension
table_index.append("N")
table_header["N"] = "N"
table_content["N"] = list(range(self._Nmin, self._Nmax + 1))
column_size["N"] = max([max([len(str(x)) for x in table_content["N"]]), len("N"
|
)])
# Then fill in with postprocessed data
for column in columns:
for operation in self._columns_operations[column]:
# Set header
if operation in ("min", "max"):
current_table_header = operation + "(" + column + ")"
current_table_index = operation + "_" + column
elif operation == "mean":
current_table_header = "gmean(" + column
|
+ ")"
current_table_index = "gmean_" + column
else:
raise ValueError("Invalid operation in PerformanceTable")
table_index.append(current_table_index)
table_header[current_table_index] = current_table_header
# Compute the required operation of each column over the second index (testing set)
table_content[current_table_index] = Content((self._Nmax - self._Nmin + 1,))
for n in range(self._Nmin, self._Nmax + 1):
assert self._rows_not_implemented[column][n - self._Nmin] in (None, True, False)
if self._rows_not_implemented[column][n - self._Nmin] is False:
if operation == "min":
current_table_content = min(self._columns[column][n - self._Nmin, :])
elif operation == "mean":
data = self._columns[column][n - self._Nmin, :]
if not data.any(): # all zeros
current_table_content = 0.
else:
data[data == 0.] = sys.float_info.epsilon
current_table_content = exp(mean(log(data)))
elif operation == "max":
current_table_content = max(self._columns[column][n - self._Nmin, :])
else:
raise ValueError("Invalid operation in PerformanceTable")
table_content[current_table_index][n - self._Nmin] = current_table_content
else:
table_content[current_table_index][n - self._Nmin] = nan
# Get the width of the columns
|
dagwieers/ansible
|
lib/ansible/modules/cloud/kubevirt/kubevirt_rs.py
|
Python
|
gpl-3.0
| 6,766 | 0.002513 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: kubevirt_rs
short_description: Manage KubeVirt virtual machine replica sets
description:
- Use Openshift Python SDK to manage the state of KubeVirt virtual machine replica sets.
version_added: "2.8"
author: KubeVirt Team (@kubevirt)
options:
state:
description:
- Create or delete virtual machine replica sets.
default: "present"
choices:
- present
- absent
type: str
name:
description:
- Name of the virtual machine replica set.
required: true
type: str
namespace:
description:
- Namespace where the virtual machine replica set exists.
required: true
type: str
selector:
description:
- "Selector is a label query over a set of virtual machine."
required: true
type: dict
replicas:
description:
- Number of desired pods. This is a pointer to distinguish between explicit zero and not specified.
    - Replicas defaults to 1 for a newly created replica set.
type: int
extends_documentation_fragment:
- k8s_auth_options
- kubevirt_vm_options
- kubevirt_common_options
requirements:
- python >= 2.7
- openshift >= 0.8.2
'''
EXAMPLES = '''
- name: Create virtual machine replica set 'myvmir'
kubevirt_rs:
    state: present
name: myvmir
namespace: vms
wait: true
replicas: 3
memory: 64M
labels:
myvmi: myvmi
selector:
matchLabels:
myvmi: myvmi
disks:
- name: containerdisk
volume:
containerDisk:
image: kubevirt/cirros-container-disk-demo:latest
path: /custom-disk/cirros.img
disk:
bus: virtio
- name: Remove virtual machine replica set 'myvmir'
kubevirt_rs:
state: absent
name: myvmir
namespace: vms
wait: true
'''
RETURN = '''
kubevirt_rs:
description:
  - The virtual machine replica set managed by the user.
- "This dictionary contains all values returned by the KubeVirt API all options
are described here U(https://kubevirt.io/api-reference/master/definitions.html#_v1_virtualmachineinstance)"
returned: success
type: complex
contains: {}
'''
import copy
import traceback
from ansible.module_utils.k8s.common import AUTH_ARG_SPEC
try:
from openshift.dynamic.client import ResourceInstance
except ImportError:
# Handled in module_utils
pass
from ansible.module_utils.kubevirt import (
virtdict,
KubeVirtRawModule,
VM_COMMON_ARG_SPEC,
)
KIND = 'VirtualMachineInstanceReplicaSet'
VMIR_ARG_SPEC = {
'replicas': {'type': 'int'},
'selector': {'type': 'dict'},
}
class KubeVirtVMIRS(KubeVirtRawModule):
@property
def argspec(self):
""" argspec property builder """
argument_spec = copy.deepcopy(AUTH_ARG_SPEC)
argument_spec.update(copy.deepcopy(VM_COMMON_ARG_SPEC))
argument_spec.update(copy.deepcopy(VMIR_ARG_SPEC))
return argument_spec
def _read_stream(self, resource, watcher, stream, name, replicas):
""" Wait for ready_replicas to equal the requested number of replicas. """
if self.params.get('state') == 'absent':
# TODO: Wait for absent
return
return_obj = None
for event in stream:
if event.get('object'):
obj = ResourceInstance(resource, event['object'])
if obj.metadata.name == name and hasattr(obj, 'status'):
if replicas == 0:
if not hasattr(obj.status, 'readyReplicas') or not obj.status.readyReplicas:
return_obj = obj
watcher.stop()
break
if hasattr(obj.status, 'readyReplicas') and obj.status.readyReplicas == replicas:
return_obj = obj
watcher.stop()
break
if not return_obj:
self.fail_json(msg="Error fetching the patched object. Try a higher wait_timeout value.")
if replicas and return_obj.status.readyReplicas is None:
self.fail_json(msg="Failed to fetch the number of ready replicas. Try a hi
|
gher wait_timeout value.")
if replicas and return_obj.status.readyReplicas != replicas:
self.fail_json(msg="Number of ready replicas is {0}. Failed to reach {1} ready replicas within "
"the wait_timeout period.".format(return_obj.status.ready_replicas, replicas))
return return_obj.to_dict()
def wait_for_replicas(self):
namespace = self.params.get('namespace')
wait_timeout = self.params.get('wait_tim
|
eout')
replicas = self.params.get('replicas')
name = self.name
resource = self.find_supported_resource(KIND)
w, stream = self._create_stream(resource, namespace, wait_timeout)
return self._read_stream(resource, w, stream, name, replicas)
def execute_module(self):
# Parse parameters specific for this module:
definition = virtdict()
selector = self.params.get('selector')
replicas = self.params.get('replicas')
if selector:
definition['spec']['selector'] = selector
if replicas is not None:
definition['spec']['replicas'] = replicas
        # Execute the CRUD of the VM:
template = definition['spec']['template']
dummy, definition = self.construct_vm_definition(KIND, definition, template)
result_crud = self.execute_crud(KIND, definition)
changed = result_crud['changed']
result = result_crud.pop('result')
# Wait for the replicas:
wait = self.params.get('wait')
if wait:
result = self.wait_for_replicas()
# Return from the module:
self.exit_json(**{
'changed': changed,
'kubevirt_rs': result,
'result': result_crud,
})
def main():
module = KubeVirtVMIRS()
try:
module.execute_module()
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
if __name__ == '__main__':
main()
|
CenterForOpenScience/lookit-api
|
studies/migrations/0030_merge_20170827_1909.py
|
Python
|
apache-2.0
| 334 | 0 |
# -*- coding: utf-8 -
|
*-
# Generated by Django 1.11.2 on 2017-08-27 23:09
from __future__ import unicode_literals
from django.db import migrations
class Migrat
|
ion(migrations.Migration):
dependencies = [
("studies", "0024_merge_20170823_1352"),
("studies", "0029_auto_20170825_1505"),
]
operations = []
|
vileopratama/vitech
|
src/addons/hw_escpos/escpos/printer.py
|
Python
|
mit
| 6,802 | 0.007939 |
#!/usr/bin/python
import usb.core
import usb.util
import serial
import socket
from escpos import *
from constants import *
from exceptions import *
from time import sleep
class Usb(Escpos):
""" Define USB printer """
def __init__(self, idVendor, idProduct, interface=0, in_ep=0x82, out_ep=0x01):
"""
@param idVendor : Vendor ID
@param idProduct : Product ID
@param interface : USB device interface
@param in_ep : Input end point
@param out_ep : Output end point
"""
self.errorText = "ERROR PRINTER\n\n\n\n\n\n"+PAPER_FULL_CUT
self.idVendor = idVendor
self.idProduct = idProduct
self.interface = interface
self.in_ep = in_ep
self.out_ep = out_ep
self.open()
def open(self):
""" Search device on USB tree and set is as escpos device """
self.device = usb.core.find(idVendor=self.idVendor, idProduct=self.idProduct)
if self.device is None:
raise NoDeviceError()
try:
if self.device.is_kernel_driver_active(self.interface):
self.device.detach_kernel_driver(self.interface)
self.device.set_configuration()
usb.util.claim_interface(self.device, self.interface)
except usb.core.USBError as e:
raise HandleDeviceError(e)
def close(self):
i = 0
while True:
try:
if not self.device.is_kernel_driver_active(self.interface):
usb.util.release_interface(self.device, self.interface)
self.device.attach_kernel_driver(self.interface)
usb.util.dispose_resources(self.device)
else:
self.device = None
return True
except usb.core.USBError as e:
i += 1
if i > 10:
return False
sleep(0.1)
def _raw(self, msg):
""" Print any command sent in raw format """
if len(msg) != self.device.write(self.out_ep, msg, self.interface):
self.device.write(self.out_ep, self.errorText, self.interface)
raise TicketNotPrinted()
def __extract_status(self):
maxiterate = 0
rep = None
while rep == None:
maxiterate += 1
if maxiterate > 10000:
raise NoStatusError()
r = self.device.read(self.in_ep, 20, self.interface).tolist()
while len(r):
rep = r.pop()
return rep
def get_printer_status(self):
status = {
'printer': {},
'offline': {},
'error' : {},
'paper' : {},
}
self.device.write(self.out_ep, DLE_EOT_PRINTER, self.interface)
printer = self.__extract_status()
self.device.write(self.out_ep, DLE_EOT_OFFLINE, self.interface)
offline = self.__extract_status()
self.device.write(self.out_ep, DLE_EOT_ERROR, self.interface)
error = self.__extract_status()
self.device.write(self.out_ep, DLE_EOT_PAPER, self.interface)
paper = self.__extract_status()
status['printer']['status_code'] = printer
status['printer']['status_error'] = not ((printer & 147) == 18)
status['printer']['online'] = not bool(printer & 8)
status['printer']['recovery'] = bool(printer & 32)
status['printer']['paper_feed_on'] = bool(printer & 64)
status['printer']['drawer_pin_high'] = bool(printer & 4)
status['offline']['status_code'] = offline
status['offline']['status_error'] = not ((offline & 147) == 18)
status['offline']['cover_open'] = bool(offline & 4)
status['offline']['paper_feed_on'] = bool(offline & 8)
status['offline']['paper'] = not bool(offline & 32)
status['offline']['error'] = bool(offline & 64)
status['error']['status_code'] = error
status['error']['status_error'] = not ((error & 147) == 18)
status['error']['recoverable'] = bool(error & 4)
status['error']['autocutter'] = bool(error & 8)
status['error']['unrecoverable'] = bool(error & 32)
status['error']['auto_recoverable'] = not bool(error & 64)
status['paper']['status_code'] = paper
status['paper']['status_error'] = not ((paper & 147) == 18)
status['paper']['near_end'] = bool(paper & 12)
status['paper']['present'] = not bool(paper & 96)
return status
def __del__(self):
""" Release USB interface """
if self.device:
self.close()
self.device = None
class Serial(Escpos):
""" Define Serial printer """
def __init__(self, devfile="/dev/ttyS0", baudrate=9600, bytesize=8, timeout=1):
"""
@param devfile : Device file under dev filesystem
@param baudrate : Baud rate for serial transmission
@param bytesize : Serial buffer size
@param timeout : Read/Write timeout
"""
self.devfile = devfile
self.baudrate = baudrate
self.bytesize = bytesize
self.timeout = timeout
self.open()
def open(self):
""" Setup serial port and set is as escpos device """
self.device = serial.Serial(port=self.devfile, baudrat
|
e=self.baudrate, bytesize=self.bytesize, parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE, timeout=self.timeout, dsrdtr=True)
if self.device is not None:
print "Serial printer enabled"
else:
print "Unable to open serial printer on: %s" % self.devfile
def
|
_raw(self, msg):
""" Print any command sent in raw format """
self.device.write(msg)
def __del__(self):
""" Close Serial interface """
if self.device is not None:
self.device.close()
class Network(Escpos):
""" Define Network printer """
def __init__(self,host,port=9100):
"""
@param host : Printer's hostname or IP address
@param port : Port to write to
"""
self.host = host
self.port = port
self.open()
def open(self):
""" Open TCP socket and set it as escpos device """
self.device = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.device.connect((self.host, self.port))
if self.device is None:
print "Could not open socket for %s" % self.host
def _raw(self, msg):
self.device.send(msg)
def __del__(self):
""" Close TCP connection """
self.device.close()
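# Illustrative usage sketch; the vendor/product IDs and the network address are
# placeholders for this example, not values shipped with the module:
#   printer = Usb(0x04b8, 0x0202)                 # hypothetical USB ESC/POS printer
#   printer = Network("192.168.0.31", port=9100)  # hypothetical network printer
#   printer._raw("Hello\n" + PAPER_FULL_CUT)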
|
managedkaos/AWS-Python-Boto3
|
s3/delete_contents.py
|
Python
|
mit
| 599 | 0 |
#!/usr/bin/env python
# a script to delete the contents of an s3 buckets
# import the sys and boto3 modules
import sys
import boto3
# create an s3 resource
s3 = boto3.resource('s3')
# iterate over the script arguments as bu
|
cket names
for bucket_name in sys.argv[1:]:
# use the bucket name to create a bucket object
bucket = s3.Bucket(bucket_name)
# delete the bucket's contents and print the res
|
ponse or error
for key in bucket.objects.all():
try:
response = key.delete()
print response
except Exception as error:
print error
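# Note: as an assumed alternative for large buckets, boto3 collections also
# expose a batched delete (not used in the loop above):
#   bucket.objects.all().delete()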
|
sbelskie/symplicity
|
manage.py
|
Python
|
apache-2.0
| 253 | 0 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Symplicity.settings")
|
from django.core.management import execute_from_command_line
execut
|
e_from_command_line(sys.argv)
|
jfallmann/bioconda-recipes
|
recipes/hops/hops.py
|
Python
|
mit
| 2,649 | 0.001888 |
#!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
#
# Program Parameters
#
import os
import sys
import subprocess
from os import access, getenv, X_OK
jar_file = 'hops0.31.jar'
default_jvm_mem_opts = ['-Xms1g', '-Xmx2g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
"""Return the symlink-resolved, canonicalized directory-portion of path."""
return os.path.dirname(os.path.realpath(path))
def java_executable():
"""Return the executable name of the Java interpreter."""
java_home = getenv('JAVA_HOME')
java_bin = os.path.join('bin', 'java')
i
|
f java_home and access(os.path.join(java_home, java_bin), X_OK):
return os.path.join(java_home, java_bin)
else:
return 'java'
def jvm_opts(argv):
"""Construct list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
The return value is a 3-tuple lists of strings
|
of the form:
(memory_options, prop_options, passthrough_options)
"""
mem_opts = []
prop_opts = []
pass_args = []
for arg in argv:
if arg.startswith('-D'):
prop_opts.append(arg)
elif arg.startswith('-XX'):
prop_opts.append(arg)
elif arg.startswith('-Xm'):
mem_opts.append(arg)
else:
pass_args.append(arg)
# In the original shell script the test coded below read:
# if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
# To reproduce the behaviour of the above shell code fragment
    # it is important to explicitly check for equality with None
# in the second condition, so a null envar value counts as True!
if mem_opts == [] and getenv('_JAVA_OPTIONS') == None:
mem_opts = default_jvm_mem_opts
return (mem_opts, prop_opts, pass_args)
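# Illustrative split (hypothetical argument list, not part of the recipe):
#   jvm_opts(['-Xmx4g', '-Dsome.prop=1', 'input.txt'])
#   -> (['-Xmx4g'], ['-Dsome.prop=1'], ['input.txt'])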
def main():
java = java_executable()
jar_dir = real_dirname(sys.argv[0])
(mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:])
if pass_args != [] and pass_args[0].startswith('eu'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
jar_path = os.path.join(jar_dir, jar_file)
java_args = [java]+ mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
if '--jar_dir' in sys.argv[1:]:
print(jar_path)
else:
sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
main()
|
emkailu/PAT3DEM
|
bin/p3starscreen.py
|
Python
|
mit
| 3,906 | 0.031234 |
#!/usr/bin/env python
import os
import sys
import argparse
import pat3dem.star as p3s
def main():
progname = os.path.basename(sys.argv[0])
usage = progname + """ [options] <a star file>
Write two star files after screening by an item and a cutoff in the star file.
Write one star file after screening by a file containing blacklist/whitelist (either keyword or item).
"""
args_def = {'screen':'0', 'cutoff':'00', 'sfile':'0', 'white':0}
parser = argparse.ArgumentParser()
parser.add_argument("star", nargs='*', help="specify a star file to be screened")
parser.add_argument("-s", "--screen", type=str, help="specify the item, by which the star file will be screened, by default {} (no screening). e.g., 'OriginX'".format(args_def['screen']))
parser.add_argument("-c", "--cutoff", type=str, help="specify the cutoff, by default '{}' (-s and -sf will be combined)".format(args_def['cutoff']))
parser.add_argument("-sf", "--sfile", type=str, help="specify a file containing a keyword each line, by default '{}' (no screening). e.g., 'f.txt'".format(args_def['sfile']))
parser.add_argument("-w", "--white", type=int, help="specify as 1 if you provide a whitelist in -sf".format(args_def['white']))
args = parser.parse_args()
if len(sys.argv) == 1:
print "usage: " + usage
print "Please run '" + progname + " -h' for detailed options."
sys.exit(1)
# get default values
for i in args_def:
if args.__dict__[i] == None:
args.__dict__[i] = args_def[i]
# preprocess -sf
if args.sfile != '0':
lines_sf = open(args.sfile).readlines()
lines_sfile = []
for line in lines_sf:
line = line.strip()
if line != '':
lines_sfile += [line]
# get the star file
star = args.star[0]
basename = os.path.basename(os.path.splitext(star)[0])
star_dict = p3s.star_parse(star, 'data_')
header = star_dict['data_'] + star_dict['loop_']
header_len = len(header)
with open(star) as read_star:
lines = read_star.readlines()[header_len:-1]
if args.screen != '0':
# get the sc number
scn = star_dict['_rln'+args.screen]
if args.cutoff != '00':
# Name the output files
screened1 = '{}_screened_{}-gt-{}.star'.format(basename, args.screen, args.cutoff)
screened2 = '{}_screened_{}-le-{}.star'.format(basename, args.screen, args.cutoff)
write_screen1 = open(screened1, 'w')
write_screen1.write(''.join(header))
write_screen2 = open(screened2, 'w')
write_screen2.write(''.join(header))
for line in lines:
if float(line.split()[scn]) > float(args.cutoff):
write_screen1.write(line)
else:
write_screen2.write(line)
write_screen1.write(' \n')
write_screen1.close()
write_screen2.write(' \n')
write_screen2.close()
print 'The screened star files have been written in {} and {}!'.format(screened1, screened2)
elif args.sfile != '0':
with open('{}_screened.star'.format(basename), 'w') as write_screen:
write_screen.write(''.join(header))
if args.white == 0:
for line in lines:
key = line.split()[scn]
if key not in lines_sfile:
print 'Include {}.'.format(key)
write_screen.write(line)
else:
for line in lines:
key = line.split()[scn]
if key in lines_sfile:
print 'Include {}.'.format(key)
write_screen.write(line)
write_screen.write(' \n')
elif args.sfile != '0':
with open('{}_screened.star'.format(basename), 'w') as write_screen
|
:
write_screen.write(''.join(header))
if
|
args.white == 0:
for line in lines:
skip = 0
for key in lines_sfile:
if key in line:
skip = 1
print 'Skip {}.'.format(key)
break
if skip == 0:
write_screen.write(line)
else:
for line in lines:
for key in lines_sfile:
if key in line:
print 'Include {}.'.format(key)
write_screen.write(line)
break
write_screen.write(' \n')
if __name__ == '__main__':
main()
|
jtriley/gizmod
|
scripts/modules.d/510-LIRC-MceUSB2-MythTV.py
|
Python
|
apache-2.0
| 7,769 | 0.040674 |
#***
#*********************************************************************
#*************************************************************************
#***
#*** GizmoDaemon Config Script
#*** LIRCMceUSB2 MythTV config
#***
#*****************************************
#*****************************************
#***
"""
Copyright (c) 2007, Gizmo Daemon Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
############################
# Imports
##########################
from GizmoDaemon import *
from GizmoScriptActiveApplication import *
from GizmoScriptAltTabber import *
import subprocess
ENABLED = True
VERSION_NEEDED = 3.2
INTERESTED_CLASSES = [GizmoEventClass.LIRC]
INTERESTED_WINDOWS = ["mythfrontend"]
USES_LIRC_REMOTES = ["mceusb", "mceusb2"]
POWER_APPLICATION = "mythfrontend"
############################
# LIRCMceUSB2MythTV Class definition
##########################
class LIRCMceUSB2MythTV(GizmoScriptActiveApplication):
"""
MythTV LIRC Event Mapping for the MceUSB2 remote
"""
############################
# Public Functions
##########################
def onDeviceEvent(self, Event, Gizmo = None):
"""
Called from Base Class' onEvent method.
        See GizmodDispatcher.onEvent documentation for an explanation of this function
"""
# if the event isn't from the remote we're interested in don't handle it
if Event.Remote not in USES_LIRC_REMOTES:
return False
# process the key
if Event.Button == "Power":
# if mythfrontend is open, kill it
subprocess.Popen(["killall", "mythfrontend"])
return True
elif Event.Button == "TV":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_A)
return True
elif Event.Button == "Music":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_B)
return True
elif Event.Button == "Pictures":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_SLASH)
return True
elif Event.Button == "Videos":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_SLASH, [GizmoKey.KEY_RIGHTSHIFT])
return True
elif Event.Button == "Stop":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_S)
return True
elif Event.Button == "Record":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_R)
return True
elif Event.Button == "Pause":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_P)
return True
elif Event.Button == "Rewind":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_COMMA, [GizmoKey.KEY_RIGHTSHIFT])
return True
elif Event.Button == "Play":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_P)
return True
elif Event.Button == "Forward":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_DOT, [GizmoKey.KEY_RIGHTSHIFT])
return True
elif Event.Button == "Replay":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_PAGE
|
UP)
return True
elif Event.Button == "Back":
Gizmod.Keyboards[0].createEvent(GizmoEventType
|
.EV_KEY, GizmoKey.KEY_ESC)
return True
elif Event.Button == "Up":
return False
elif Event.Button == "Skip":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_PAGEDOWN)
return True
elif Event.Button == "More":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_M)
return True
elif Event.Button == "Left":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_LEFT)
return True
elif Event.Button == "OK":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_ENTER)
return True
elif Event.Button == "Right":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_RIGHT)
return True
elif Event.Button == "Down":
return False
elif Event.Button == "VolUp":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_RIGHTBRACE)
return True
elif Event.Button == "VolDown":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_LEFTBRACE)
return True
elif Event.Button == "Home":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_END)
return True
elif Event.Button == "ChanUp":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_UP)
return True
elif Event.Button == "ChanDown":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_DOWN)
return True
elif Event.Button == "RecTV":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_HOME)
return True
elif Event.Button == "Mute":
return False
elif Event.Button == "DVD":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_H)
return True
elif Event.Button == "Guide":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_S)
return True
elif Event.Button == "LiveTV":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_N)
return True
elif Event.Button == "One":
return False
elif Event.Button == "Two":
return False
elif Event.Button == "Three":
return False
elif Event.Button == "Four":
return False
elif Event.Button == "Five":
return False
elif Event.Button == "Six":
return False
elif Event.Button == "Seven":
return False
elif Event.Button == "Eight":
return False
elif Event.Button == "Nine":
return False
elif Event.Button == "Star":
return False
elif Event.Button == "Zero":
return False
elif Event.Button == "Hash":
return False
elif Event.Button == "Clear":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_C)
return True
elif Event.Button == "Enter":
Gizmod.Keyboards[0].createEvent(GizmoEventType.EV_KEY, GizmoKey.KEY_I)
return True
else:
# unmatched event, keep processing
return False
def onEvent(self, Event, Gizmo = None):
"""
Overloading Base Class' onEvent method!
Make sure to call it!
"""
# check for power button
# if pressed and mythfrontend isn't running, then launch it
# also return False so that other scripts may make use of the power
# button as well
if Event.Class in self.InterestedClasses \
and Event.Remote in USES_LIRC_REMOTES \
and Event.Button == "Power" \
and Gizmod.isProcessRunning(POWER_APPLICATION) < 0:
subprocess.Popen([POWER_APPLICATION])
Gizmod.updateProcessTree() # force an instantaneous process tree update
return False
        # call base class' onEvent method
return GizmoScriptActiveApplication.onEvent(self, Event, Gizmo)
############################
# Private Functions
##########################
def __init__(self):
"""
Default Constructor
"""
GizmoScriptActiveApplication.__init__(self, ENABLED, VERSION_NEEDED, INTERESTED_CLASSES, INTERESTED_WINDOWS)
############################
# LIRCMceUSB2MythTV class end
##########################
# register the user script
LIRCMceUSB2MythTV()
|
endlessm/chromium-browser
|
third_party/llvm/lldb/test/API/macosx/nslog/TestDarwinNSLogOutput.py
|
Python
|
bsd-3-clause
| 5,655 | 0.001061 |
"""
Test DarwinLog "source include debug-level" functionality provided by the
StructuredDataDarwinLog plugin.
These tests are currently only supported when running against Darwin
targets.
"""
import lldb
import platform
import re
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbtest_config
class DarwinNSLogOutputTestCase(TestBase):
NO_DEBUG_INFO_TESTCASE = True
mydir = TestBase.compute_mydir(__file__)
@skipUnlessDarwin
@skipIfRemote # this test is currently written using lldb commands & assumes running on local system
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
self.child = None
self.child_prompt = '(lldb) '
self.strict_sources = False
# Source filename.
self.source = 'main.m'
# Output filename.
self.exe_name = self.getBuildArtifact("a.out")
self.d = {'OBJC_SOURCES': self.source, 'EXE': self.exe_name}
# Locate breakpoint.
self.line = line_number(self.source, '// break here')
def tearDown(self):
# Shut down the process if it's still running.
if self.child:
self.runCmd('process kill')
self.expect_prompt()
self.runCmd('quit')
# Let parent clean up
super(DarwinNSLogOutputTestCase, self).tearDown()
def run_lldb_to_breakpoint(self, exe, source_file, line,
settings_commands=None):
# Set self.child_prompt, which is "(lldb) ".
prompt = self.child_prompt
# So that the child gets torn down after the test.
import pexpect
import sys
if sys.version_info.major == 3:
self.child = pexpect.spawnu('%s %s %s' % (lldbtest_config.lldbExec,
self.lldbOption, exe))
else:
self.child = pexpect.spawn('%s %s %s' % (lldbtest_config.lldbExec,
self.lldbOption, exe))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
# Disable showing of source lines at our breakpoint.
# This is necessary for the logging tests, because the very
# text we want to match for output from the running inferior
# will show up in the source as well. We don't want the source
# output to erroneously make a match with our expected output.
self.runCmd("settings set stop-line-count-before 0")
self.expect_prompt()
self.runCmd("settings set stop-line-count-after 0")
self.expect_prompt()
# Run any test-specific settings commands now.
if settings_commands is not None:
for setting_command in settings_commands:
self.runCmd(setting_command)
self.expect_prompt()
# Set the breakpoint, and run to it.
child.sendline('breakpoint set -f %s -l %d' % (source_file, line))
child.expect_exact(prompt)
child.sendline('run')
child.expect_exact(prompt)
# Ensure we stopped at a breakpoint.
self.runCmd("thread list")
self.expect(re.compile(r"stop reason = .*breakpoint"))
def runCmd(self, cmd):
if self.child:
self.child.sendline(cmd)
def expect_prompt(self, exactly=True):
self.expect(self.child_prompt, exactly=exactly)
def expect(self, pattern, exactly=False, *args, **kwargs):
if exactly:
return self.child.expect_exact(pattern, *args, **kwargs)
return self.child.expect(pattern, *args, **kwargs)
def do_test(self, expect_regexes=None, settings_commands=None):
""" Run a test. """
self.build(dictionary=self.d)
self.setTearDownCleanup(dictionary=self.d)
exe = self.getBuildArtifact(self.exe_name)
self.run_lldb_to_breakpoint(exe, self.source, self.line,
settings_commands=settings_commands)
self.expect_prompt()
# Now go.
self.runCmd("process continue")
self.expect(expect_regexes)
def test_nslog_output_is_displayed(self):
"""Test that NSLog() output shows up in the command-line debugger."""
self.do_test(expect_regexes=[
re.compile(r"(This is a message from NSLog)"),
re.compile(r"Process \d+ exited with status")
])
self.assertIsNotNone(self.child.match)
self.assertGreater(len(self.child.match.groups()), 0)
self.assertEqual(
"This is a message from NSLog",
self.child.match.group(1))
def test_nslog_output_is_suppressed_with_env_var(self):
"""Test that NSLog() output does not show up with the ignore env var."""
# This test will only work properly on macOS 10.12+. Skip it on earlier versions.
# This will require some tweaking on iOS.
match = re.match(r"^\d+\.(\d+)", platform.mac_ver()[0])
if match is None or int(match.group(1)) < 12:
|
self.skipTest("requires macOS 10.
|
12 or higher")
self.do_test(
expect_regexes=[
re.compile(r"(This is a message from NSLog)"),
re.compile(r"Process \d+ exited with status")
],
settings_commands=[
"settings set target.env-vars "
"\"IDE_DISABLED_OS_ACTIVITY_DT_MODE=1\""
])
self.assertIsNotNone(self.child.match)
self.assertEqual(len(self.child.match.groups()), 0)
|
ravenbyron/phtevencoin
|
qa/rpc-tests/test_framework/socks5.py
|
Python
|
mit
| 5,705 | 0.005784 |
# Copyright (c) 2015 The Phtevencoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Dummy Socks5 server for testing.
'''
from __future__ import print_function, division, unicode_literals
import socket, threading, Queue
import traceback, sys
### Protocol constants
class Command:
CONNECT = 0x01
class AddressType:
IPV4 = 0x01
DOMAINNAME = 0x03
IPV6 = 0x04
### Utility functions
def recvall(s, n):
'''Receive n bytes from a socket, or fail'''
rv = bytearray()
while n > 0:
d = s.recv(n)
if not d:
raise IOError('Unexpected end of stream')
rv.extend(d)
n -= len(d)
return rv
### Implementation classes
class Socks5Configuration(object):
'''Proxy configuration'''
def __init__(self):
self.addr = None # Bind address (must be set)
self.af = socket.AF_INET # Bind address family
self.unauth = False # Support unauthenticated
self.auth = False # Support authentication
class Socks5Command(object):
'''Information about an incoming socks5 command'''
def __init__(self, cmd, aty
|
p, addr, port, username, password):
self.cmd = cmd # Command (one of Command.*)
self.atyp = atyp # Address type (one of AddressType.*)
self.addr = addr # Address
self.port = port # Port to connect to
self.username = username
self.password = password
def __repr__(self):
return 'Socks5Command(%s,%s,%s,%s,%s,%s)' % (self.cm
|
d, self.atyp, self.addr, self.port, self.username, self.password)
class Socks5Connection(object):
def __init__(self, serv, conn, peer):
self.serv = serv
self.conn = conn
self.peer = peer
def handle(self):
'''
Handle socks5 request according to RFC1928
'''
try:
# Verify socks version
ver = recvall(self.conn, 1)[0]
if ver != 0x05:
raise IOError('Invalid socks version %i' % ver)
# Choose authentication method
nmethods = recvall(self.conn, 1)[0]
methods = bytearray(recvall(self.conn, nmethods))
method = None
if 0x02 in methods and self.serv.conf.auth:
method = 0x02 # username/password
elif 0x00 in methods and self.serv.conf.unauth:
method = 0x00 # unauthenticated
if method is None:
raise IOError('No supported authentication method was offered')
# Send response
self.conn.sendall(bytearray([0x05, method]))
# Read authentication (optional)
username = None
password = None
if method == 0x02:
ver = recvall(self.conn, 1)[0]
if ver != 0x01:
raise IOError('Invalid auth packet version %i' % ver)
ulen = recvall(self.conn, 1)[0]
username = str(recvall(self.conn, ulen))
plen = recvall(self.conn, 1)[0]
password = str(recvall(self.conn, plen))
# Send authentication response
self.conn.sendall(bytearray([0x01, 0x00]))
# Read connect request
(ver,cmd,rsv,atyp) = recvall(self.conn, 4)
if ver != 0x05:
raise IOError('Invalid socks version %i in connect request' % ver)
if cmd != Command.CONNECT:
raise IOError('Unhandled command %i in connect request' % cmd)
if atyp == AddressType.IPV4:
addr = recvall(self.conn, 4)
elif atyp == AddressType.DOMAINNAME:
n = recvall(self.conn, 1)[0]
addr = str(recvall(self.conn, n))
elif atyp == AddressType.IPV6:
addr = recvall(self.conn, 16)
else:
raise IOError('Unknown address type %i' % atyp)
port_hi,port_lo = recvall(self.conn, 2)
port = (port_hi << 8) | port_lo
# Send dummy response
self.conn.sendall(bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]))
cmdin = Socks5Command(cmd, atyp, addr, port, username, password)
self.serv.queue.put(cmdin)
print('Proxy: ', cmdin)
# Fall through to disconnect
        except Exception as e:
traceback.print_exc(file=sys.stderr)
self.serv.queue.put(e)
finally:
self.conn.close()
class Socks5Server(object):
def __init__(self, conf):
self.conf = conf
self.s = socket.socket(conf.af)
self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.s.bind(conf.addr)
self.s.listen(5)
self.running = False
self.thread = None
self.queue = Queue.Queue() # report connections and exceptions to client
def run(self):
while self.running:
(sockconn, peer) = self.s.accept()
if self.running:
conn = Socks5Connection(self, sockconn, peer)
thread = threading.Thread(None, conn.handle)
thread.daemon = True
thread.start()
def start(self):
assert(not self.running)
self.running = True
self.thread = threading.Thread(None, self.run)
self.thread.daemon = True
self.thread.start()
def stop(self):
self.running = False
# connect to self to end run loop
s = socket.socket(self.conf.af)
s.connect(self.conf.addr)
s.close()
self.thread.join()
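# Illustrative usage sketch (the bind address is an assumption for the example):
#   conf = Socks5Configuration()
#   conf.addr = ('127.0.0.1', 13000)
#   conf.unauth = True
#   serv = Socks5Server(conf)
#   serv.start()
#   ... point a SOCKS5 client at 127.0.0.1:13000 ...
#   cmd = serv.queue.get()   # Socks5Command describing the connect request
#   serv.stop()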
|
EDRN/PublicPortal
|
support/dropsies.py
|
Python
|
apache-2.0
| 703 | 0.007112 |
#!/usr/bin/env py
|
thon
# encoding: utf-8
# Copyright 2010 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
class DropHandler(BaseHTTPRequestHandler):
def dropRequest(self):
self.send_response(200)
self.send_header('Content-length', '0')
self.send_header('Connection', 'close')
self.end_headers()
do_GET = do_POST = do_HEAD = do_PURGE = do_OPTIONS = do_PUT = do_DELETE
|
= do_TRACE = do_CONNECT = dropRequest
def main():
server = HTTPServer(('', 8989), DropHandler)
server.serve_forever()
if __name__ == '__main__':
main()
|
johngumb/danphone
|
junkbox/dm.py
|
Python
|
gpl-3.0
| 1,692 | 0.010047 |
import sys
class DualModulusPrescaler:
def __init__(self,p):
self.m_p = p
return
def set_prescaler(self):
return
# may be internal
def set_a(self,a):
self.m_a = a
return
# may be internal
def set_n(self,n):
self.m_n = n
return
def set_ref_divider(self, r):
self.m_r = r
return
def get_ref_divider(self):
return self.m_r
def get_
|
division_ratio(self):
v = (self.m_p * self.m_n) + self.m_a
return v
class Osc:
def __init__(self, ref_freq, prescaler):
self.m_ref = ref_freq
self.m_prescaler = prescaler
|
return
def get_freq(self):
# print self.m_prescaler.get_division_ratio()
return (self.m_ref/self.m_prescaler.get_ref_divider()) * self.m_prescaler.get_division_ratio()
def calc_a(self):
return
def calc_n(self):
return
def get_counter_params(self,freq):
x = freq * self.m_prescaler.get_ref_divider()/self.m_ref
n = int(x/32)
a = int(round(x-n*32))
encoded = (n<<7) + a
return (n, a, encoded)
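    # Worked example (approximate, assuming ref=12.8 MHz, R=1088 and P=32 as used below):
    #   for freq = 70.3529 MHz, x = freq * 1088 / 12.8e6 ~= 5980,
    #   so n = 5980 // 32 = 186 and a = 5980 - 186 * 32 = 28.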
def set_freq(self,freq):
return
class Danphone:
def __init__(self,datalink):
dmps = DualModulusPrescaler(32)
#dmps.set_ref_divider(2048)
dmps.set_ref_divider(1088)
osc = Osc(12.8e6,dmps)
print osc.get_counter_params(70.3529E6)
sys.exit(0)
for i in range(128):
dmps.set_a(i)
dmps.set_n(456)
osc = Osc(12.8e6,dmps)
print osc.get_freq()/1000000
return
if __name__=="__main__":
d = Danphone(None)
|
mail-apps/translate
|
translate/storage/fpo.py
|
Python
|
gpl-2.0
| 20,828 | 0.001776 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2011 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Classes for the support of Gettext .po and .pot files.
This implementation assumes that cpo is working. This should not be used
directly, but can be used once cpo has been established to work."""
#TODO:
# - handle headerless PO files better
# - previous msgid and msgctxt
# - accept only unicodes everywhere
import copy
import logging
import re
import six
from translate.lang import data
from translate.misc.multistring import multistring
from translate.storage import base, cpo, pocommon
from translate.storage.pocommon import encodingToUse
logger = logging.getLogger(__name__)
lsep = " "
"""Separator for #: entries"""
basic_header = r'''msgid ""
msgstr ""
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
'''
class pounit(pocommon.pounit):
# othercomments = [] # # this is another comment
# automaticcomments = [] # #. comment extracted from the source code
# sourcecomments = [] # #: sourcefile.xxx:35
# prev_msgctxt = [] # #| The previous values that msgctxt and msgid held
# prev_msgid = [] #
# prev_msgid_plural = [] #
# typecomments = [] # #, fuzzy
# msgidcomment = u"" # _: within msgid
# msgctxt
# msgid = []
# msgstr = []
# Our homegrown way to indicate what must be copied in a shallow
# fashion
__shallow__ = ['_store']
def __init__(self, source=None, encoding="UTF-8"):
pocommon.pounit.__init__(self, source)
self._encoding = encodingToUse(encoding)
self._initallcomments(blankall=True)
self._msgctxt = u""
self.target = u""
def _initallcomments(self, blankall=False):
"""Initialises allcomments"""
if blankall:
self.othercomments = []
self.automaticcomments = []
self.sourcecomments = []
self.typecomments = []
self.msgidcomment = u""
def getsource(self):
return self._source
def setsource(self, source):
self._rich_source = None
# assert isinstance(source, unicode)
source = data.forceunicode(source or u"")
source = source or u""
if isinstance(source, multistring):
self._source = source
elif isinstance(source, unicode):
self._source = source
else:
#unicode, list, dict
self._source = multistring(source)
source = property(getsource, setsource)
def gettarget(self):
"""Returns the unescaped msgstr"""
return self._target
def settarget(self, target):
"""Sets the msgstr to the given (unescaped) value"""
self._rich_target = None
# assert isinstance(target, unicode)
# target = data.forceunicode(target)
if self.hasplural():
if isinstance(target, multistring):
self._target = target
else:
#unicode, list, dict
self._target = multistring(target)
elif isinstance(target, (dict, list)):
if len(target) == 1:
self._target = target[0]
else:
raise ValueError("po msgid element has no plural but msgstr has %d elements (%s)" % (len(target), target))
else:
self._target = target
target = property(gettarget, settarget)
def getnotes(self, origin=None):
"""Return comments based on origin value (programmer, developer, source code and translator)"""
if origin is None:
comments = u"\n".join(self.othercomments)
comments += u"\n".join(self.automaticcomments)
elif origin == "translator":
comments = u"\n".join(self.othercomments)
elif origin in ["programmer", "developer", "source code"]:
comments = u"\n".join(self.automaticcomments)
else:
raise ValueError("Comment type not valid")
return comments
def addnote(self, text, origin=None, position="append"):
"""This is modeled on the XLIFF method. See xliff.py::xliffunit.addnote"""
# ignore empty strings and strings without non-space characters
if not (text and text.strip()):
return
text = data.forceunicode(text)
commentlist = self.othercomments
autocomments = False
if origin in ["programmer", "developer", "source code"]:
autocomments = True
commentlist = self.automaticcomments
if text.endswith(u'\n'):
text = text[:-1]
newcomments = text.split(u"\n")
if position == "append":
newcomments = commentlist + newcomments
elif position == "prepend":
newcomments = newcomments + commentlist
if autocomments:
self.automaticcomments = newcomments
else:
self.othercomments = newcomments
def removenotes(self):
"""Remove all the translator's notes (other comments)"""
self.othercomments = []
def __deepcopy__(self, memo={}):
# Make an instance to serve as the copy
new_unit = self.__class__()
# We'll be testing membership frequently, so make a set from
# self.__shallow__
shallow = set(self.__shallow__)
# Make deep copies of all members which are not in shallow
for key, value in six.iteritems(self.__dict__):
if key not in shallow:
setattr(new_unit, key, copy.deepcopy(value))
# Make shallow copies of all members which are in shallow
for key in set(shallow):
setattr(new_unit, key, getattr(self, key))
# Mark memo with ourself, so that we won't get deep copied
# again
memo[id(self)] = self
# Return our copied unit
return new_unit
def copy(self):
return copy.deepcopy(self)
def _msgidlen(self):
if self.hasplural():
len("".join([string for string in self.source.strings]))
else:
return len(self.source)
def _msgstrlen(self):
if self.hasplural():
len("".join([string for string in self.target.strings]))
else:
return len(self.target)
def merge(self, otherpo, overwrite=False, comments=True, authoritative=False):
"""Merges the otherpo (with the same msgid) into this one.
Overwrite non-blank self.msgstr only if overwrite is True
merge comments only if comments is True
"""
def mergelists(list1, list2, split=False):
#decode where necessary
if unicode in [type(item) for item in list2] + [type(item) for item in list1]:
for position, item in enumerate(list1):
if isinstance(item, str):
list1[position] = item.decode("utf-8")
for position, item in enumerate(list2):
if isinstance(item, st
|
r):
list2[position] = item.decode("utf-8")
#Determine the newline sty
|
le of list2
lineend = ""
if list2 and list2[0]:
for candidate in ["\n", "\r", "\n\r"]:
if list2[0].endswith(candidate):
lineend = candidate
if not lineend:
lineend = ""
#Split if directed to do so:
if split:
|
bethgelab/foolbox
|
tests/test_plot.py
|
Python
|
mit
| 1,086 | 0 |
import pytest
import eagerpy as ep
import foolbox as fbn
def test_plot(dummy: ep.Tensor) -> None:
# just tests that the calls don't throw any errors
images = ep.zeros(dummy, (10, 3, 32, 32))
fbn.plot.images(images)
fbn.plot.images(images, n=3)
fbn.plot.images(images, n=3, data_format="channels_first")
fbn.plot.images(images, nrows=4)
fbn.plot.images(images, ncols=3)
fbn.plot.images(images, nrows=2, ncols=6)
fbn.plot.images(images, nrows=2, nc
|
ols=4)
# test for single channel images
images = ep.zeros(dummy, (10, 32, 32, 1))
fbn.plot.images(images)
with pytest.raises(ValueError):
images = ep.zeros(dummy, (10, 3, 3, 3)
|
)
fbn.plot.images(images)
with pytest.raises(ValueError):
images = ep.zeros(dummy, (10, 1, 1, 1))
fbn.plot.images(images)
with pytest.raises(ValueError):
images = ep.zeros(dummy, (10, 32, 32))
fbn.plot.images(images)
with pytest.raises(ValueError):
images = ep.zeros(dummy, (10, 3, 32, 32))
fbn.plot.images(images, data_format="foo")
|
nagyistoce/devide
|
modules/vtk_basic/vtkFieldDataToAttributeDataFilter.py
|
Python
|
bsd-3-clause
| 519 | 0.001927 |
# class generated by DeVIDE::createDeVIDEModuleFro
|
mVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkFieldDataToAttributeDataFilter(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkFieldDataToAttributeDataFilter(), 'Processing.',
('vtkDataSet',), ('vtkDataSet',),
|
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
zackp30/bugwarrior
|
bugwarrior/config.py
|
Python
|
gpl-3.0
| 5,298 | 0.000755 |
import codecs
from ConfigParser import ConfigParser
import os
import subprocess
import sys
import six
import twiggy
from twiggy import log
from twiggy.levels import name2level
from xdg import BaseDirectory
def asbool(some_value):
""" Cast config values to boolean. """
return six.text_type(some_value).lower() in [
'y', 'yes', 't', 'true', '1', 'on'
]
def get_service_password(service, username, oracle=None, interactive=False):
"""
Retrieve the sensitive password for a service by:
* retrieving password from a secure store (@oracle:use_keyring, default)
* asking the password from the user (@oracle:ask_password, interactive)
* executing a command and use the output as password
(@oracle:eval:<command>)
Note that the keyring may or may not be locked
which requires that the user provides a password (interactive mode).
:param service: Service name, may be key into secure store (as string).
:param username: Username for the service (as string).
:param oracle: Hint which password oracle strategy to use.
:return: Retrieved password (as string)
.. seealso::
https://bitbucket.org/kang/python-keyring-lib
"""
import getpass
import keyring
password = None
if not oracle or oracle == "@oracle:use_keyring":
password = keyring.get_password(service, username)
if interactive and password is None:
# -- LEARNING MODE: Password is not stored in keyring yet.
oracle = "@oracle:ask_password"
password = get_service_password(service, username,
oracle, interactive=True)
if password:
keyring.set_password(service, username, password)
elif interactive and oracle == "@oracle:ask_password":
prompt = "%s password: " % service
password = getpass.getpass(prompt)
elif oracle.startswith('@oracle:eval:'):
command = oracle[13:]
p = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
#stderr=subprocess.STDOUT
)
password = p.stdout.read()[:-1]
if password is None:
die("MISSING PASSWORD: oracle='%s', interactive=%s for service=%s" %
(oracle, interactive, service))
return password
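# Illustrative oracle values (the service and username are hypothetical):
#   get_service_password('github', 'alice')                                # keyring lookup
#   get_service_password('github', 'alice', '@oracle:ask_password', True)  # prompt the user
#   get_service_password('github', 'alice', '@oracle:eval:pass github')    # command output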
def load_example_rc():
fname = os.path.join(
os.path.dirname(__file__),
'docs/configuration.rst'
)
with open(fname, 'r') as f:
readme = f.read()
example = readme.split('.. example')[1][4:]
return example
error_template = """
*************************************************
* There was a problem with your bugwarriorrc *
* {msg}
* Here's an example template to help: *
*************************************************
{example}"""
def die(msg):
log.options(suppress_newlines=False).critical(
error_template,
msg=msg,
example=load_example_rc(),
)
sys.exit(1)
def validate_config(config, main_section):
if not config.has_section(main_section):
die("No [%s] section found." % main_section)
twiggy.quickSetup(
name2level(config.get(main_section, 'log.level')),
config.get(main_section, 'log.file')
)
if not config.has_option(main_section, 'targets'):
die("No targets= item in [%s] found." % main_section)
targets = config.get(main_section, 'targets')
targets = filter(lambda t: len(t), [t.strip() for t in targets.split(",")])
if not targets:
die("Empty targets= item in [%s]." % main_section)
for target in targets:
if target not in config.sections():
die("No [%s] section found." % target)
# Validate each target one by one.
for target in targets:
service
|
= config.get(target, 'service')
if not service:
die("No 'service
|
' in [%s]" % target)
if service not in SERVICES:
die("'%s' in [%s] is not a valid service." % (service, target))
# Call the service-specific validator
SERVICES[service].validate_config(config, target)
def load_config(main_section):
config = ConfigParser({'log.level': "DEBUG", 'log.file': None})
path = None
first_path = BaseDirectory.load_first_config('bugwarrior')
if first_path is not None:
path = os.path.join(first_path, 'bugwarriorrc')
old_path = os.path.expanduser("~/.bugwarriorrc")
if path is None or not os.path.exists(path):
if os.path.exists(old_path):
path = old_path
else:
path = os.path.join(BaseDirectory.save_config_path('bugwarrior'), 'bugwarriorrc')
config.readfp(
codecs.open(
path,
"r",
"utf-8",
)
)
config.interactive = False # TODO: make this a command-line option
validate_config(config, main_section)
return config
def get_taskrc_path(conf, main_section):
path = '~/.taskrc'
if conf.has_option(main_section, 'taskrc'):
path = conf.get(main_section, 'taskrc')
return os.path.normpath(
os.path.expanduser(path)
)
# This needs to be imported here and not above to avoid a circular-import.
from bugwarrior.services import SERVICES
|
PythonScanClient/PyScanClient
|
scan/client/data.py
|
Python
|
epl-1.0
| 8,283 | 0.012435 |
'''
Created on Mar 27,2015
@author: Yongxiang Qiu, Kay Kasemir
'''
try:
import xml.etree.cElementTree as ET
except:
import xml.etree.ElementTree as ET
from datetime import datetime
def getTimeSeries(data, name, convert='plain'):
'''Get values aligned by different types of time.
:param name: channel name
    :param convert: plain -> timestamp as logged (milliseconds since epoch)
datetime -> datetime objects
:return: value list with time
Example:
>>> data.getTimeSeries(..)
'''
if convert == 'plain':
return [ [t for t in data[name]['time'] ], [v for v in data[name]['value']] ]
elif convert == 'datetime':
return [ [str(getDatetime(time)) for time in data[name]['time']], [ v for v in data[name]['value']] ]
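# Illustrative call (the channel name is made up for the example):
#   times, values = getTimeSeries(data, 'xpos', convert='datetime')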
def getDatetime(time):
'''Convert log time
:param time: Posix millisecond timestamp of logged sample
:return: datetime
'''
secs = time / 1000.0
return datetime.fromtimestamp(secs)
def alignSerial(data, channel):
'''
Iterate data by serial ID.
    :param channel: Name of the channel (device) to iterate over.
:return: ( (id1, value1, time1) ,(id2, value2, time2), ..., (idn, valuen, timen))
'''
R = list(range(len(data[channel]['id'])))
for i in iter(R):
yield (data[channel]['id'][i], data[channel]['value'][i], data[channel]['time'][i])
##TODO: step
def alignTime(data, channel, intv = 0):
'''
Iterate data by time.
:param: channel: Name of channel(device) needed to be iterate.
:return: Iterator object.
'''
R = list(range(len(data[channel]['time'])))
for i in iter(R):
yield (data[channel]['time'][i], data[channel]['value'][i])
def getTable(data, *devices, **kwargs):
'''Create data table
Aligns samples for given list of devices by sample ID.
    Assuming that the serial IDs in the data are ascending.
    Gaps in the serial IDs are ignored.
|
:param devices: One or more devices
:param kwargs: with_id=True to add sample serial id,
with_time=True to add time (seconds since epoch)
:return: Table. result[0],result[1], .. hold the sample ID (if with_id),
the time (if with_time),
then the values for first device, for second device and so on.
'''
with_id = kwargs['with_id'] if 'with_
|
id' in kwargs else False
with_time = kwargs['with_time'] if 'with_time' in kwargs else False
devsIters = [ alignSerial(data, dev) for dev in devices] # prepare devices iterators
cur_samps = [next(devIt) for devIt in devsIters] # initial devices iterators
result = [[] for dev in devices]
if with_id:
result.insert(0, [])
if with_time:
result.insert(0, [])
cur_id = -1 # current sample id
cur_time = 0 # Current sample time
index = 0
while True:
try :
cur_id = min((samp[0] for samp in cur_samps if samp is not None)) # find smallest sample ID as current id
cur_time = max((samp[2] for samp in cur_samps if samp is not None)) # find last time stamp
except ValueError: # finished
break
data_col = 0
if with_id:
result[data_col].append(cur_id)
data_col += 1
if with_time:
result[data_col].append(cur_time)
data_col += 1
for i in range(len(devsIters)): # for each device ,there are 3 situations:
if cur_samps[i] is None: # 1. if device has been exhausted.
result[data_col+i].append(result[data_col+i][index-1]) # continue with previous value
elif cur_samps[i][0] == cur_id: # 2. if serial_id is the current id ( means this device was logged at current serial_id)
try:
result[data_col+i].append(cur_samps[i][1]) # fetch value
cur_samps[i] = next(devsIters[i]) # step iter of current device and its value
except StopIteration: # if current device is just exhausted
cur_samps[i] = None
elif cur_samps[i][0] > cur_id: #3. if serial_id is in the future ( means this device was not logged at the current serial_id)
if index == 0: # 1st loop
result[data_col+i].append(None)
else:
result[data_col+i].append(result[data_col+i][index-1]) # fetch and save the previous value
index += 1
return result
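# --- Added illustrative sketch (not part of the original module) ---
# A minimal, hedged example of getTable(): the channel names, sample values
# and the helper name below are made up purely to show the sample-ID
# alignment and forward-fill behaviour documented above.
def _example_get_table():
    '''Build a tiny two-channel log and align it with getTable().

    'ypos' has no sample with id 0, so its first aligned value is None;
    the first column of the result holds the sample ids [0, 1].
    '''
    example_xml = (
        '<data>'
        '<device><name>xpos</name>'
        '<sample id="0"><time>1000</time><value>1.0</value></sample>'
        '<sample id="1"><time>2000</time><value>2.0</value></sample>'
        '</device>'
        '<device><name>ypos</name>'
        '<sample id="1"><time>2000</time><value>5.0</value></sample>'
        '</device>'
        '</data>'
    )
    data = Data(example_xml)
    return getTable(data, 'xpos', 'ypos', with_id=True)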
class Data(object):
'''
    Parsed scan log data: per-channel sample ids, timestamps and values.
'''
def __init__(self, Xml):
'''
Constructor
'''
self.__logData = self.__parseRaw(Xml)
def __parseRaw(self,Xml):
'''
Raw Shape:
for example :
logData={
'Xpos':{'id':[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
'time' : [t1,t2,t3,t4,t5,t6,t7,t8,t9,t10],
'value': [0, 0, 1, 2, 3, 3, 3, 3, 3, 3],
},
'ypos':{'id':[4, 5, 6, 7, 8, 9],
'time' : [t1,t2,t3,t4,t5,t6],
'value': [0, 1, 1, 2, 3, 4],
},
...
'somePV':{'id':[0, 1, 2, 3, 4, 5, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],
'time' : [t1,t2,t3,t4,t5,t6, t7, t8, t9,t10,t11,t12,t13,t14,t15,t16]
'value': [v1,v2,v3,v4,v5,v6, v7, v8, v9,v10,v11,v12,v13,v14,v15,v16]
}
}
'''
channels = ET.fromstring(Xml).iter('device')
logdata = {}
for channel in channels:
samples = channel.findall('.//sample')
logdata[channel.find('name').text] = {
'id' : [int(sample.attrib['id']) for sample in samples],
'time' : [int(sample.find('time').text) for sample in samples],
'value' : [self.__types((sample.find('value').text)) for sample in samples]
}
return logdata
    def __types(self, text):
        '''
        Try to cast text to float or int, falling back to the raw text.
        '''
        try:
            if '.' in text:
                return float(text)
            return int(text)
        except (ValueError, TypeError):
            # Not numeric (or value missing): keep the original text.
            return text
def __getitem__(self, key):
return self.__logData[key]
def PVlist(self):
'''
Get the list of all PV names.
'''
return list(self.__logData.keys())
def PV(self, PVname):
'''
Get all data of a PV.
:param PVname: Name of the PV.
:return: Dictionary of the data sets, like:
{'id':[...], 'time':[...], 'value'[...]}
'''
return self.__logData[PVname]
def PVvalue(self, PVname):
'''
Get all values of a PV, with
:param PVname: Name of the PV.
:return: List of the values of the PV, like:
[0.1,0.2,...,19.2]
'''
return self.__logData[PVname]['value']
def PVtime(self, PVname):
'''
Get all timestamps of a PV.
:param PVname: Name of the PV.
:return: List of the timestamps of the PV, like:
['1427396679782', '1427396679782', ... , '1427396679782']
'''
return self.__logData[PVname]['time']
def __str__(self):
'''
Give a readable printing of the logged data.
'''
prettyOut = ''
for key in self.__logData:
prettyOut += key + ' : \n'
prettyOut += '{\n'
prettyOut += " 'id' : " + str(self.__logData[key]['id']) + ' ,\n'
prettyOut += " 'time' : " + str(self.__logData[key]['time']) + ' ,\n'
prettyOut += " 'value' :
|
Telefonica/toolium-examples
|
android/tests/test_android.py
|
Python
|
apache-2.0
| 1,294 | 0.000774 |
# -*- coding: utf-8 -*-
u"""
Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
This file is part of Toolium.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
|
from nose.tools import assert_equal
from android.pageobjects.menu import MenuPageObject
from android.pageobjects.tabs import TabsPageObject
from android.test_cases import AndroidTestCase
class Tabs(AndroidTestCase):
def test_change_tab(self):
# Open tabs activity
MenuPageObject().open_option('Views').open_option('Tabs').open_option('1. Content By Id')
tabs_page = TabsPageObject()
# Check that the first tab is open
        assert_equal('tab1', tabs_page.content1.text)
# Open second tab and check content
tabs_page.tab2.click()
assert_equal('tab2', tabs_page.content2.text)
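# Added illustrative sketch (not part of this example project): the test above
# drives page objects such as TabsPageObject. A rough outline of how such a
# page object might be declared with toolium's page-element classes follows;
# the locators and element names are assumptions, not the project's real
# definitions.
#
#     from selenium.webdriver.common.by import By
#     from toolium.pageobjects.page_object import PageObject
#     from toolium.pageelements import Button, Text
#
#     class TabsPageObject(PageObject):
#         tab2 = Button(By.XPATH, "//android.widget.TextView[@text='tab2']")
#         content1 = Text(By.ID, 'io.appium.android.apis:id/view1')
#         content2 = Text(By.ID, 'io.appium.android.apis:id/view2')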
|
mdboom/freetypy
|
docstrings/bitmap.py
|
Python
|
bsd-2-clause
| 5,308 | 0 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Michael Droettboom All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be interpreted
# as representing official policies, either expressed or implied, of
# the FreeBSD Project.
from __future__ import print_function, unicode_literals, absolute_import
Bitmap__init__ = """
A structure used to describe a bitmap or pixmap to the raster.
`Bitmap` supports the Python buffer interface, so it is easy to
convert it to a Numpy array. For example::
>>> import numpy as np
>>> a = np.asarray(bitmap)
"""
Bitmap_buffer = """
Get the bitmap's contents as a buffer.
In most cases, the preferred method to get the data is to cast the
`Bitmap` object to a memoryview, since that will also have size and
type information.
"""
Bitmap_convert = """
Convert a `Bitmap` to 8 bits per pixel. Given a `Bitmap` with depth
1bpp, 2bpp, 4bpp, or 8bpp, converts it to one with depth 8bpp, making
the number of used bytes per line (a.k.a. the ‘pitch’) a multiple of
`alignment`.
Parameters
----------
alignment : int, optional
The pitch of the bitmap is a multiple of this parameter. Common
values are 1, 2, or 4.
Returns
-------
target : Bitmap
The bitmap, converted to 8bpp.
"""
Bitmap_num_grays = """
The number of gray levels used in the bitmap. This field is only used
with `PIXEL_MODE.GRAY`.
"""
Bitmap_pitch = """
The number of bytes taken by one bitmap row.
Includes padding.
The pitch is positive when the bitmap has a ‘down’ flow, and negative
when it has an ‘up’ flow. In all cases, the pitch is an offset to add
to a bitmap pointer in order to go down one row.
Note that ‘padding’ means the alignment of a bitmap to a byte border,
and FreeType functions normally align to the smallest possible integer
value.
For the B/W rasterizer, `pitch` is always an even number.
To change the pitch of a bitmap (say, to make it a multiple of 4), use
`Bitmap.convert`. Alternatively, you might use callback functions to
directly render to the application's surface.
"""
Bitmap_pixel_mode = """
The `PIXEL_MODE`, i.e., how pixel bits are stored.
"""
Bitmap_rows = """
The number of bitmap rows.
"""
Bitmap_to_list = """
|freetypy| Convert the bitmap to a nested list.
"""
Bitmap_width = """
The number of pixels in bitmap row.
"""
PIXEL_MODE = """
Constants related to the pixel mode of bitmaps.
- `MONO`: A monochrome bitmap, using 1 bit per pixel. Note that pixels
are stored in most-significant order (MSB), which means that the
left-most pixel in a byte has value 128.
- `GRAY`: An 8-bit bitmap, generally used to represent anti-aliased
glyph images. Each pixel is stored in one byte. Note that the number
of ‘gray’ levels is stored in the ‘num_grays’ field of the Bitmap
structure (it generally is 256).
- `GRAY2`: A 2-bit per pixel bitmap, used to represent embedded
anti-aliased bitmaps in font files according to the OpenType
specification. We haven't found a single font using this format,
however.
- `GRAY4`: A 4-bit per pixel bitmap, representing embedded
anti-aliased bitmaps in font files according to the OpenType
specification. We haven't found a single font using this format,
however.
- `LCD`: An 8-bit bitmap, representing RGB or BGR decimated glyph
images used for display on LCD displays; the bitmap is three times
wider than the original glyph image. See also `RENDER_MODE.LCD`. On
many freetype builds, this functionality will be disabled due to
patent restrictions, in which case the resulting bitmap will be
grayscale.
- `LCD_V`: An 8-bit bitmap, representing RGB or BGR decimated glyph
images used for display on rotated LCD displays; the bitmap is three
times taller than the original glyph image. See also
`RENDER_MODE.LCD_V`. On many freetype builds, this functionality
will be disabled due to patent restrictions, in which case the
resulting bitmap will be grayscale.
"""
|
Azure/azure-sdk-for-python
|
sdk/costmanagement/azure-mgmt-costmanagement/azure/mgmt/costmanagement/operations/_generate_reservation_details_report_operations.py
|
Python
|
mit
| 15,195 | 0.005528 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class GenerateReservationDetailsReportOperations(object):
"""GenerateReservationDetailsReportOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.costmanagement.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _by_billing_account_id_initial(
self,
billing_account_id, # type: str
start_date, # type: str
end_date, # type: str
**kwargs # type: Any
):
# type: (...) -> Optional["_models.OperationStatus"]
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.OperationStatus"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self._by_billing_account_id_initial.metadata['url'] # type: ignore
path_format_arguments = {
'billingAccountId': self._serialize.url("billing_account_id", billing_account_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['startDate'] = self._serialize.query("start_date", start_date, 'str')
query_parameters['endDate'] = self._serialize.query("end_date", end_date, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('OperationStatus', pipeline_response)
if response.status_code == 202:
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
response_headers['Retry-After']=self._deserialize('int', response.headers.get('Retry-After'))
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_by_billing_account_id_initial.metadata = {'url': '/providers/Microsoft.Billing/billingAccounts/{billingAccountId}/providers/Microsoft.CostManagement/generateReservationDetailsReport'} # type: ignore
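    # Added usage sketch (not part of the generated client): a typical call of
    # the long-running operation defined below. The client construction and
    # the enrollment id are illustrative assumptions.
    #
    #     poller = client.generate_reservation_details_report.begin_by_billing_account_id(
    #         billing_account_id='12345', start_date='2020-01-01', end_date='2020-01-31')
    #     status = poller.result()   # blocks until the LRO finishes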
def begin_by_billing_account_id(
self,
billing_account_id, # type: str
start_date, # type: str
end_date, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.OperationStatus"]
"""Generates the reservations details report for provided date range asynchronously based on
enrollment id.
:param billing_account_id: Enrollment ID (Legacy BillingAccount ID).
:type billing_account_id: str
:param start_date: Start Date.
:type start_date: str
:param end_date: End Date.
:type end_date: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either OperationStatus or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.costmanagement.models.OperationStatus]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.OperationStatus"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._by_billing_account_id_initial(
billing_account_id=billing_account_id,
start_date=start_date,
end_date=end_date,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('OperationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'billingAccountId': self._serialize.url("billing_account_id", billing_account_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
|
skarbat/picute
|
piwebengine.py
|
Python
|
mit
| 2,940 | 0.007143 |
#!/usr/bin/env python
#
# Build QT5 webengine
#
import os
import sys
import xsysroot
if __name__ == '__main__':
    # Be careful about enabling a debug build: libQtWebengine.so takes 817MB :-)
debug_build=False
build_mode='release' if not debug_build else 'debug'
# We need a xsysroot profile with QT5 built in it
if len(sys.argv) > 1:
xprofile=sys.argv[1]
else:
print 'Need a xsysroot profile'
sys.exit(1)
# Find and activate the xsysroot profile
print '>>> Opening xsysroot profile: {}'.format(xprofile)
try:
        picute=xsysroot.XSysroot(profile=xprofile)
except:
print 'You need to create a Xsysroot Picute profile'
print 'Please see the README file'
sys.exit(1)
# Locate Webengine source code directory
webengine_path=os.path.join(picute.query('tmp'), 'qt5/qtwebengine')
    # Apply temporary patch to build QT5.7 Webengine for the RPI
# https://bugreports.qt.io/browse/QTBUG-57037
if not os.path.isdir(webengine_path):
print '>>> Could not find Webengine path: {}'.format(webengine_path)
sys.exit(1)
else:
patch_file='gyp_run.pro'
print '>>> Overwriting webengine qmake file: {}'.format(patch_file)
rc=os.system('cp {} {}/src/core'.format(patch_file, webengine_path))
if rc:
print 'Could not apply patch'
sys.exit(1)
else:
print '>>> Webengine patch has been applied'
# Now mount image if needed
print '>>> Accessing image...'
if not picute.is_mounted():
if not picute.mount():
sys.exit(1)
# Step 1: QMAKE
print '>>> Running Qmake...'
cmdline_prefix='export PKG_CONFIG_PATH={}/usr/lib/arm-linux-gnueabihf/pkgconfig'.format(picute.query('sysroot'))
print '>>> cmdline_prefix: ', cmdline_prefix
qmake_command='{}/usr/local/qt5/bin/qmake ' \
'WEBENGINE_CONFIG+=use_proprietary_codecs CONFIG+={}'.format(picute.query('sysroot'), build_mode)
print '>>> Qmake command:', qmake_command
rc=os.system('{} ; cd {} ; {}'.format(cmdline_prefix, webengine_path, qmake_command))
if rc:
print '>>> Qmake failed rc={} :-('.format(rc)
sys.exit(1)
# Step 2: MAKE
print '>>> Running Make...'
rc=os.system('{} ; cd {} ; make'.format(cmdline_prefix, webengine_path))
if rc:
print '>>> Make failed rc={} :-('.format(rc)
sys.exit(1)
# Step 3: INSTALL
print '>>> Running Make Install...'
rc=os.system('cd {} ; sudo make install'.format(webengine_path))
if rc:
print '>>> Make install failed rc={} :-('.format(rc)
sys.exit(1)
print '>>> Webengine built and installed'
# Webengine build complete: Unmount image
if not picute.umount():
print '>>> WARNING: Image is busy, most likely installation left some running processes.'
sys.exit(1)
sys.exit(0)
|
diamondman/proteusisc
|
test/test_functional.py
|
Python
|
lgpl-2.1
| 4,746 | 0.007585 |
#-*- coding: utf-8 -*-
import struct
import pytest
from proteusisc.controllerManager import getDriverInstanceForDevice
from proteusisc.jtagScanChain import JTAGScanChain
from proteusisc.test_utils import FakeUSBDev, FakeDevHandle,\
MockPhysicalJTAGDevice, FakeXPCU1Handle
from proteusisc.bittypes import bitarray, NoCareBitarray
def test_black_hole_register_constraints_three_black_holes():
#Tests that the compiler can work around black hole registers
#to get data where it needs to go. The expected behavior is
#to create three different frames, one per prim, but the frame
#state is not being tested here... just the results in the regs.
dev0 = MockPhysicalJTAGDevice(idcode=XC3S1200E_ID,
name="D0", status=bitarray('111100'))
dev1 = MockPhysicalJTAGDevice(idcode=XC3S1200E_ID,
name="D1", status=bitarray('111101'))
dev2 = MockPhysicalJTAGDevice(idcode=XC3S1200E_ID,
name="D2", status=bitarray('111110'))
usbdev = FakeUSBDev(FakeXPCU1Handle(dev0, dev1, dev2))
chain = JTAGScanChain(getDriverInstanceForDevice(usbdev))
d0, d1, d2 = get_XC3S1200E(chain), get_XC3S1200E(chain), \
get_XC3S1200E(chain)
chain._hasinit = True
chain._devices = [d0, d1, d2]
chain.jtag_enable()
d0.run_instruction("CFG_IN", data=bitarray('11010001'))
d1.run_instruction("CFG_IN", data=bitarray('01101010111'))
d2.run_instruction("CFG_IN",data=bitarray('11110'))
chain.flush()
assert "110100010110101011111110" not in dev0.\
event_history, "All data written into the first black "\
"hole register. Black Holes not avoided."
    #The extra zeros in the array are from shifting in the first
#bits. Some of these zeros may go away if unnecessary trailing
#bypass data is later skipped.
assert "11010001" in dev0.DRs[None].dumpData().to01()
assert "01101010111" in dev1.DRs[None].dumpData().to01()
assert "11110" in dev2.DRs[None].dumpData().to01()
def test_black_hole_register_constraints_complimentary_prims():
    #Tests if a Black Hole Read, a Black Hole Write, and a nocare
#write are combined in a way that satisfies all requests. The
#expected behavior is to combine these three non colliding prims
#into a single frame, but the frame state is not being tested
#here... just the results in the regs.
dev0 = MockPhysicalJTAGDevice(idcode=XC3S1200E_ID,
name="D0", status=bitarray('111100'))
dev1 = MockPhysicalJTAGDevice(idcode=XC3S1200E_ID,
name="D1", status=bitarray('111101'))
dev2 = MockPhysicalJTAGDevice(idcode=XC3S1200E_ID,
name="D2", status=bitarray('111110'))
usbdev = FakeUSBDev(FakeXPCU1Handle(dev0, dev1, dev2))
chain = JTAGScanChain(getDriverInstanceForDevice(usbdev))
d0, d1, d2 = get_XC3S1200E(chain), get_XC3S1200E(chain), \
get_XC3S1200E(chain)
chain._hasinit = True
chain._devices = [d0, d1, d2]
chain.jtag_enable()
d0.run_instruction("CFG_IN", data=bitarray('11010001'))
d1.run_instruction("BYPASS", data=NoCareBitarray(1))
a, _ = d2.run_instruction("CFG_IN", read=True, bitcount=8)
chain.flush()
assert a() == bitarray('00000000')
assert "1101000100" in dev0.DRs[None].dumpData().to01()
XC3S1200E_ID = bitarray('00000001110000101110000010010011')
def get_XC3S1200E(chain):
return chain.initialize_device_from_id(chain, XC3S1200E_ID)
def test_black_hole_register_constraints_bad_order_complimentary_prims():
    #Tests if a Black Hole Read, a Black Hole Write, and a nocare
#write are combined in a way that satisfies all requests. The
#expected behavior is to combine these three non colliding prims
    #into a single frame, but the frame state is not being tested
#here... just the results in the regs.
dev0 = MockPhysicalJTAGDevice(idcode=XC3S1200E_ID,
name="D0", status=bitarray('111100'))
dev1 = MockPhysicalJTAGDevice(idcode=XC3S1200E_ID,
name="D1", status=bitarray('111101'))
    dev2 = MockPhysicalJTAGDevice(idcode=XC3S1200E_ID,
                                  name="D2", status=bitarray('111110'))
usbdev = FakeUSBDev(FakeXPCU1Handle(dev0, dev1, dev2))
chain = JTAGScanChain(getDriverInstanceForDevice(usbdev))
d0, d1, d2 = get_XC3S1200E(chain), get_XC3S1200E(chain), \
get_XC3S1200E(chain)
chain._hasinit = True
chain._devices = [d0, d1, d2]
chain.jtag_enable()
d2.run_instruction("CFG_IN", data=bitarray('11010001'))
d1.run_instruction("BYPASS", data=NoCareBitarray(1))
a, _ = d1.run_instruction("CFG_IN", read=True, bitcount=8)
chain.flush()
assert a() == bitarray('00000000')
assert "1101000100" in dev2.DRs[None].dumpData().to01()
|