text (stringlengths, 6-947k) | repo_name (stringlengths, 5-100) | path (stringlengths, 4-231) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 6-947k) | score (float64, 0-0.34)
---|---|---|---|---|---|---
from django.conf.urls import patterns, url
from proxy import views
urlpatterns = patterns('',
url(r'^$', views.search, name='search'),
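    # bucket_name captures everything up to the first '/'; key captures the rest, slash included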
url(r'^(?P<bucket_name>\S+?)(?P<key>/\S*)', views.get, name='get')
)
|
stuart-warren/django-s3-proxy
|
proxy/urls.py
|
Python
|
apache-2.0
| 214 | 0.004673 |
from openerp import models,fields
class OeMedicalMedicamentCategory(models.Model):
_name = 'oemedical.medicament.category'
childs = fields.One2many('oemedical.medicament.category',
'parent_id', string='Children', )
name = fields.Char(size=256, string='Name', required=True)
parent_id = fields.Many2one('oemedical.medicament.category',
string='Parent', select=True)
_constraints = [
        (models.Model._check_recursion,
         'Error! You cannot create recursive categories.', ['parent_id'])
]
|
Manexware/medical
|
oemedical/oemedical_medicament_category/oemedical_medicament_category.py
|
Python
|
gpl-2.0
| 589 | 0.011885 |
from django.shortcuts import render, HttpResponse
import os
# Create your views here.
def status(request, task_id):
from celery.result import AsyncResult
    task = AsyncResult(task_id)
task.traceback_html = tracebackToHtml(task.traceback)
return render(request, 'task/html/task_status.html',
{'task': task,
'celery_url':'%s:%s' % (os.environ['VIP_FLOWER_HOST'],
os.environ['VIP_FLOWER_PORT'])})
def tracebackToHtml(txt):
html = str(txt).replace(' '*2, ' '*4)
html = html.split('\n')
html = map(lambda x: '<div style="text-indent: -4em; padding-left: 4em">' + \
x + '</div>', html)
html = '\n'.join(html)
return html
def listQueues(request):
def safe_int(i):
try:
return int(i)
except ValueError:
return None
import pyrabbit
#These values need to be unhardcoded...
client = pyrabbit.api.Client('localhost:15672', 'guest', 'guest')
names = [x['name'] for x in client.get_queues()]
tasks = [x for x in map(safe_int, names) if x is not None]
return render(request, 'task/html/task_list.html',
{'tasks': tasks})
|
andyneff/voxel-globe
|
voxel_globe/task/views.py
|
Python
|
mit
| 1,192 | 0.026846 |
from django.test import TestCase
from trix.trix_core import trix_markdown
class TestTrixMarkdown(TestCase):
def test_simple(self):
self.assertEqual(
trix_markdown.assignment_markdown('# Hello world\n'),
'<h1>Hello world</h1>')
def test_nl2br(self):
self.assertEqual(
trix_markdown.assignment_markdown('Hello\nworld'),
'<p>Hello<br>\nworld</p>')
|
devilry/trix2
|
trix/trix_core/tests/test_trix_markdown.py
|
Python
|
bsd-3-clause
| 421 | 0 |
#!/usr/bin/env python2.7
#The MIT License (MIT)
#Copyright (c) 2015-2016 mh4x0f P0cL4bs Team
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from sys import argv,exit
from os import getuid
from PyQt4.QtGui import QApplication,QIcon
from Core.Privilege import frm_privelege
from Core.Main import Initialize
from Core.check import check_dependencies
from Modules.utils import Refactor
def ExecRootApp():
check_dependencies()
root = QApplication(argv)
app = Initialize()
app.setWindowIcon(QIcon('rsc/icon.ico'))
    app.center()
    app.show()
exit(root.exec_())
if __name__ == '__main__':
if not getuid() == 0:
app2 = QApplication(argv)
priv = frm_privelege()
priv.setWindowIcon(QIcon('rsc/icon.ico'))
        priv.show()
        app2.exec_()
exit(Refactor.threadRoot(priv.Editpassword.text()))
ExecRootApp()
|
samyoyo/3vilTwinAttacker
|
3vilTwin-Attacker.py
|
Python
|
mit
| 1,843 | 0.01465 |
from pycp2k.inputsection import InputSection
from ._dielectric_cube1 import _dielectric_cube1
from ._dirichlet_bc_cube1 import _dirichlet_bc_cube1
from ._dirichlet_cstr_charge_cube1 import _dirichlet_cstr_charge_cube1
class _implicit_psolver1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.DIELECTRIC_CUBE = _dielectric_cube1()
self.DIRICHLET_BC_CUBE = _dirichlet_bc_cube1()
self.DIRICHLET_CSTR_CHARGE_CUBE = _dirichlet_cstr_charge_cube1()
self._name = "IMPLICIT_PSOLVER"
self._subsections = {'DIRICHLET_BC_CUBE': 'DIRICHLET_BC_CUBE', 'DIRICHLET_CSTR_CHARGE_CUBE': 'DIRICHLET_CSTR_CHARGE_CUBE', 'DIELECTRIC_CUBE': 'DIELECTRIC_CUBE'}
|
SINGROUP/pycp2k
|
pycp2k/classes/_implicit_psolver1.py
|
Python
|
lgpl-3.0
| 709 | 0.002821 |
import random
from os.path import join, dirname
import numpy as np
from sklearn.base import ClassifierMixin, BaseEstimator
import fasttext as ft
from underthesea.util.file_io import write
import os
from underthesea.util.singleton import Singleton
class FastTextClassifier(ClassifierMixin, BaseEstimator):
def __init__(self):
self.estimator = None
def fit(self, X, y, model_filename=None):
"""Fit FastText according to X, y
Parameters:
----------
X : list of text
each item is a text
y: list
each item is either a label (in multi class problem) or list of
labels (in multi label problem)
"""
train_file = "temp.train"
X = [x.replace("\n", " ") for x in X]
y = [item[0] for item in y]
y = [_.replace(" ", "-") for _ in y]
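        # fasttext supervised input format, one example per line:
        # "__label__<label> , <text>"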
lines = ["__label__{} , {}".format(j, i) for i, j in zip(X, y)]
content = "\n".join(lines)
write(train_file, content)
if model_filename:
self.estimator = ft.supervised(train_file, model_filename)
else:
self.estimator = ft.supervised(train_file)
os.remove(train_file)
def predict(self, X):
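        # stub: not implemented, always returns None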
return
def predict_proba(self, X):
output_ = self.estimator.predict_proba(X)
def transform_item(item):
label, score = item[0]
label = label.replace("__label__", "")
label = int(label)
if label == 0:
label = 1
score = 1 - score
return [label, score]
output_ = [transform_item(item) for item in output_]
output1 = np.array(output_)
return output1
@Singleton
class FastTextPredictor:
def __init__(self):
filepath = join(dirname(__file__), "fasttext.model")
self.estimator = ft.load_model(filepath)
    def transform_output(self, y):
y = y[0].replace("__label__", "")
y = y.replace("-", " ")
return y
def predict(self, X):
X = [X]
y_pred = self.estimator.predict(X)
        y_pred = [self.transform_output(item) for item in y_pred]
return y_pred
|
rain1024/underthesea
|
underthesea/classification/model_fasttext.py
|
Python
|
gpl-3.0
| 2,185 | 0 |
# Copyright 2016 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import patterns, url
from rest_framework.urlpatterns import format_suffix_patterns
from lcm.ns.vls.views import VlView, VlDetailView
urlpatterns = patterns('',
url(r'^openoapi/nslcm/v1/ns/vls$', VlView.as_view()),
url(r'^openoapi/nslcm/v1/ns/vls/(?P<vl_inst_id>[0-9a-zA-Z_-]+)$', VlDetailView.as_view()),
)
urlpatterns = format_suffix_patterns(urlpatterns)
|
open-o/nfvo
|
lcm/lcm/ns/vls/urls.py
|
Python
|
apache-2.0
| 1,036 | 0.000965 |
"""
NL2BR Extension
===============
A Python-Markdown extension to treat newlines as hard breaks, like
GitHub-flavored Markdown does.
Usage:
>>> import markdown
>>> print markdown.markdown('line 1\\nline 2', extensions=['nl2br'])
<p>line 1<br />
line 2</p>
Copyright 2011 [Brian Neal](http://deathofagremmie.com/)
Dependencies:
* [Python 2.4+](http://python.org)
* [Markdown 2.1+](http://packages.python.org/Markdown/)
"""
import markdown
BR_RE = r'\n'
class Nl2BrExtension(markdown.Extension):
def extendMarkdown(self, md, md_globals):
br_tag = markdown.inlinepatterns.SubstituteTagPattern(BR_RE, 'br')
md.inlinePatterns.add('nl', br_tag, '_end')
def makeExtension(configs=None):
return Nl2BrExtension(configs)
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/markdown/extensions/nl2br.py
|
Python
|
agpl-3.0
| 765 | 0.002614 |
from django.db import models
from taggit.managers import TaggableManager
class BaseModel(models.Model):
name = models.CharField(max_length=50, unique=True)
tags = TaggableManager()
def __unicode__(self):
return self.name
class Meta(object):
abstract = True
class AlphaModel(BaseModel):
pass
class BetaModel(BaseModel):
pass
|
feuervogel/django-taggit-templatetags
|
taggit_templatetags/tests/models.py
|
Python
|
bsd-3-clause
| 379 | 0.015831 |
#!/usr/bin/env python3
"""
Calculate the total cost of the tile it would take to cover a floor
plan of a given width and height, using a per-tile cost entered by the user.
"""
from __future__ import print_function
import argparse
import sys
class App(object):
"""Application."""
def __init__(self, args):
self._raw_args = args
self._args = None
self._argparse = argparse.ArgumentParser(
            description="Calculate the cost of tile to cover a WxH floor.")
self.prepare_parser()
def prepare_parser(self):
"""Prepare Argument Parser."""
self._argparse.add_argument(
"w", type=int, help="Width")
self._argparse.add_argument(
"h", type=int, help="Height")
self._argparse.add_argument(
"c", type=float, help="Cost of Tile assuming that a tile is 1x1")
def run(self):
"""Run the application."""
self._args = self._argparse.parse_args(self._raw_args)
rez = App.get_cost(self._args.w, self._args.h, self._args.c)
output = "The cost is : {}".format(rez)
print(output)
@staticmethod
    def get_cost(width, height, cost):
        """Compute the cost."""
        return (width * height) * float(cost)
if __name__ == "__main__":
App(sys.argv[1:]).run()
|
micumatei/learning-goals
|
Probleme/Solutii/Find_Cost_of_Tile_to_Cover_WxH_Floor/mmicu/python/main.py
|
Python
|
mit
| 1,283 | 0.000779 |
#!/usr/env/python
"""
diffusion_in_gravity.py
Example of a continuous-time, stochastic, pair-based cellular automaton model,
which simulates diffusion by random particle motion in a gravitational field.
The purpose of the example is to demonstrate the use of an OrientedRasterLCA.
GT, September 2014
"""
from __future__ import print_function
_DEBUG = False
import time
from numpy import where, bitwise_and
from landlab import RasterModelGrid
from landlab.ca.celllab_cts import Transition, CAPlotter
from landlab.ca.oriented_raster_cts import OrientedRasterCTS
def setup_transition_list():
"""
Creates and returns a list of Transition() objects to represent state
transitions for a biased random walk, in which the rate of downward
motion is greater than the rate in the other three directions.
Parameters
----------
(none)
Returns
-------
xn_list : list of Transition objects
List of objects that encode information about the link-state transitions.
Notes
-----
State 0 represents fluid and state 1 represents a particle (such as a
sediment grain or dissolved heavy particle).
The states and transitions are as follows:
    Pair state       Transition to    Process         Rate
    ==========       =============    =======         ====
    0 (0-0)          (none)           -               -
    1 (0-1)          2 (1-0)          left motion     1.0
    2 (1-0)          1 (0-1)          right motion    1.0
    3 (1-1)          (none)           -               -
    4 (0/0)          (none)           -               -
    5 (0/1)          6 (1/0)          down motion     1.1
    6 (1/0)          5 (0/1)          up motion       0.9
    7 (1/1)          (none)           -               -
"""
xn_list = []
xn_list.append( Transition((0,1,0), (1,0,0), 1., 'left motion') )
xn_list.append( Transition((1,0,0), (0,1,0), 1., 'right motion') )
xn_list.append( Transition((0,1,1), (1,0,1), 1.1, 'down motion') )
xn_list.append( Transition((1,0,1), (0,1,1), 0.9, 'up motion') )
if _DEBUG:
print()
print('setup_transition_list(): list has',len(xn_list),'transitions:')
for t in xn_list:
print(' From state',t.from_state,'to state',t.to_state,'at rate',t.rate,'called',t.name)
return xn_list
def main():
# INITIALIZE
# User-defined parameters
nr = 80
nc = 80
plot_interval = 2
run_duration = 200
report_interval = 5.0 # report interval, in real-time seconds
# Remember the clock time, and calculate when we next want to report
# progress.
current_real_time = time.time()
next_report = current_real_time + report_interval
# Create grid
mg = RasterModelGrid(nr, nc, 1.0)
# Make the boundaries be walls
mg.set_closed_boundaries_at_grid_edges(True, True, True, True)
# Set up the states and pair transitions.
ns_dict = { 0 : 'fluid', 1 : 'particle' }
xn_list = setup_transition_list()
# Create the node-state array and attach it to the grid
node_state_grid = mg.add_zeros('node', 'node_state_map', dtype=int)
# Initialize the node-state array
middle_rows = where(bitwise_and(mg.node_y>0.45*nr, mg.node_y<0.55*nr))[0]
node_state_grid[middle_rows] = 1
# Create the CA model
ca = OrientedRasterCTS(mg, ns_dict, xn_list, node_state_grid)
# Debug output if needed
if _DEBUG:
n = ca.grid.number_of_nodes
for r in range(ca.grid.number_of_node_rows):
for c in range(ca.grid.number_of_node_columns):
n -= 1
print('{0:.0f}'.format(ca.node_state[n]), end=' ')
print()
# Create a CAPlotter object for handling screen display
ca_plotter = CAPlotter(ca)
# Plot the initial grid
ca_plotter.update_plot()
# RUN
current_time = 0.0
while current_time < run_duration:
# Once in a while, print out simulation and real time to let the user
# know that the sim is running ok
current_real_time = time.time()
if current_real_time >= next_report:
print('Current sim time',current_time,'(',100*current_time/run_duration,'%)')
next_report = current_real_time + report_interval
# Run the model forward in time until the next output step
ca.run(current_time+plot_interval, ca.node_state,
plot_each_transition=False) #, plotter=ca_plotter)
current_time += plot_interval
# Plot the current grid
ca_plotter.update_plot()
# for debugging
if _DEBUG:
n = ca.grid.number_of_nodes
for r in range(ca.grid.number_of_node_rows):
for c in range(ca.grid.number_of_node_columns):
n -= 1
print('{0:.0f}'.format(ca.node_state[n]), end=' ')
print()
# FINALIZE
# Plot
ca_plotter.finalize()
if __name__ == "__main__":
main()
|
csherwood-usgs/landlab
|
landlab/ca/examples/diffusion_in_gravity.py
|
Python
|
mit
| 5,014 | 0.01077 |
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
# Import extensions to pull in osapi_compute_extension CONF option used below.
from nova.tests.functional import integrated_helpers
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class ExtensionsTest(integrated_helpers._IntegratedTestBase):
_api_version = 'v2'
def _get_flags(self):
f = super(ExtensionsTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.tests.unit.api.openstack.compute.legacy_v2.extensions.'
'foxinsocks.Foxinsocks')
return f
def test_get_foxnsocks(self):
# Simple check that fox-n-socks works.
response = self.api.api_request('/foxnsocks')
foxnsocks = response.content
LOG.debug("foxnsocks: %s" % foxnsocks)
self.assertEqual('Try to say this Mr. Knox, sir...', foxnsocks)
|
mmnelemane/nova
|
nova/tests/functional/test_extensions.py
|
Python
|
apache-2.0
| 1,588 | 0 |
# -*- coding: utf-8 -*-
from django.shortcuts import render
from templated_docs import fill_template
from templated_docs.http import FileResponse
from invoices.forms import InvoiceForm
def invoice_view(request):
form = InvoiceForm(request.POST or None)
if form.is_valid():
doctype = form.cleaned_data['format']
filename = fill_template(
'invoices/invoice.odt', form.cleaned_data,
output_format=doctype)
visible_filename = 'invoice.{}'.format(doctype)
return FileResponse(filename, visible_filename)
else:
return render(request, 'invoices/form.html', {'form': form})
|
alexmorozov/templated-docs
|
example/invoices/views.py
|
Python
|
mit
| 643 | 0.001555 |
#!/usr/bin/env python
import argparse
import json
import time
import logging
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient
import RPi.GPIO as GPIO
parser = argparse.ArgumentParser(description='Lightbulb control unit.')
parser.add_argument('-e', '--endpoint', required=True, help='The AWS Iot endpoint.')
parser.add_argument('-r', '--rootCA', required=True, help='Root CA file path.')
parser.add_argument('-c', '--cert', required=True, help='Certificate file path.')
parser.add_argument('-k', '--key', required=True, help='Private key file path.')
args = parser.parse_args()
def lightbulbShadowCallback_Update(payload, responseStatus, token):
if responseStatus == "timeout":
print("Update request " + token + " time out!")
if responseStatus == "accepted":
payloadDict = json.loads(payload)
print("~~~~~~~~~~~~~~~~~~~~~~~")
print("Update request with token: " + token + " accepted!")
print("property: " + str(payloadDict["state"]["desired"]["color"]))
print("~~~~~~~~~~~~~~~~~~~~~~~\n\n")
if responseStatus == "rejected":
print("Update request " + token + " rejected!")
def lightBulbShadowCallback_Delete(payload, responseStatus, token):
if responseStatus == "timeout":
print("Delete request " + token + " time out!")
if responseStatus == "accepted":
print("~~~~~~~~~~~~~~~~~~~~~~~")
print("Delete request with token: " + token + " accepted!")
print("~~~~~~~~~~~~~~~~~~~~~~~\n\n")
if responseStatus == "rejected":
print("Delete request " + token + " rejected!")
# Configure logging
logger = logging.getLogger("AWSIoTPythonSDK.core")
logger.setLevel(logging.DEBUG)
streamHandler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
# Init AWSIoTMQTTShadowClient
lightBulbShadowClient = AWSIoTMQTTShadowClient("controlUnitClient")
lightBulbShadowClient.configureEndpoint(args.endpoint, 8883)
lightBulbShadowClient.configureCredentials(args.rootCA, args.key, args.cert)
# AWSIoTMQTTShadowClient configuration
lightBulbShadowClient.configureAutoReconnectBackoffTime(1, 32, 20)
lightBulbShadowClient.configureConnectDisconnectTimeout(10) # 10 sec
lightBulbShadowClient.configureMQTTOperationTimeout(5) # 5 sec
# Connect to AWS IoT
lightBulbShadowClient.connect()
# Create a deviceShadow with persistent subscription
ControlUnit = lightBulbShadowClient.createShadowHandlerWithName("rpi-sense-hat", True)
# Delete shadow JSON doc
ControlUnit.shadowDelete(lightBulbShadowCallback_Delete, 5)
# Update shadow
def updateShadow(color):
JSONPayload = '{"state":{"desired":{"color":"' + color + '"}}}'
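    # e.g. color="red" produces {"state":{"desired":{"color":"red"}}}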
ControlUnit.shadowUpdate(JSONPayload, lightbulbShadowCallback_Update, 5)
RED = 9
GREEN = 10
BLUE = 11
GPIO.setmode(GPIO.BCM)
GPIO.setup(RED, GPIO.IN)
GPIO.setup(GREEN, GPIO.IN)
GPIO.setup(BLUE, GPIO.IN)
lastButton = None
while True:
if (lastButton != RED and GPIO.input(RED) == False):
lastButton = RED
updateShadow("red")
if (lastButton != GREEN and GPIO.input(GREEN) == False):
lastButton = GREEN
updateShadow("green")
if (lastButton != BLUE and GPIO.input(BLUE)== False):
lastButton = BLUE
updateShadow("blue")
    time.sleep(0.05)
|
stephenjelfs/aws-iot-gddev2016
|
controlUnit.py
|
Python
|
mit
| 3,371 | 0.00445 |
# -*- coding: utf-8 -*-
# Copyright(C) 2016 Julien Veyssier
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.lyrics import CapLyrics, SongLyrics
from weboob.tools.backend import Module
from weboob.tools.compat import quote_plus
from .browser import LyricsdotcomBrowser
__all__ = ['LyricsdotcomModule']
class LyricsdotcomModule(Module, CapLyrics):
NAME = 'lyricsdotcom'
MAINTAINER = u'Julien Veyssier'
EMAIL = 'eneiluj@gmx.fr'
VERSION = '2.1'
DESCRIPTION = 'Lyrics.com lyrics website'
LICENSE = 'AGPLv3+'
BROWSER = LyricsdotcomBrowser
def get_lyrics(self, id):
return self.browser.get_lyrics(id)
def iter_lyrics(self, criteria, pattern):
return self.browser.iter_lyrics(criteria, quote_plus(pattern.encode('utf-8')))
def fill_songlyrics(self, songlyrics, fields):
if 'content' in fields:
songlyrics = self.get_lyrics(songlyrics.id)
return songlyrics
OBJECTS = {
SongLyrics: fill_songlyrics
}
|
laurentb/weboob
|
modules/lyricsdotcom/module.py
|
Python
|
lgpl-3.0
| 1,693 | 0.000591 |
"""
File: phaseStreamTest.py
Author: Matt Strader
Date: Feb 18, 2016
Firmware: pgbe0_2016_Feb_19_2018.fpg
This script inserts a phase pulse in the qdr dds table and sets up the fake adc lut. It checks snap blocks for each stage of the channelization process. In the end the phase pulse should be recovered in the phase timestream of the chosen channel.
"""
import matplotlib, time, struct
import numpy as np
import matplotlib.pyplot as plt
import casperfpga
import corr
import logging
from myQdr import Qdr as myQdr
import types
import sys
import functools
from loadWavePulseLut import loadWaveToMem,loadDdsToMem
from loadWaveLut import writeBram
from Utils.binTools import castBin
def snapDdc(fpga,bSnapAll=False,bPlot=False,selBinIndex=0,selChanIndex=0,selChanStream=0,ddsAddrTrig=0):
"""trigger and read snapshots of aligned input and data values in the firmware
INPUTS:
bSnapAll: If True, snapshot will record values for all channels, not just one
bPlot: If True, will popup a plot of snapped values
selBinIndex: the fft bin to be inspected
selChanIndex: the channel within a stream (after channel selection) to be inspected
selChanStream: which of the four simultaneous streams of channels to inspect
ddsAddrTrig: trigger when the address for the DDS look up table reaches this value (out of 2**20)
OUTPUT:
dict with keys:
'bin': complex values seen in a chosen fft bin
'chan': complex values in a chosen channel
'dds': complex values coming from the QDR look-up table
'mix': complex values after the dds mixer but before the low pass filter
'ddcOut': complex values after the DDC low pass filter and downsampling
'chanCtr': the channel numbers associated with values in 'chan','dds','mix','ddcOut'.
If bSnapAll=False, these should all equal selChanIndex
'expectedMix': the values of 'chan' multiplied by 'dds'. Hopefully this matches the values in
'mix'.
"""
#set up the snapshots to record the selected bin/channel
fpga.write_int('sel_bin',selBinIndex)
fpga.write_int('sel_bch',selChanIndex)
fpga.write_int('sel_stream',selChanStream)
fpga.write_int('sel_ctr',ddsAddrTrig)
snapshotNames = ['snp2_bin_ss','snp2_ch_ss','snp2_dds_ss','snp2_mix_ss','snp2_ctr_ss','snp3_ddc_ss','snp3_cap_ss']
for name in snapshotNames:
fpga.snapshots[name].arm(man_valid=bSnapAll)
time.sleep(.1)
fpga.write_int('trig_buf',1)#trigger snapshots
time.sleep(.1) #wait for other trigger conditions to be met
fpga.write_int('trig_buf',0)#release trigger
#in most of the snapshots, we get two IQ values per cycle (I[t=0],Q[t=0]) and (I[t=1],Q[t=1])
#Retrieve them separately and then interleave them
binData = fpga.snapshots['snp2_bin_ss'].read(timeout=5,arm=False, man_valid=bSnapAll)['data']
i0 = np.array(binData['i0'])
i1 = np.array(binData['i1'])
q0 = np.array(binData['q0'])
q1 = np.array(binData['q1'])
#interleave values from alternating cycles (I0,Q0) and (I1,Q1)
bi = np.vstack((i0,i1)).flatten('F')
bq = np.vstack((q0,q1)).flatten('F')
chanData = fpga.snapshots['snp2_ch_ss'].read(timeout=5,arm=False, man_valid=bSnapAll)['data']
ci0 = np.array(chanData['i0'])
ci1 = np.array(chanData['i1'])
cq0 = np.array(chanData['q0'])
cq1 = np.array(chanData['q1'])
ci = np.vstack((ci0,ci1)).flatten('F')
cq = np.vstack((cq0,cq1)).flatten('F')
ddsData = fpga.snapshots['snp2_dds_ss'].read(timeout=5,arm=False, man_valid=bSnapAll)['data']
di0 = np.array(ddsData['i0'])
di1 = np.array(ddsData['i1'])
dq0 = np.array(ddsData['q0'])
dq1 = np.array(ddsData['q1'])
#interleave i0 and i1 values
di = np.vstack((di0,di1)).flatten('F')
dq = np.vstack((dq0,dq1)).flatten('F')
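    #note the conjugated dds term below: multiplying by conj(dds) mixes the
    #channel down toward baseband, which is what the firmware mixer should output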
expectedMix = (ci+1.j*cq)*(di-1.j*dq)
mixerData = fpga.snapshots['snp2_mix_ss'].read(timeout=5,arm=False, man_valid=bSnapAll)['data']
mi0 = np.array(mixerData['i0'])
mi1 = np.array(mixerData['i1'])
mq0 = np.array(mixerData['q0'])
mq1 = np.array(mixerData['q1'])
#interleave i0 and i1 values
mi = np.vstack((mi0,mi1)).flatten('F')
mq = np.vstack((mq0,mq1)).flatten('F')
#The low-pass filter in the DDC stage downsamples by 2, so we only get one sample per cycle here
ddcData = fpga.snapshots['snp3_ddc_ss'].read(timeout=5,arm=False, man_valid=bSnapAll)['data']
li = np.array(ddcData['i0'])
lq = np.array(ddcData['q0'])
rawPhase = np.array(ddcData['raw_phase'])
phaseData = fpga.snapshots['snp3_cap_ss'].read(timeout=5,arm=False,man_valid=bSnapAll)['data']
filtPhase = np.array(phaseData['phase'])
basePhase = np.array(phaseData['base'])
trig = np.array(phaseData['trig'],dtype=np.bool)
trig2 = np.array(phaseData['trig_raw'],dtype=np.bool)
ctrData = fpga.snapshots['snp2_ctr_ss'].read(timeout=5,arm=False, man_valid=bSnapAll)['data']
ctr = np.array(ctrData['ctr']) #the channel counter (0-256)
dctr = np.array(ctrData['dctr']) #the dds lut address counter (0-2**20)
if bPlot:
#we have the same number of samples from the lpf/downsample as everything else, but the each one
#corresponds to every other timesample in the others. So leave off the second half of lpf samples
#so the samples we have correspond to the same time period as the others, at least when plotting.
liSample = li[0:len(mi)/2]
fig,ax = plt.subplots(1,1)
ax.plot(di,'r.-',label='dds')
ax.plot(bi,'bv-',label='bin')
ax.plot(ci,'g.-',label='channel')
ax.plot(mi,'mo-',label='mix')
ddcTimes = 2.*np.arange(0,len(liSample))
ax.plot(ddcTimes,liSample,'k.-',label='ddcOut')
ax.set_title('I')
ax.legend(loc='best')
return {'bin':(bi+1.j*bq),'chan':(ci+1.j*cq),'dds':(di+1.j*dq),'mix':(mi+1.j*mq),'ddcOut':(li+1.j*lq),'chanCtr':ctr,'ddsCtr':dctr,'expectedMix':expectedMix,'rawPhase':rawPhase,'filtPhase':filtPhase,'trig':trig,'trig2':trig2,'basePhase':basePhase}
def setSingleChanSelection(fpga,selBinNums=[0,0,0,0],chanNum=0):
"""assigns bin numbers to a single channel (in each stream), to configure chan_sel block
INPUTS:
selBinNums: 4 bin numbers (for 4 streams) to be assigned to chanNum
chanNum: the channel number to be assigned
"""
nStreams = 4
if len(selBinNums) != nStreams:
raise TypeError,'selBinNums must have number of elements matching number of streams in firmware'
fpga.write_int('chan_sel_load',0) #set to zero so nothing loads while we set other registers.
#assign the bin number to be loaded to each stream
fpga.write_int('chan_sel_ch_bin0',selBinNums[0])
fpga.write_int('chan_sel_ch_bin1',selBinNums[1])
fpga.write_int('chan_sel_ch_bin2',selBinNums[2])
fpga.write_int('chan_sel_ch_bin3',selBinNums[3])
time.sleep(.1)
#in the register chan_sel_load, the lsb initiates the loading of the above bin numbers into memory
#the 8 bits above the lsb indicate which channel is being loaded (for all streams)
loadVal = (chanNum << 1) + 1
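    #e.g. chanNum=5 gives loadVal = (5<<1)+1 = 0b1011 = 11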
fpga.write_int('chan_sel_load',loadVal)
time.sleep(.1) #give it a chance to load
fpga.write_int('chan_sel_load',0) #stop loading
def startStream(fpga,selChanIndex=0):
"""initiates streaming of phase timestream (after prog_fir) to the 1Gbit ethernet
INPUTS:
selChanIndex: which channel to stream
"""
dest_ip =167772210 #corresponds to IP 10.0.0.50
fabric_port=50000
pktsPerFrame = 100 #how many 8byte words to accumulate before sending a frame
#configure the gbe core,
print 'restarting'
fpga.write_int('stream_phase_gbe64_dest_ip',dest_ip)
fpga.write_int('stream_phase_gbe64_dest_port',fabric_port)
fpga.write_int('stream_phase_gbe64_words_per_frame',pktsPerFrame)
#reset the core to make sure it's in a clean state
fpga.write_int('stream_phase_gbe64_rst_core',1)
time.sleep(.1)
fpga.write_int('stream_phase_gbe64_rst_core',0)
#choose what channel to stream
fpga.write_int('stream_phase_ch_we',selChanIndex)
#reset the counter for how many packets are waiting to send
fpga.write_int('stream_phase_ctr_rst',1)
time.sleep(.1)
fpga.write_int('stream_phase_ctr_rst',0)
#turn it on
fpga.write_int('stream_phase_on',1)
def setThresh(fpga,thresholdDeg = -15.):
"""Sets the phase threshold and baseline filter for photon pulse detection triggers in each channel
INPUTS:
thresholdDeg: The threshold in degrees. The phase must drop below this value to trigger a photon
event
"""
#convert deg to radians
thresholdRad = thresholdDeg * np.pi/180.
#format it as a fix16_13 to be placed in a register
binThreshold = castBin(thresholdRad,quantization='Round',nBits=16,binaryPoint=13,format='uint')
sampleRate = 1.e6
#for the baseline, we apply a second order state variable low pass filter to the phase
#See http://www.earlevel.com/main/2003/03/02/the-digital-state-variable-filter/
#The filter takes two parameters based on the desired Q factor and cutoff frequency
criticalFreq = 200 #Hz
Q=.7
baseKf=2*np.sin(np.pi*criticalFreq/sampleRate)
baseKq=1./Q
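    #with these settings: baseKf = 2*sin(pi*200/1e6) ~= 1.2566e-3 and
    #baseKq = 1/0.7 ~= 1.4286, a gentle low-pass that tracks only slow baseline drift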
#format these paramters as fix18_16 values to be loaded to registers
binBaseKf=castBin(baseKf,quantization='Round',nBits=18,binaryPoint=16,format='uint')
binBaseKq=castBin(baseKq,quantization='Round',nBits=18,binaryPoint=16,format='uint')
print 'threshold',thresholdDeg,binThreshold
print 'Kf:',baseKf,binBaseKf
print 'Kq:',baseKq,binBaseKq
#load the values in
fpga.write_int('capture0_base_kf',binBaseKf)
fpga.write_int('capture0_base_kq',binBaseKq)
fpga.write_int('capture0_threshold',binThreshold)
fpga.write_int('capture0_load_thresh',1)
time.sleep(.1)
fpga.write_int('capture0_load_thresh',0)
def stopStream(fpga):
"""stops streaming of phase timestream (after prog_fir) to the 1Gbit ethernet
INPUTS:
fpga: the casperfpga instance
"""
fpga.write_int('stream_phase_on',0)
if __name__=='__main__':
#Get the IP of the casperfpga from the command line
if len(sys.argv) > 1:
ip = sys.argv[1]
else:
ip='10.0.0.112'
print ip
fpga = casperfpga.katcp_fpga.KatcpFpga(ip,timeout=50.)
time.sleep(1)
if not fpga.is_running():
print 'Firmware is not running. Start firmware, calibrate, and load wave into qdr first!'
exit(0)
fpga.get_system_information()
bLoadAddr = False #set up chan_sel block
bLoadDds = False #compute and load dds table into qdr memory
bLoadFir = False #load fir coefficients into prog_fir block for each channel
bLoadDac = False #load probe tones into bram for dac/adc simulation block
bSetThresh = False #set the photon phase trigger threshold in the capture block
bStreamPhase = False #initiate stream of phase timestream to ethernet for selected channel
instrument = 'darkness'
startRegisterName = 'run'
#collect the names of bram blocks in firmware for the dac/adc simulator block
memNames = ['dac_lut_mem0','dac_lut_mem1','dac_lut_mem2']
memType='bram'
nBins = 2048
nChannels = 1024
nChannelsPerStream = 256
MHz = 1.e6
#parameters for dac look-up table (lut)
sampleRate = 2.e9
nSamplesPerCycle = 8
nLutRowsToUse = 2**11
nBytesPerMemSample = 8
nBitsPerSamplePair = 24
dynamicRange = .05
nSamples=nSamplesPerCycle*nLutRowsToUse
binSpacing = sampleRate/nBins
dacFreqResolution = sampleRate/nSamples
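    #for these values: binSpacing = 2e9/2048 = 976562.5 Hz and
    #dacFreqResolution = 2e9/(8*2**11) = 122070.3125 Hz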
#set the frequency of what the resonator would be. We will set the ddc to target this frequency
resFreq = 7.32421875e6 #already quantized
quantizedResFreq = np.round(resFreq/dacFreqResolution)*dacFreqResolution
genBinIndex = resFreq/binSpacing
selBinIndex = np.round(genBinIndex)
selChanIndex = 0
selChanStream = 0
ddsAddrTrig = 0
binCenterFreq = selBinIndex*binSpacing
#parameters for dds look-up table (lut)
nDdsSamplesPerCycle = 2
fpgaClockRate = 250.e6
nCyclesToLoopToSameChannel = nChannelsPerStream
nQdrRows = 2**20
nBytesPerQdrSample = 8
nBitsPerDdsSamplePair = 32
ddsSampleRate = nDdsSamplesPerCycle * fpgaClockRate / nCyclesToLoopToSameChannel
nDdsSamples = nDdsSamplesPerCycle*nQdrRows/nCyclesToLoopToSameChannel
print 'N dds samples',nDdsSamples
ddsFreqResolution = 1.*ddsSampleRate/nDdsSamples
ddsFreq = quantizedResFreq - binCenterFreq
print 'unrounded dds freq',ddsFreq/MHz
#quantize the dds freq according to its resolution
ddsFreq = np.round(ddsFreq/ddsFreqResolution)*ddsFreqResolution
ddsFreqs = np.zeros(nChannels)
ddsFreqs[selChanIndex] = ddsFreq
ddsPhases = np.zeros(nChannels)
print 'dac freq resoluton',dacFreqResolution
print 'resonator freq',resFreq/MHz
print 'quantized resonator freq',quantizedResFreq/MHz
print 'bin center freq',binCenterFreq/MHz
print 'dds sampleRate',ddsSampleRate/MHz,'MHz. res',ddsFreqResolution/MHz,'MHz'
print 'dds freq',ddsFreq
print 'gen bin index',genBinIndex
print 'bin index',selBinIndex
print 'channel',selChanIndex
#set the delay between the dds lut and the end of the fft block (firmware dependent)
ddsShift = 76+256
fpga.write_int('dds_shift',ddsShift)
#set list of bins to save in the channel selection block
if bLoadAddr:
setSingleChanSelection(fpga,selBinNums=[selBinIndex,0,0,0],chanNum=selChanIndex)
if bLoadDds:
pulseDict = {'ampDeg':30.,'arrivalTime':50.e-6}
otherPulseDict = {'arrivalTime':0}
pulseDicts = np.append(pulseDict,[otherPulseDict]*(len(ddsFreqs)-1))
print 'loading dds freqs'
fpga.write_int(startRegisterName,0) #do not read from qdr while writing
loadDict = loadDdsToMem(fpga,waveFreqs=ddsFreqs,phases=ddsPhases,sampleRate=ddsSampleRate,nSamplesPerCycle=nDdsSamplesPerCycle,nBitsPerSamplePair=nBitsPerDdsSamplePair,nSamples=nDdsSamples,phasePulseDicts=pulseDicts)
nTaps = 26
nFirBits = 12
firBinPt = 9
if bLoadFir:
print 'loading programmable FIR filter coefficients'
for iChan in xrange(nChannelsPerStream):
print iChan
fpga.write_int('prog_fir0_load_chan',0)
time.sleep(.1)
fir = np.loadtxt('/home/kids/SDR/Projects/Filters/matched_50us.txt')
#fir = np.arange(nTaps,dtype=np.uint32)
#firInts = np.left_shift(fir,5)
#fir = np.zeros(nTaps)
#fir = np.ones(nTaps)
#fir[1] = 1./(1.+iChan)
#nSmooth=4
#fir[-nSmooth:] = 1./nSmooth
firInts = np.array(fir*(2**firBinPt),dtype=np.int32)
writeBram(fpga,'prog_fir0_single_chan_coeffs',firInts,nRows=nTaps,nBytesPerSample=4)
time.sleep(.1)
loadVal = (1<<8) + iChan #first bit indicates we will write, next 8 bits is the chan number
fpga.write_int('prog_fir0_load_chan',loadVal)
time.sleep(.1)
fpga.write_int('prog_fir0_load_chan',0)
toneFreq = quantizedResFreq #resFreq + dacFreqResolution
if bLoadDac:
print 'loading dac lut'
waveFreqs = [toneFreq]
phases = [1.39]
loadDict = loadWaveToMem(fpga,waveFreqs=waveFreqs,phases=phases,sampleRate=sampleRate,nSamplesPerCycle=nSamplesPerCycle,nBytesPerMemSample=nBytesPerMemSample,nBitsPerSamplePair=nBitsPerSamplePair,memNames = memNames,nSamples=nSamples,memType=memType,dynamicRange=dynamicRange)
if bSetThresh:
setThresh(fpga)
if bStreamPhase:
startStream(fpga)
fpga.write_int(startRegisterName,1)
#fpga.write_int('sel_bch',selChanIndex)
snapDict = snapDdc(fpga,bSnapAll=False,selBinIndex=selBinIndex,selChanIndex=0,selChanStream=selChanStream,ddsAddrTrig=ddsAddrTrig,bPlot=True)
rawPhase = snapDict['rawPhase']
mix = snapDict['mix']
filtPhase = snapDict['filtPhase']
basePhase = snapDict['basePhase']
trig = np.roll(snapDict['trig'],-2) #there is an extra 2 cycle delay in firmware between we_out and phase
trig2 = np.roll(snapDict['trig2'],-2)
print np.sum(trig),np.sum(trig2)
mixPhaseDeg = 180./np.pi*np.angle(mix)
rawPhaseDeg = 180./np.pi*rawPhase
filtPhaseDeg = 180./np.pi*filtPhase
basePhaseDeg = 180./np.pi*basePhase
trigPhases = filtPhaseDeg[trig]
trig2Phases = filtPhaseDeg[trig2]
dt = nChannelsPerStream/fpgaClockRate
t = dt*np.arange(len(rawPhase))
t2 = (dt/2.)*np.arange(len(mixPhaseDeg)) #two IQ points per cycle are snapped
t3 = dt*np.arange(len(filtPhase))
trigTimes = t3[trig]
trig2Times = t3[trig2]
fig,ax = plt.subplots(1,1)
ax.plot(t/1.e-6,rawPhaseDeg,'k.-',label='raw')
ax.plot(t2/1.e-6,mixPhaseDeg,'r.-',label='mix')
ax.plot(t3/1.e-6,filtPhaseDeg,'b.-',label='filt')
ax.plot(t3/1.e-6,basePhaseDeg,'m.--',label='filt')
ax.plot(trigTimes/1.e-6,trigPhases,'mo',label='trig')
ax.plot(trig2Times/1.e-6,trig2Phases,'gv')
ax.set_ylabel('phase ($^{\circ}$)')
ax.set_xlabel('time ($\mu$s)')
ax.legend(loc='best')
plt.show()
stopStream(fpga)
print 'done!'
|
bmazin/SDR
|
Projects/FirmwareTests/darkDebug/phaseStreamTest.py
|
Python
|
gpl-2.0
| 17,347 | 0.022655 |
from . import stock_move
from . import product_product
|
OCA/stock-logistics-warehouse
|
stock_move_auto_assign/models/__init__.py
|
Python
|
agpl-3.0
| 55 | 0 |
#!/usr/bin/env python
#
# Copyright 2007,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, gr_unittest, blocks
import math
class test_max(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_001(self):
src_data = [0,0.2,-0.3,0,12,0]
expected_result = [float(max(src_data)),]
src = blocks.vector_source_f(src_data)
s2v = blocks.stream_to_vector(gr.sizeof_float, len(src_data))
op = blocks.max_ff(len(src_data))
dst = blocks.vector_sink_f()
self.tb.connect(src, s2v, op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(expected_result, result_data)
def stest_002(self):
src_data=[-100,-99,-98,-97,-96,-1]
expected_result = [float(max(src_data)),]
src = blocks.vector_source_f(src_data)
s2v = blocks.stream_to_vector(gr.sizeof_float, len(src_data))
op = blocks.max_ff(len(src_data))
dst = blocks.vector_sink_f()
self.tb.connect(src, s2v, op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(expected_result, result_data)
def stest_003(self):
src_data0 = [0, 2, -3, 0, 12, 0]
src_data1 = [1, 1, 1, 1, 1, 1]
expected_result = [float(max(x,y)) for x,y in zip(src_data0, src_data1)]
src0 = blocks.vector_source_f(src_data0)
src1 = blocks.vector_source_f(src_data1)
op = blocks.max_ff(1)
dst = blocks.vector_sink_f()
self.tb.connect(src0, (op, 0))
self.tb.connect(src1, (op, 1))
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(expected_result, result_data)
def stest_004(self):
dim = 2
src_data0 = [0, 2, -3, 0, 12, 0]
src_data1 = [1, 1, 1, 1, 1, 1]
        expected_result = []
        tmp = [float(max(x, y)) for x, y in zip(src_data0, src_data1)]
        for i in range(len(tmp) // dim):
            expected_result.append(float(max(tmp[i*dim:(i+1)*dim])))
src0 = blocks.vector_source_f(src_data0)
s2v0 = blocks.stream_to_vector(gr.sizeof_float,dim)
src1 = blocks.vector_source_f(src_data1)
s2v1 = blocks.stream_to_vector(gr.sizeof_float,dim)
op = blocks.max_ff(dim)
dst = blocks.vector_sink_f()
self.tb.connect(src0, s2v0, (op, 0))
self.tb.connect(src1, s2v1, (op, 1))
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(expected_result, result_data)
def stest_s001(self):
src_data = [0, 2, -3, 0, 12, 0]
expected_result = [max(src_data),]
src = blocks.vector_source_s(src_data)
s2v = blocks.stream_to_vector(gr.sizeof_short,len(src_data))
op = blocks.max_ss(len(src_data))
dst = blocks.vector_sink_s()
self.tb.connect(src, s2v, op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(expected_result, result_data)
def stest_s002(self):
src_data=[-100,-99,-98,-97,-96,-1]
expected_result = [max(src_data),]
src = blocks.vector_source_s(src_data)
s2v = blocks.stream_to_vector(gr.sizeof_short, len(src_data))
op = blocks.max_ss(len(src_data))
dst = blocks.vector_sink_s()
self.tb.connect(src, s2v, op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(expected_result, result_data)
def stest_s003(self):
src_data0 = [0, 2, -3, 0, 12, 0]
src_data1 = [1, 1, 1, 1, 1, 1]
expected_result = [max(x,y) for x,y in zip(src_data0, src_data1)]
src0 = blocks.vector_source_s(src_data0)
src1 = blocks.vector_source_s(src_data1)
op = blocks.max_ss(1)
dst = blocks.vector_sink_s()
self.tb.connect(src0, (op, 0))
self.tb.connect(src1, (op, 1))
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(expected_result, result_data)
def stest_s004(self):
dim = 2
src_data0 = [0, 2, -3, 0, 12, 0]
src_data1 = [1, 1, 1, 1, 1, 1]
        expected_result = []
        tmp = [max(x, y) for x, y in zip(src_data0, src_data1)]
        for i in range(len(tmp) // dim):
            expected_result.append(max(tmp[i*dim:(i+1)*dim]))
src0 = blocks.vector_source_s(src_data0)
s2v0 = blocks.stream_to_vector(gr.sizeof_short,dim)
src1 = blocks.vector_source_s(src_data1)
s2v1 = blocks.stream_to_vector(gr.sizeof_short,dim)
op = blocks.max_ss(dim)
dst = blocks.vector_sink_s()
self.tb.connect(src0, s2v0, (op, 0))
self.tb.connect(src1, s2v1, (op, 1))
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data()
self.assertEqual(expected_result, result_data)
if __name__ == '__main__':
gr_unittest.run(test_max, "test_max.xml")
|
trabucayre/gnuradio
|
gr-blocks/python/blocks/qa_max.py
|
Python
|
gpl-3.0
| 5,142 | 0.00739 |
def pe0001(upto):
total = 0
for i in range(upto):
if i % 3 == 0 or i % 5 == 0:
total += i
return total
print(pe0001(1000))
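
# A closed-form alternative (an illustrative sketch, not part of the original
# file): sum the multiples of 3 and of 5 via inclusion-exclusion instead of looping.
def pe0001_closed(upto):
    def tri_sum(k):
        n = (upto - 1) // k          # how many multiples of k lie below `upto`
        return k * n * (n + 1) // 2  # k * (1 + 2 + ... + n)
    return tri_sum(3) + tri_sum(5) - tri_sum(15)

print(pe0001_closed(1000))  # 233168, matching the loop above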
|
guandalf/projecteuler
|
pe0001.py
|
Python
|
mit
| 155 | 0.012903 |
# Case Conductor is a Test Case Management system.
# Copyright (C) 2011 uTest Inc.
#
# This file is part of Case Conductor.
#
# Case Conductor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Case Conductor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Case Conductor. If not, see <http://www.gnu.org/licenses/>.
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from ..core.util import get_object_or_404
from ..users.decorators import login_redirect
from ..testexecution.models import TestRunList
from .forms import EnvironmentSelectionForm
@login_redirect
def set_environment(request, testrun_id):
"""
Given a test run ID, allow the user to choose a valid environment-group
from among those valid for that test run, set that environment-group ID in
the user's session, and redirect to that test run.
"""
run = get_object_or_404(TestRunList, testrun_id, auth=request.auth)
form = EnvironmentSelectionForm(
request.POST or None,
groups=run.environmentgroups_prefetch,
current=request.session.get("environments", None))
if request.method == "POST" and form.is_valid():
request.session["environments"] = form.save()
return redirect("runtests_run", testrun_id=testrun_id)
return TemplateResponse(
request,
"runtests/environment.html",
{"form": form,
"testrun": run,
})
|
mozilla/caseconductor-ui
|
ccui/environments/views.py
|
Python
|
gpl-3.0
| 1,892 | 0.002643 |
import logging
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.http import Http404
from readthedocs.projects.models import Project, Domain
log = logging.getLogger(__name__)
LOG_TEMPLATE = u"(Middleware) {msg} [{host}{path}]"
class SubdomainMiddleware(object):
def process_request(self, request):
host = request.get_host().lower()
path = request.get_full_path()
log_kwargs = dict(host=host, path=path)
if settings.DEBUG:
log.debug(LOG_TEMPLATE.format(msg='DEBUG on, not processing middleware', **log_kwargs))
return None
if ':' in host:
host = host.split(':')[0]
domain_parts = host.split('.')
# Serve subdomains - but don't depend on the production domain only having 2 parts
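        # e.g. host 'pip.readthedocs.org' with PRODUCTION_DOMAIN 'readthedocs.org' -> subdomain 'pip'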
if len(domain_parts) == len(settings.PRODUCTION_DOMAIN.split('.')) + 1:
subdomain = domain_parts[0]
is_www = subdomain.lower() == 'www'
is_ssl = subdomain.lower() == 'ssl'
if not is_www and not is_ssl and settings.PRODUCTION_DOMAIN in host:
request.subdomain = True
request.slug = subdomain
request.urlconf = 'readthedocs.core.subdomain_urls'
return None
# Serve CNAMEs
if settings.PRODUCTION_DOMAIN not in host and \
'localhost' not in host and \
'testserver' not in host:
request.cname = True
domains = Domain.objects.filter(domain=host)
if domains.count():
for domain in domains:
if domain.domain == host:
request.slug = domain.project.slug
                        request.urlconf = 'readthedocs.core.subdomain_urls'
request.domain_object = True
domain.count = domain.count + 1
domain.save()
log.debug(LOG_TEMPLATE.format(
msg='Domain Object Detected: %s' % domain.domain, **log_kwargs))
break
if not hasattr(request, 'domain_object') and 'HTTP_X_RTD_SLUG' in request.META:
request.slug = request.META['HTTP_X_RTD_SLUG'].lower()
request.urlconf = 'readthedocs.core.subdomain_urls'
request.rtdheader = True
log.debug(LOG_TEMPLATE.format(
msg='X-RTD-Slug header detetected: %s' % request.slug, **log_kwargs))
# Try header first, then DNS
elif not hasattr(request, 'domain_object'):
try:
slug = cache.get(host)
if not slug:
from dns import resolver
answer = [ans for ans in resolver.query(host, 'CNAME')][0]
domain = answer.target.to_unicode().lower()
slug = domain.split('.')[0]
cache.set(host, slug, 60 * 60)
                    # Cache the host -> slug mapping for an hour.
log.debug(LOG_TEMPLATE.format(
msg='CNAME cached: %s->%s' % (slug, host),
**log_kwargs))
request.slug = slug
request.urlconf = 'readthedocs.core.subdomain_urls'
log.debug(LOG_TEMPLATE.format(
msg='CNAME detetected: %s' % request.slug,
**log_kwargs))
try:
proj = Project.objects.get(slug=slug)
domain, created = Domain.objects.get_or_create(
project=proj,
domain=host,
)
if created:
domain.machine = True
domain.cname = True
domain.count = domain.count + 1
domain.save()
except (ObjectDoesNotExist, MultipleObjectsReturned):
log.debug(LOG_TEMPLATE.format(
msg='Project CNAME does not exist: %s' % slug,
**log_kwargs))
except:
# Some crazy person is CNAMEing to us. 404.
log.exception(LOG_TEMPLATE.format(msg='CNAME 404', **log_kwargs))
raise Http404(_('Invalid hostname'))
# Google was finding crazy www.blah.readthedocs.org domains.
# Block these explicitly after trying CNAME logic.
if len(domain_parts) > 3:
# Stop www.fooo.readthedocs.org
if domain_parts[0] == 'www':
log.debug(LOG_TEMPLATE.format(msg='404ing long domain', **log_kwargs))
raise Http404(_('Invalid hostname'))
log.debug(LOG_TEMPLATE.format(msg='Allowing long domain name', **log_kwargs))
# raise Http404(_('Invalid hostname'))
# Normal request.
return None
class SingleVersionMiddleware(object):
"""Reset urlconf for requests for 'single_version' docs.
In settings.MIDDLEWARE_CLASSES, SingleVersionMiddleware must follow
after SubdomainMiddleware.
"""
def _get_slug(self, request):
"""Get slug from URLs requesting docs.
If URL is like '/docs/<project_name>/', we split path
and pull out slug.
If URL is subdomain or CNAME, we simply read request.slug, which is
set by SubdomainMiddleware.
"""
slug = None
if hasattr(request, 'slug'):
# Handle subdomains and CNAMEs.
slug = request.slug.lower()
else:
# Handle '/docs/<project>/' URLs
path = request.get_full_path()
path_parts = path.split('/')
if len(path_parts) > 2 and path_parts[1] == 'docs':
slug = path_parts[2].lower()
return slug
def process_request(self, request):
slug = self._get_slug(request)
if slug:
try:
proj = Project.objects.get(slug=slug)
except (ObjectDoesNotExist, MultipleObjectsReturned):
# Let 404 be handled further up stack.
return None
if getattr(proj, 'single_version', False):
request.urlconf = 'readthedocs.core.single_version_urls'
# Logging
host = request.get_host()
path = request.get_full_path()
log_kwargs = dict(host=host, path=path)
log.debug(LOG_TEMPLATE.format(
msg='Handling single_version request', **log_kwargs)
)
return None
|
SteveViss/readthedocs.org
|
readthedocs/core/middleware.py
|
Python
|
mit
| 6,904 | 0.001593 |
#!/usr/bin/env python3
import argparse
import dataclasses
from array import array
from .edr import (EDRHeader, EDRDisplayDataHeader, EDRSpectralDataHeader)
def parse_args():
parser = argparse.ArgumentParser(description='Print .edr file')
parser.add_argument('edr',
type=argparse.FileType('rb'),
help='.edr input filename')
return parser.parse_args()
def main():
args = parse_args()
print('EDR Header:')
edr_header = EDRHeader.unpack_from(args.edr.read(EDRHeader.struct.size))
print_dataclass(edr_header, indent=1)
for set_num in range(1, edr_header.num_sets + 1):
print('Set {}'.format(set_num))
print('\tDisplay Data Header:')
edr_header = EDRDisplayDataHeader.unpack_from(
args.edr.read(EDRDisplayDataHeader.struct.size))
print_dataclass(edr_header, indent=2)
print('\tSpectral Data Header:')
spec_header = EDRSpectralDataHeader.unpack_from(
args.edr.read(EDRSpectralDataHeader.struct.size))
print_dataclass(spec_header, indent=2)
spec_data = args.edr.read(8 *
spec_header.num_samples) # array of doubles
spec_data_arr = array('d', spec_data)
print('\tSpectral Data: {!s}'.format(spec_data_arr.tolist()))
def print_dataclass(obj, indent=0):
for field in dataclasses.fields(obj):
print('{}{}: {!r}'.format('\t' * indent, field.name,
getattr(obj, field.name)))
if __name__ == '__main__':
main()
|
illicium/ccss2edr
|
ccss2edr/dumpedr.py
|
Python
|
mit
| 1,581 | 0 |
'''
Motion Event Provider
=====================
Abstract class for the implementation of a
:class:`~kivy.input.motionevent.MotionEvent`
provider. The implementation must support the
:meth:`~MotionEventProvider.start`, :meth:`~MotionEventProvider.stop` and
:meth:`~MotionEventProvider.update` methods.
'''
__all__ = ('MotionEventProvider', )
class MotionEventProvider(object):
'''Base class for a provider.
'''
def __init__(self, device, args):
self.device = device
if self.__class__ == MotionEventProvider:
raise NotImplementedError('class MotionEventProvider is abstract')
def start(self):
'''Start the provider. This method is automatically called when the
application is started and if the configuration uses the current
provider.
'''
pass
def stop(self):
'''Stop the provider.
'''
pass
def update(self, dispatch_fn):
'''Update the provider and dispatch all the new touch events though the
`dispatch_fn` argument.
'''
pass
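
# A minimal sketch of a concrete provider (illustrative only: the class name and
# its queue-based polling are assumptions, not part of the original module):
class DummyProvider(MotionEventProvider):
    '''Example provider that dispatches nothing until events are queued.'''

    def start(self):
        self.queue = []

    def stop(self):
        self.queue = None

    def update(self, dispatch_fn):
        # a real provider would poll its input device here and call
        # dispatch_fn(event_type, motion_event) for each new event
        for args in self.queue:
            dispatch_fn(*args)
        del self.queue[:]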
|
Cheaterman/kivy
|
kivy/input/provider.py
|
Python
|
mit
| 1,082 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-17 19:19
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('orders', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='order',
options={'ordering': ['-created'], 'verbose_name': 'Замовлення', 'verbose_name_plural': 'Замовлення'},
),
migrations.RemoveField(
model_name='order',
name='address',
),
migrations.RemoveField(
model_name='order',
name='email',
),
migrations.RemoveField(
model_name='order',
name='postal_code',
),
migrations.AddField(
model_name='order',
name='carrier',
field=models.CharField(default='Нова пошта', max_length=250, verbose_name='Перевізник'),
preserve_default=False,
),
migrations.AddField(
model_name='order',
name='phone_num',
field=models.CharField(default='(050) 123-45-67', max_length=20, verbose_name='Номер телефону'),
preserve_default=False,
),
migrations.AddField(
model_name='order',
name='user',
field=models.ForeignKey(default='1', on_delete=django.db.models.deletion.CASCADE, related_name='orders', to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
migrations.AddField(
model_name='order',
name='warehouse_num',
field=models.PositiveIntegerField(default=1, verbose_name='Номер складу'),
preserve_default=False,
),
]
|
samitnuk/online_shop
|
apps/orders/migrations/0002_auto_20170317_2119.py
|
Python
|
mit
| 1,976 | 0.002614 |
from django import template
from django.utils.safestring import mark_safe
register = template.Library()
@register.filter
def img_tag(obj, cls=""):
if hasattr(obj, "img"):
if obj.img:
return mark_safe("<img class='" + cls + "' src='" + obj.img.url + "'/>")
return mark_safe("<span class='glyphicon glyphicon-picture " + cls + "'></span>")
@register.filter
def concat(obj, other):
try:
return str(obj) + str(other)
except:
return ""
@register.filter
def object_link(obj):
try:
return ("/" + type(obj).__name__ + "/" + str(obj.id) + "/").lower()
except:
return ""
@register.filter
def object_anchor(obj):
return mark_safe("<a href='" + object_link(obj) + "'>" + str(obj) + "</a>")
|
OmegaDroid/quokka
|
utils/templatetags/utils.py
|
Python
|
mit
| 767 | 0.005215 |
'''
Created on Mar 19, 2014
@author: Simon
'''
from engine.engine_job import EngineJob
class ValidationJob(EngineJob):
'''
M/R job for validating a trained model.
'''
def mapper(self, key, values):
data_processor = self.get_data_processor()
data_processor.set_data(values)
data_processor.normalize_data(self.data_handler.get_statistics())
data_set = data_processor.get_data_set()
alg = self.get_trained_alg()
validator = self.get_validator()
yield 'validation', validator.validate(alg, data_set)
def reducer(self, key, values):
vals = list(values)
yield key, self.get_validator().aggregate(vals)
if __name__ == '__main__':
ValidationJob.run()
|
xapharius/mrEnsemble
|
Engine/src/jobs/validation_job.py
|
Python
|
mit
| 746 | 0.00134 |
# -*- coding: utf-8 -*-
from __future__ import division
from odoo import api, fields, models
class A(models.Model):
_name = 'test_testing_utilities.a'
_description = 'Testing Utilities A'
f1 = fields.Char(required=True)
f2 = fields.Integer(default=42)
f3 = fields.Integer()
f4 = fields.Integer(compute='_compute_f4')
f5 = fields.Integer()
f6 = fields.Integer()
@api.onchange('f2')
def _on_change_f2(self):
self.f3 = int(self.f2 / 2)
self.f5 = self.f2
self.f6 = self.f2
@api.depends('f1', 'f2')
def _compute_f4(self):
for r in self:
r.f4 = r.f2 / (int(r.f1) or 1)
class B(models.Model):
_name = 'test_testing_utilities.readonly'
_description = 'Testing Utilities Readonly'
f1 = fields.Integer(default=1, readonly=True)
f2 = fields.Integer(compute='_compute_f2')
@api.depends('f1')
def _compute_f2(self):
for r in self:
r.f2 = 2 * r.f1
class C(models.Model):
_name = 'test_testing_utilities.c'
_description = 'Testing Utilities C'
name = fields.Char("name", required=True)
f2 = fields.Many2one('test_testing_utilities.m2o')
@api.onchange('f2')
def _on_change_f2(self):
self.name = self.f2.name
class M2O(models.Model):
_name = 'test_testing_utilities.m2o'
_description = 'Testing Utilities Many To One'
name = fields.Char(required=True)
class M2Onchange(models.Model):
_name = 'test_testing_utilities.d'
_description = 'Testing Utilities D'
# used to check that defaults & onchange to m2o work
f = fields.Many2one(
'test_testing_utilities.m2o',
required=True,
default=lambda self: self.env['test_testing_utilities.m2o'].search(
[], limit=1
)
)
f2 = fields.Char()
@api.onchange('f2')
def _on_change_f2(self):
self.f = self.env['test_testing_utilities.m2o'].search([
('name', 'ilike', self.f2),
], limit=1) if self.f2 else False
class M2MChange(models.Model):
_name = 'test_testing_utilities.e'
_description = 'Testing Utilities E'
m2m = fields.Many2many('test_testing_utilities.sub2')
count = fields.Integer(compute='_m2m_count')
@api.depends('m2m')
def _m2m_count(self):
for r in self:
r.count = len(r.m2m)
class M2MSub(models.Model):
_name = 'test_testing_utilities.sub2'
_description = 'Testing Utilities Subtraction 2'
name = fields.Char()
class M2MChange2(models.Model):
_name = 'test_testing_utilities.f'
_description = 'Testing Utilities F'
def _get_some(self):
r = self.env['test_testing_utilities.sub2'].search([], limit=2)
return r
m2m = fields.Many2many(
'test_testing_utilities.sub2',
default=_get_some,
)
m2o = fields.Many2one('test_testing_utilities.sub2')
@api.onchange('m2o')
def _on_change_m2o(self):
self.m2m = self.m2m | self.m2o
class M2MReadonly(models.Model):
_name = 'test_testing_utilities.g'
_description = 'Testing Utilities G'
m2m = fields.Many2many('test_testing_utilities.sub3', readonly=True)
class M2MSub3(models.Model):
_name = 'test_testing_utilities.sub3'
_description = 'Testing Utilities Subtraction 3'
name = fields.Char()
class O2MChange(models.Model):
_name = 'test_testing_utilities.parent'
_description = 'Testing Utilities Parent'
value = fields.Integer(default=1)
v = fields.Integer()
subs = fields.One2many('test_testing_utilities.sub', 'parent_id')
@api.onchange('value', 'subs')
def _onchange_values(self):
self.v = self.value + sum(s.value for s in self.subs)
class O2MSub(models.Model):
_name = 'test_testing_utilities.sub'
_description = 'Testing Utilities Subtraction'
name = fields.Char(compute='_compute_name')
value = fields.Integer(default=2)
v = fields.Integer()
parent_id = fields.Many2one('test_testing_utilities.parent')
has_parent = fields.Boolean()
@api.onchange('value')
def _onchange_value(self):
self.v = self.value
@api.depends('v')
def _compute_name(self):
for r in self:
r.name = str(r.v)
@api.onchange('has_parent')
def _onchange_has_parent(self):
if self.has_parent:
self.value = self.parent_id.value
class O2MDefault(models.Model):
_name = 'test_testing_utilities.default'
_description = 'Testing Utilities Default'
def _default_subs(self):
return [
(0, 0, {'v': 5})
]
value = fields.Integer(default=1)
v = fields.Integer()
subs = fields.One2many('test_testing_utilities.sub3', 'parent_id', default=_default_subs)
class O2MSub3(models.Model):
_name = 'test_testing_utilities.sub3'
_description = 'Testing Utilities Subtraction 3'
name = fields.Char(compute='_compute_name')
value = fields.Integer(default=2)
v = fields.Integer(default=6)
parent_id = fields.Many2one('test_testing_utilities.default')
@api.onchange('value')
def _onchange_value(self):
self.v = self.value
@api.depends('v')
def _compute_name(self):
for r in self:
r.name = str(r.v)
class O2MOnchangeParent(models.Model):
_name = 'test_testing_utilities.onchange_parent'
_description = 'Testing Utilities Onchange Parent'
line_ids = fields.One2many('test_testing_utilities.onchange_line', 'parent')
@api.onchange('line_ids')
def _onchange_line_ids(self):
for line in self.line_ids.filtered(lambda l: l.flag):
self.env['test_testing_utilities.onchange_line'].new({'parent': self.id})
class M2OOnchangeLine(models.Model):
_name = 'test_testing_utilities.onchange_line'
_description = 'Testing Utilities Onchange Line'
parent = fields.Many2one('test_testing_utilities.onchange_parent')
dummy = fields.Float()
flag = fields.Boolean(store=False)
@api.onchange('dummy')
def _onchange_flag(self):
self.flag = True
class O2MChangeCount(models.Model):
_name = 'test_testing_utilities.onchange_count'
_description = _name
count = fields.Integer()
line_ids = fields.One2many('test_testing_utilities.onchange_count_sub', 'parent')
@api.onchange('count')
def _onchange_count(self):
Sub = self.env['test_testing_utilities.onchange_count_sub']
recs = Sub
for i in range(self.count):
recs |= Sub.new({'name': str(i)})
self.line_ids = recs
class O2MChangeSub(models.Model):
_name = 'test_testing_utilities.onchange_count_sub'
_description = _name
parent = fields.Many2one('test_testing_utilities.onchange_count')
name = fields.Char()
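# Editorial sketch of how these models are typically exercised from a test,
# using odoo.tests.common.Form (the assertions below are illustrative, not
# part of this module):
#
#   from odoo.tests.common import Form, TransactionCase
#
#   class TestOnchange(TransactionCase):
#       def test_a_onchange(self):
#           f = Form(self.env['test_testing_utilities.a'])
#           f.f1 = '4'
#           f.f2 = 8       # _on_change_f2 sets f3 = 4, f5 = f6 = 8
#           r = f.save()
#           self.assertEqual(r.f3, 4)
#           self.assertEqual(r.f4, 2)  # _compute_f4: 8 / int('4')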
|
t3dev/odoo
|
odoo/addons/test_testing_utilities/models.py
|
Python
|
gpl-3.0
| 6,753 | 0.002962 |
import os
import sys
if sys.version_info >= (3, 0):
sys.exit(0)
import requests
import json
import numpy as np
import time
import logging
import xgboost as xgb
from test_utils import (create_docker_connection, BenchmarkException, headers,
log_clipper_state)
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.abspath("%s/../clipper_admin" % cur_dir))
from clipper_admin.deployers.python import deploy_python_closure
logging.basicConfig(
format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%y-%m-%d:%H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
app_name = "xgboost-test"
model_name = "xgboost-model"
def deploy_and_test_model(clipper_conn,
model,
version,
predict_fn,
link_model=False):
deploy_python_closure(
clipper_conn,
model_name,
version,
"integers",
predict_fn,
pkgs_to_install=['xgboost'])
time.sleep(5)
if link_model:
clipper_conn.link_model_to_app(app_name, model_name)
time.sleep(5)
test_model(clipper_conn, app_name, version)
def test_model(clipper_conn, app, version):
time.sleep(25)
num_preds = 25
num_defaults = 0
addr = clipper_conn.get_query_addr()
for i in range(num_preds):
response = requests.post(
"http://%s/%s/predict" % (addr, app),
headers=headers,
data=json.dumps({
'input': get_test_point()
}))
result = response.json()
if response.status_code == requests.codes.ok and result["default"]:
num_defaults += 1
elif response.status_code != requests.codes.ok:
print(result)
raise BenchmarkException(response.text)
if num_defaults > 0:
print("Error: %d/%d predictions were default" % (num_defaults,
num_preds))
if num_defaults > num_preds / 2:
raise BenchmarkException("Error querying APP %s, MODEL %s:%d" %
(app, model_name, version))
def get_test_point():
return [np.random.randint(255) for _ in range(784)]
if __name__ == "__main__":
pos_label = 3
try:
clipper_conn = create_docker_connection(
cleanup=True, start_clipper=True)
try:
clipper_conn.register_application(app_name, "integers",
"default_pred", 100000)
time.sleep(1)
addr = clipper_conn.get_query_addr()
response = requests.post(
"http://%s/%s/predict" % (addr, app_name),
headers=headers,
data=json.dumps({
'input': get_test_point()
}))
result = response.json()
if response.status_code != requests.codes.ok:
print("Error: %s" % response.text)
raise BenchmarkException("Error creating app %s" % app_name)
version = 1
dtrain = xgb.DMatrix(get_test_point(), label=[0])
param = {
'max_depth': 2,
'eta': 1,
'silent': 1,
'objective': 'binary:logistic'
}
watchlist = [(dtrain, 'train')]
num_round = 2
bst = xgb.train(param, dtrain, num_round, watchlist)
def predict(xs):
return [str(bst.predict(xgb.DMatrix(xs)))]
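            # Editorial note: predict closes over the trained Booster (bst),
            # so deploy_python_closure serializes the model together with the
            # function; each call receives a batch xs and returns a list
            # holding one stringified prediction array for the whole batch.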
deploy_and_test_model(
clipper_conn, bst, version, predict, link_model=True)
except BenchmarkException as e:
log_clipper_state(clipper_conn)
logger.exception("BenchmarkException")
clipper_conn = create_docker_connection(
cleanup=True, start_clipper=False)
sys.exit(1)
else:
clipper_conn = create_docker_connection(
cleanup=True, start_clipper=False)
except Exception as e:
logger.exception("Exception")
clipper_conn = create_docker_connection(
cleanup=True, start_clipper=False)
sys.exit(1)
|
dcrankshaw/clipper
|
integration-tests/deploy_xgboost_models.py
|
Python
|
apache-2.0
| 4,355 | 0.000459 |
#Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
#history https://hg.reportlab.com/hg-public/reportlab/log/tip/src/reportlab/graphics/charts/doughnut.py
# doughnut chart
__version__='3.3.0'
__doc__="""Doughnut chart
Produces a circular chart like the doughnut charts produced by Excel.
Can handle multiple series (which produce concentric 'rings' in the chart).
"""
import copy
from math import sin, cos, pi
from reportlab.lib import colors
from reportlab.lib.validators import isColor, isNumber, isListOfNumbersOrNone,\
isListOfNumbers, isColorOrNone, isString,\
isListOfStringsOrNone, OneOf, SequenceOf,\
isBoolean, isListOfColors,\
isNoneOrListOfNoneOrStrings,\
isNoneOrListOfNoneOrNumbers,\
isNumberOrNone, isListOfNoneOrNumber,\
isListOfListOfNoneOrNumber, EitherOr
from reportlab.lib.attrmap import *
from reportlab.pdfgen.canvas import Canvas
from reportlab.graphics.shapes import Group, Drawing, Line, Rect, Polygon, Ellipse, \
Wedge, String, SolidShape, UserNode, STATE_DEFAULTS
from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection, PropHolder
from reportlab.graphics.charts.piecharts import AbstractPieChart, WedgeProperties, _addWedgeLabel, fixLabelOverlaps
from reportlab.graphics.charts.textlabels import Label
from reportlab.graphics.widgets.markers import Marker
from functools import reduce
from reportlab import xrange
class SectorProperties(WedgeProperties):
"""This holds descriptive information about the sectors in a doughnut chart.
It is not to be confused with the 'sector itself'; this just holds
a recipe for how to format one, and does not allow you to hack the
angles. It can format a genuine Sector object for you with its
format method.
"""
_attrMap = AttrMap(BASE=WedgeProperties,
)
class Doughnut(AbstractPieChart):
_attrMap = AttrMap(
x = AttrMapValue(isNumber, desc='X position of the chart within its container.'),
y = AttrMapValue(isNumber, desc='Y position of the chart within its container.'),
        width = AttrMapValue(isNumber, desc='width of doughnut bounding box. Need not be same as the drawing width.'),
        height = AttrMapValue(isNumber, desc='height of doughnut bounding box. Need not be same as the drawing height.'),
data = AttrMapValue(EitherOr((isListOfNoneOrNumber,isListOfListOfNoneOrNumber)), desc='list of numbers defining sector sizes; need not sum to 1'),
labels = AttrMapValue(isListOfStringsOrNone, desc="optional list of labels to use for each data point"),
startAngle = AttrMapValue(isNumber, desc="angle of first slice; like the compass, 0 is due North"),
direction = AttrMapValue(OneOf('clockwise', 'anticlockwise'), desc="'clockwise' or 'anticlockwise'"),
slices = AttrMapValue(None, desc="collection of sector descriptor objects"),
        simpleLabels = AttrMapValue(isBoolean, desc="If true (default) use String not super duper WedgeLabel"),
        # advanced usage
        checkLabelOverlap = AttrMapValue(isBoolean, desc="If true check and attempt to fix\n standard label overlaps (default off)",advancedUsage=1),
sideLabels = AttrMapValue(isBoolean, desc="If true attempt to make chart with labels along side and pointers", advancedUsage=1),
innerRadiusFraction = AttrMapValue(isNumberOrNone,
desc='None or the fraction of the radius to be used as the inner hole.\nIf not a suitable default will be used.'),
)
def __init__(self):
self.x = 0
self.y = 0
self.width = 100
self.height = 100
self.data = [1,1]
self.labels = None # or list of strings
self.startAngle = 90
self.direction = "clockwise"
self.simpleLabels = 1
self.checkLabelOverlap = 0
self.sideLabels = 0
self.innerRadiusFraction = None
self.slices = TypedPropertyCollection(SectorProperties)
self.slices[0].fillColor = colors.darkcyan
self.slices[1].fillColor = colors.blueviolet
self.slices[2].fillColor = colors.blue
self.slices[3].fillColor = colors.cyan
self.slices[4].fillColor = colors.pink
self.slices[5].fillColor = colors.magenta
self.slices[6].fillColor = colors.yellow
def demo(self):
d = Drawing(200, 100)
dn = Doughnut()
dn.x = 50
dn.y = 10
dn.width = 100
dn.height = 80
dn.data = [10,20,30,40,50,60]
dn.labels = ['a','b','c','d','e','f']
dn.slices.strokeWidth=0.5
dn.slices[3].popout = 10
dn.slices[3].strokeWidth = 2
dn.slices[3].strokeDashArray = [2,2]
dn.slices[3].labelRadius = 1.75
dn.slices[3].fontColor = colors.red
dn.slices[0].fillColor = colors.darkcyan
dn.slices[1].fillColor = colors.blueviolet
dn.slices[2].fillColor = colors.blue
dn.slices[3].fillColor = colors.cyan
dn.slices[4].fillColor = colors.aquamarine
dn.slices[5].fillColor = colors.cadetblue
dn.slices[6].fillColor = colors.lightcoral
d.add(dn)
return d
    def normalizeData(self, data=None):
        from operator import add
        total = float(reduce(add, data, 0))
        # Scale each value so the series sums to 360 degrees; a near-zero
        # total degenerates to a list of zeros:
        return list(map(lambda x, f=360. / total: f * x, data)) if abs(total) >= 1e-8 else len(data) * [0]
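    # Worked example of normalizeData (editorial sketch, not executed):
    #   normalizeData([10, 20, 30]) -> [60.0, 120.0, 180.0]   (scale factor 360/60)
    #   normalizeData([0, 0])       -> [0, 0]                 (near-zero total short-circuits)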
def makeSectors(self):
# normalize slice data
data = self.data
multi = isListOfListOfNoneOrNumber(data)
if multi:
#it's a nested list, more than one sequence
normData = []
n = []
for l in data:
t = self.normalizeData(l)
normData.append(t)
n.append(len(t))
self._seriesCount = max(n)
else:
normData = self.normalizeData(data)
n = len(normData)
self._seriesCount = n
#labels
checkLabelOverlap = self.checkLabelOverlap
L = []
L_add = L.append
labels = self.labels
if labels is None:
labels = []
if not multi:
labels = [''] * n
else:
for m in n:
labels = list(labels) + [''] * m
else:
#there's no point in raising errors for less than enough labels if
#we silently create all for the extreme case of no labels.
if not multi:
i = n-len(labels)
if i>0:
labels = list(labels) + [''] * i
else:
tlab = 0
for m in n:
tlab += m
i = tlab-len(labels)
if i>0:
labels = list(labels) + [''] * i
self.labels = labels
xradius = self.width/2.0
yradius = self.height/2.0
centerx = self.x + xradius
centery = self.y + yradius
if self.direction == "anticlockwise":
whichWay = 1
else:
whichWay = -1
g = Group()
startAngle = self.startAngle #% 360
styleCount = len(self.slices)
irf = self.innerRadiusFraction
if multi:
#multi-series doughnut
ndata = len(data)
if irf is None:
yir = (yradius/2.5)/ndata
xir = (xradius/2.5)/ndata
else:
yir = yradius*irf
xir = xradius*irf
ydr = (yradius-yir)/ndata
xdr = (xradius-xir)/ndata
for sn,series in enumerate(normData):
for i,angle in enumerate(series):
endAngle = (startAngle + (angle * whichWay)) #% 360
aa = abs(startAngle-endAngle)
if aa<1e-5:
startAngle = endAngle
continue
if startAngle < endAngle:
a1 = startAngle
a2 = endAngle
else:
a1 = endAngle
a2 = startAngle
startAngle = endAngle
#if we didn't use %stylecount here we'd end up with the later sectors
#all having the default style
sectorStyle = self.slices[sn,i%styleCount]
# is it a popout?
cx, cy = centerx, centery
if sectorStyle.popout != 0:
# pop out the sector
averageAngle = (a1+a2)/2.0
aveAngleRadians = averageAngle * pi/180.0
popdistance = sectorStyle.popout
cx = centerx + popdistance * cos(aveAngleRadians)
cy = centery + popdistance * sin(aveAngleRadians)
yr1 = yir+sn*ydr
yr = yr1 + ydr
xr1 = xir+sn*xdr
xr = xr1 + xdr
if len(series) > 1:
theSector = Wedge(cx, cy, xr, a1, a2, yradius=yr, radius1=xr1, yradius1=yr1)
else:
theSector = Wedge(cx, cy, xr, a1, a2, yradius=yr, radius1=xr1, yradius1=yr1, annular=True)
theSector.fillColor = sectorStyle.fillColor
theSector.strokeColor = sectorStyle.strokeColor
theSector.strokeWidth = sectorStyle.strokeWidth
theSector.strokeDashArray = sectorStyle.strokeDashArray
shader = sectorStyle.shadingKind
if shader:
nshades = aa / float(sectorStyle.shadingAngle)
if nshades > 1:
shader = colors.Whiter if shader=='lighten' else colors.Blacker
nshades = 1+int(nshades)
shadingAmount = 1-sectorStyle.shadingAmount
if sectorStyle.shadingDirection=='normal':
dsh = (1-shadingAmount)/float(nshades-1)
shf1 = shadingAmount
else:
dsh = (shadingAmount-1)/float(nshades-1)
shf1 = 1
shda = (a2-a1)/float(nshades)
shsc = sectorStyle.fillColor
theSector.fillColor = None
for ish in xrange(nshades):
sha1 = a1 + ish*shda
sha2 = a1 + (ish+1)*shda
shc = shader(shsc,shf1 + dsh*ish)
if len(series)>1:
shSector = Wedge(cx, cy, xr, sha1, sha2, yradius=yr, radius1=xr1, yradius1=yr1)
else:
shSector = Wedge(cx, cy, xr, sha1, sha2, yradius=yr, radius1=xr1, yradius1=yr1, annular=True)
shSector.fillColor = shc
shSector.strokeColor = None
shSector.strokeWidth = 0
g.add(shSector)
g.add(theSector)
if sn == 0 and sectorStyle.visible and sectorStyle.label_visible:
text = self.getSeriesName(i,'')
if text:
averageAngle = (a1+a2)/2.0
aveAngleRadians = averageAngle*pi/180.0
labelRadius = sectorStyle.labelRadius
rx = xradius*labelRadius
ry = yradius*labelRadius
labelX = centerx + (0.5 * self.width * cos(aveAngleRadians) * labelRadius)
labelY = centery + (0.5 * self.height * sin(aveAngleRadians) * labelRadius)
l = _addWedgeLabel(self,text,averageAngle,labelX,labelY,sectorStyle)
if checkLabelOverlap:
l._origdata = { 'x': labelX, 'y':labelY, 'angle': averageAngle,
'rx': rx, 'ry':ry, 'cx':cx, 'cy':cy,
'bounds': l.getBounds(),
}
L_add(l)
else:
#single series doughnut
if irf is None:
yir = yradius/2.5
xir = xradius/2.5
else:
yir = yradius*irf
xir = xradius*irf
for i,angle in enumerate(normData):
endAngle = (startAngle + (angle * whichWay)) #% 360
aa = abs(startAngle-endAngle)
if aa<1e-5:
startAngle = endAngle
continue
if startAngle < endAngle:
a1 = startAngle
a2 = endAngle
else:
a1 = endAngle
a2 = startAngle
startAngle = endAngle
#if we didn't use %stylecount here we'd end up with the later sectors
#all having the default style
sectorStyle = self.slices[i%styleCount]
# is it a popout?
cx, cy = centerx, centery
if sectorStyle.popout != 0:
# pop out the sector
averageAngle = (a1+a2)/2.0
aveAngleRadians = averageAngle * pi/180.0
popdistance = sectorStyle.popout
cx = centerx + popdistance * cos(aveAngleRadians)
cy = centery + popdistance * sin(aveAngleRadians)
if n > 1:
theSector = Wedge(cx, cy, xradius, a1, a2, yradius=yradius, radius1=xir, yradius1=yir)
elif n==1:
theSector = Wedge(cx, cy, xradius, a1, a2, yradius=yradius, radius1=xir, yradius1=yir, annular=True)
theSector.fillColor = sectorStyle.fillColor
theSector.strokeColor = sectorStyle.strokeColor
theSector.strokeWidth = sectorStyle.strokeWidth
theSector.strokeDashArray = sectorStyle.strokeDashArray
shader = sectorStyle.shadingKind
if shader:
nshades = aa / float(sectorStyle.shadingAngle)
if nshades > 1:
shader = colors.Whiter if shader=='lighten' else colors.Blacker
nshades = 1+int(nshades)
shadingAmount = 1-sectorStyle.shadingAmount
if sectorStyle.shadingDirection=='normal':
dsh = (1-shadingAmount)/float(nshades-1)
shf1 = shadingAmount
else:
dsh = (shadingAmount-1)/float(nshades-1)
shf1 = 1
shda = (a2-a1)/float(nshades)
shsc = sectorStyle.fillColor
theSector.fillColor = None
for ish in xrange(nshades):
sha1 = a1 + ish*shda
sha2 = a1 + (ish+1)*shda
shc = shader(shsc,shf1 + dsh*ish)
if n > 1:
shSector = Wedge(cx, cy, xradius, sha1, sha2, yradius=yradius, radius1=xir, yradius1=yir)
elif n==1:
shSector = Wedge(cx, cy, xradius, sha1, sha2, yradius=yradius, radius1=xir, yradius1=yir, annular=True)
shSector.fillColor = shc
shSector.strokeColor = None
shSector.strokeWidth = 0
g.add(shSector)
g.add(theSector)
# now draw a label
if labels[i] and sectorStyle.visible and sectorStyle.label_visible:
averageAngle = (a1+a2)/2.0
aveAngleRadians = averageAngle*pi/180.0
labelRadius = sectorStyle.labelRadius
labelX = centerx + (0.5 * self.width * cos(aveAngleRadians) * labelRadius)
labelY = centery + (0.5 * self.height * sin(aveAngleRadians) * labelRadius)
rx = xradius*labelRadius
ry = yradius*labelRadius
l = _addWedgeLabel(self,labels[i],averageAngle,labelX,labelY,sectorStyle)
if checkLabelOverlap:
l._origdata = { 'x': labelX, 'y':labelY, 'angle': averageAngle,
'rx': rx, 'ry':ry, 'cx':cx, 'cy':cy,
'bounds': l.getBounds(),
}
L_add(l)
if checkLabelOverlap and L:
fixLabelOverlaps(L)
for l in L: g.add(l)
return g
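    # Editorial geometry sketch for the multi-series branch above: with two
    # series, height 300 (yradius 150) and the default inner fraction,
    # yir = (150/2.5)/2 = 30 and ydr = (150-30)/2 = 60, so series 0 draws its
    # wedges between radii 30 and 90 and series 1 between 90 and 150,
    # producing concentric rings.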
def draw(self):
g = Group()
g.add(self.makeSectors())
return g
def sample1():
"Make up something from the individual Sectors"
d = Drawing(400, 400)
g = Group()
s1 = Wedge(centerx=200, centery=200, radius=150, startangledegrees=0, endangledegrees=120, radius1=100)
s1.fillColor=colors.red
s1.strokeColor=None
d.add(s1)
s2 = Wedge(centerx=200, centery=200, radius=150, startangledegrees=120, endangledegrees=240, radius1=100)
s2.fillColor=colors.green
s2.strokeColor=None
d.add(s2)
s3 = Wedge(centerx=200, centery=200, radius=150, startangledegrees=240, endangledegrees=260, radius1=100)
s3.fillColor=colors.blue
s3.strokeColor=None
d.add(s3)
s4 = Wedge(centerx=200, centery=200, radius=150, startangledegrees=260, endangledegrees=360, radius1=100)
s4.fillColor=colors.gray
s4.strokeColor=None
d.add(s4)
return d
def sample2():
"Make a simple demo"
d = Drawing(400, 400)
dn = Doughnut()
dn.x = 50
dn.y = 50
dn.width = 300
dn.height = 300
dn.data = [10,20,30,40,50,60]
d.add(dn)
return d
def sample3():
"Make a more complex demo"
d = Drawing(400, 400)
dn = Doughnut()
dn.x = 50
dn.y = 50
dn.width = 300
dn.height = 300
dn.data = [[10,20,30,40,50,60], [10,20,30,40]]
dn.labels = ['a','b','c','d','e','f']
d.add(dn)
return d
def sample4():
"Make a more complex demo with Label Overlap fixing"
d = Drawing(400, 400)
dn = Doughnut()
dn.x = 50
dn.y = 50
dn.width = 300
dn.height = 300
dn.data = [[10,20,30,40,50,60], [10,20,30,40]]
dn.labels = ['a','b','c','d','e','f']
dn.checkLabelOverlap = True
d.add(dn)
return d
if __name__=='__main__':
from reportlab.graphics.renderPDF import drawToFile
d = sample1()
drawToFile(d, 'doughnut1.pdf')
d = sample2()
drawToFile(d, 'doughnut2.pdf')
d = sample3()
drawToFile(d, 'doughnut3.pdf')
|
piMoll/SEILAPLAN
|
lib/reportlab/graphics/charts/doughnut.py
|
Python
|
gpl-2.0
| 19,476 | 0.011861 |
# DESCRIPTION
# Renders a PNG image like bacteria that mutate color as they spread. TRY IT. The output is awesome.
# DEPENDENCIES
# python 3 with numpy, queue, and pyimage modules installed (and others--see the import statements).
# USAGE
# Run this script through a Python interpreter without any parameters, and it will use a default set of parameters:
# python /path/to_this_script/color_growth.py
# To see available parameters, run this script with the --help switch:
# python /path/to_this_script/color_growth.py --help
# NOTES
# - GitHub user `scribblemaniac` sped up this script (with a submitted pull request) by orders of magnitude vs. an earlier version of the script. An image that took seven minutes to render took 5 seconds after speedup.
# - Output file names are based on the date and time and random characters. Inspired by and drastically evolved from `color_fibers.py`, which was horked and adapted from:
#
# https://scipython.com/blog/computer-generated-contemporary-art
#
# KNOWN ISSUES
# See help for `--RANDOM_SEED`.
# CODE
# TO DO
# - figure out whether I broke RND continuity? It would seem the same presets are no longer producing the same results?
# - isolate what situation didn't create a new preset / anim folder when I expected it to, and fix that (or document in help).
# - make naming convention of variables consistent? I think I'm all over the place with this . . . :p
# - possibly things in the color_growth_v1.py's TO DO list.
# - determine whether any code in the fast fork (now this script) is leftover from color_growth_v1.py, and delete it?
# - make it properly use negative or > 8 growth-clip values again? Since the color_growth_fast.py fork it isn't.
# VERSION HISTORY
# v2.8.7:
# Edit speedup credit comment.
# START IMPORTS AND GLOBALS
ColorGrowthPyVersionString = 'v2.8.7'
import datetime
import random
import argparse
import ast
import os.path
import sys
import re
import queue
from more_itertools import unique_everseen
import platform
# I'm also using another pseudorandom number generator built into numpy as np:
import numpy as np
from PIL import Image
# Defaults which will be overridden if arguments of the same name are provided to the script:
WIDTH = 600
HEIGHT = 300
RSHIFT = 8
STOP_AT_PERCENT = 1
SAVE_EVERY_N = 0
RAMP_UP_SAVE_EVERY_N = False
START_COORDS_RANGE = (1,3)
GROWTH_CLIP = (0,5)
SAVE_PRESET = True
animationFrameCounter = 0
renderedFrameCounter = 0
saveNextFrameNumber = 0
imageFrameFileName = ''
padFileNameNumbersDigitsWidth = 0
# SOME BACKGROUND COLOR options;
# any of these (uncomment only one) are made into a list later by ast.literal_eval(BG_COLOR) :
# BG_COLOR = "[157,140,157]" # Medium purplish gray
BG_COLOR = "[252,251,201]" # Buttery light yellow
# BG_COLOR = "[255,63,52]" # Scarlet-scarlet-orange
RECLAIM_ORPHANS = True
BORDER_BLEND = True
TILEABLE = False
SCRIPT_ARGS_STR = ''
# END GLOBALS
# START OPTIONS (which affect globals)
# allows me to have a version string parser option that prints
# and exits; re: https://stackoverflow.com/a/41575802/1397555
class versionStringPrintAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
print('color_growth.py', ColorGrowthPyVersionString)
parser.exit()
PARSER = argparse.ArgumentParser(description=
'Renders a PNG image like bacteria that produce random color mutations \
as they grow over a surface. Output file names are named after the date \
and time. Inspired by and drastically evolved from colorFibers.py, which \
was horked and adapted from \
https://scipython.com/blog/computer-generated-contemporary-art/ \
NOTE: CLI options have had breaking changes over time. If reusing settings \
from a previous version, check those settings first if you get errors. \
NOTE: by default the --RAMP_UP_SAVE_EVERY_N switch has a False value, but \
you probably want it True if you save animation frames (--SAVE_EVERY_N).'
)
PARSER.register('action', 'versionStringPrint', versionStringPrintAction)
PARSER.add_argument('-v', '--VERSION', nargs=0, action='versionStringPrint', help='Print version number and exit.')
PARSER.add_argument('--WIDTH', type=int, help=
'WIDTH of output image(s). Default ' + str(WIDTH) + '.')
PARSER.add_argument('--HEIGHT', type=int, help=
'HEIGHT of output image(s). Default ' + str(HEIGHT) + '.')
PARSER.add_argument('-r', '--RSHIFT', type=int, help=
'Vary R, G and B channel values randomly in the range negative this \
value or positive this value. Note that this means the range is RSHIFT \
    times two. Default ' + str(RSHIFT) + '.'
)
PARSER.add_argument('-b', '--BG_COLOR', type=str, help=
'Canvas color. Expressed as a python list or single number that will be \
assigned to every value in an RGB triplet. If a list, give the RGB \
values in the format \'[255,70,70]\' (if you add spaces after the \
commas, you must surround the parameter in single or double quotes). \
This example would produce a deep red, as Red = 255, Green = 70, Blue = \
70). A single number example like just 150 will result in a medium-light \
gray of [150,150,150] (Red = 150, Green = 150, Blue = 150). All values \
must be between 0 and 255. Default ' + str(BG_COLOR) + '.'
)
PARSER.add_argument('-c', '--COLOR_MUTATION_BASE', type=str, help=
'Base initialization color for pixels, which randomly mutates as \
painting proceeds. If omitted, defaults to whatever BG_COLOR is. If \
included, may differ from BG_COLOR. This option must be given in the \
same format as BG_COLOR. You may make the base initialization color of \
each origin random by specifying "--COLOR_MUTATION_BASE random".'
)
PARSER.add_argument('--BORDER_BLEND', type=str, help=
'If this is enabled, the hard edges between different colonies will be \
blended together. Enabled by default. To disable pass \
--BORDER_BLEND False or --BORDER_BLEND 0.'
)
PARSER.add_argument('--TILEABLE', type=str, help=
'Make the generated image seamlessly tile. Colonies will wrap around \
the edge when they encounter it. Disabled by default. Enable with \
--TILEABLE True or --TILEABLE 1.'
)
PARSER.add_argument('--STOP_AT_PERCENT', type=float, help=
'What percent canvas fill to stop painting at. To paint until the canvas \
is filled (which can take extremely long for higher resolutions), pass 1 \
(for 100 percent). If not 1, value should be a percent expressed as a \
    decimal (float) between 0 and 1 (e.g. 0.4 for 40 percent). Default ' + \
str(STOP_AT_PERCENT) + '. For high --failedMutationsThreshold or random \
walk (neither of which is implemented at this writing), 0.475 (around 48 \
percent) is recommended. Stop percent is adhered to approximately (it \
could be much less efficient to make it exact).'
)
PARSER.add_argument('-a', '--SAVE_EVERY_N', type=int, help=
'Every N successful coordinate and color mutations, save an animation \
frame into a subfolder named after the intended final art file. To save \
every frame, set this to 1, or to save every 3rd frame set it to 3, etc. \
Saves zero-padded numbered frames to a subfolder which may be strung \
together into an animation of the entire painting process (for example \
via ffmpegAnim.sh). May substantially slow down render, and can also \
create many, many gigabytes of data, depending. ' + str(SAVE_EVERY_N) + \
' by default. To disable, set it to 0 with: -a 0 OR: --SAVE_EVERY_N 0. \
NOTE: If this is nonzero and you do not set --RAMP_UP_SAVE_EVERY_N to \
either True or False (see), the default --RAMP_UP_SAVE_EVERY_N False \
will override to True, as it is strongly suggested you want that if \
you render an animation. If that is not what you want, manually set \
--RAMP_UP_SAVE_EVERY_N False.'
)
PARSER.add_argument('--RAMP_UP_SAVE_EVERY_N', type=str, help=
'Increase the value of --SAVE_EVERY_N over time. Without this, the \
animation may seem to slow toward the middle and end, because the \
interval --SAVE_EVERY_N is constant; the same number of new mutated \
coordinates is spread over a wider area every save frame. \
--RAMP_UP_SAVE_EVERY_N causes the value of --SAVE_EVERY_N to increase \
over time, like dragging the corner of a selection rectangle to increase \
rendered area over the whole canvas. The result is an apparently \
more visually linear growth (in all growth vectors) and a faster \
animation (and faster animation render, as less time is made saving \
fewer frames), but technically the growth rate (vs. saved animation frames) \
actually increases over time. Default ' + str(RAMP_UP_SAVE_EVERY_N) + '. \
NOTES: 1) Relies on --SAVE_EVERY_N being nonzero. Script will warn and exit \
if --RAMP_UP_SAVE_EVERY_N is True and --SAVE_EVERY_N is 0 (zero). \
2) Save frame intervals near start of animation may be similar to \
--SAVE_EVERY_N value, but as noted increase (and can increase a lot) \
over time. 3) To re-render animations created prior to v2.6.6 the same \
as at their creation --RAMP_UP_SAVE_EVERY_N must be False (as this feature \
was introduced in v2.6.6). 4) See related NOTE for --SAVE_EVERY_N.'
)
PARSER.add_argument('-s', '--RANDOM_SEED', type=int, help=
'Seed for random number generators (random and numpy.random are used). \
Default generated by random library itself and added to render file name \
for reference. Can be any integer in the range 0 to 4294967296 (2^32). \
If not provided, it will be randomly chosen from that range (meta!). If \
--SAVE_PRESET is used, the chosen seed will be saved with the preset \
.cgp file. KNOWN ISSUE: functional differences between random generators \
    of different versions of Python and/or numpy, maybe on different platforms, \
produce different output from the same random seed. ALSO, some versions of \
this script had code that accidentally altered the pseudorandom number \
sequence via something outside the intended color growth algorithm. The \
result was different output from the same --RANDOM_SEED. If you get \
different output than before from the same --RANDOM_SEED, search for and \
    examine the VESTIGIAL CODE comment(s), and try uncommenting the line of code \
they detail.'
)
PARSER.add_argument('-q', '--START_COORDS_N', type=int, help=
'How many origin coordinates to begin coordinate and color mutation \
from. Default randomly chosen from range in --START_COORDS_RANGE (see). \
Random selection from that range is performed *after* random seeding by \
--RANDOM_SEED, so that the same random seed will always produce the same \
number of start coordinates. I haven\'t tested whether this will work if \
the number exceeds the number of coordinates possible in the image. \
Maybe it would just overlap itself until they\'re all used?'
)
PARSER.add_argument('--START_COORDS_RANGE', help=
'Random integer range to select a random number of --START_COORDS_N if \
--START_COORDS_N is not provided. Default (' + \
str(START_COORDS_RANGE[0]) + ',' + str(START_COORDS_RANGE[1]) + '). Must \
be provided in that form (a string surrounded by double quote marks (for \
Windows) which can be evaluated to a python tuple), and in the range 0 \
to 4294967296 (2^32), but I bet that sometimes nothing will render if \
you choose a max range number orders of magnitude higher than the number \
of pixels available in the image. I probably would never make the max \
    range higher than (number of pixels in image) / 62500 (which is 250 \
squared). Will not be used if [-q | START_COORDS_N] is provided.'
)
PARSER.add_argument('--CUSTOM_COORDS_AND_COLORS', type=str, help=
    'Custom coordinate locations and colors list to initialize the coordinate \
mutation queue with. In complex nested lists of tuples _and lists_ \
format (I know, it\'s crazy), surrounded by single or double quote marks, \
OR passed without any space characters in the parameter, like: \
\'[[(coordinate),[color]], [(coordinate),[color]], [(coordinate),[color]]]\', \
or more accurately like: \
[[(50,40),[255,0,255]],[(88,84),[0,255,255]]]. NOTES: \
1) Because this overrides --START_COORDS_N, --START_COORDS_RANGE, and \
--COLOR_MUTATION_BASE, if you want random numbers of coordinates and \
coordinate positions with this, contrive them via another custom script \
or program, and pass them to this. 2) Internally in code the coordinates \
    are zero-index-based, which means internal 0 is human 1, 1 is 2, 4 is 5, etc.; BUT \
that\'s not human-friendly, so use the actual values (1 is 1!) \
and the program will just subtract 1 for the zero-based indexing. 3) \
Although internally in code, coordinates are represented as (y,x) tuples \
(or (down,across), that confuses me and isn\'t standard or expected for \
humans, so in this parameter coordinate are represented as (x,y) (or \
(across,down), and the code swaps them before assignment to real, \
internal tuples. You\'re welcome.'
)
PARSER.add_argument('--GROWTH_CLIP', type=str, help=
'Affects seeming "thickness" (or viscosity) of growth. A Python tuple \
expressed as a string (must be surrounded by double quote marks for \
Windows). Default ' + str(GROWTH_CLIP) + '. In growth into adjacent \
coordinates, the maximum number of possible neighbor coordinates to grow \
into is 8 (which may only ever happen with a start coordinate: in \
practical terms, the most coordinates that may usually be expanded into \
is 7). The first number in the tuple is the minimum number of \
coordinates to randomly select, and the second number is the maximum. \
The second must be greater than the first. The first may be lower than 0 \
and will be clipped to 1, making selection of only 1 neighbor coordinate \
more common. The second number may be higher than 8 (or the number of \
available coordinates as the case may be), and will be clipped to the \
maximum number of available coordinates, making selection of all \
available coordinates more common. If the first number is a positive \
integer <= 7, at least that many coordinates will always be selected \
when possible. If the second number is a positive integer >= 1, at most \
that many coordinates will ever be selected. A negative first number or \
low first number clip will tend toward a more evenly spreading liquid \
appearance, and a lower second number clip will cause a more \
stringy/meandering/splatty path or form (as it spreads less uniformly). \
With an effectively more viscous clip like "(2,4)", smaller \
streamy/flood things may traverse a distance faster. Some tuples make \
--RECLAIM_ORPHANS quickly fail, some make it virtually never fail.'
)
PARSER.add_argument('--RECLAIM_ORPHANS', type=str, help=
'Coordinates can end up never mutating color, and remain the same color \
as --BG_COLOR (which may result in the appearance of pixels that seem \
like flecks or discontiguous color). This may be more likely with a \
    --GROWTH_CLIP range nearer zero (higher viscosity). This option corrals \
these orphan coordinates and revives them so that their color will \
mutate. Default ' + str(RECLAIM_ORPHANS) + '. To disable pass \
--RECLAIM_ORPHANS False or --RECLAIM_ORPHANS 0.'
)
PARSER.add_argument('--SAVE_PRESET', type=str, help=
'Save all parameters (which are passed to this script) to a .cgp (color \
growth preset) file. If provided, --SAVE_PRESET must be a string \
representing a boolean state (True or False or 1 or 0). Default '+ \
str(SAVE_PRESET) +'. The .cgp file can later be loaded with the \
--LOAD_PRESET switch to create either new or identical work from the \
same parameters (whether it is new or identical depends on the switches, \
--RANDOM_SEED being the most consequential). This with [-a | \
--SAVE_EVERY_N] can recreate gigabytes of exactly the same animation \
frames using just a preset. NOTES: 1) --START_COORDS_RANGE and its \
accompanying value are not saved to config files, and the resultantly \
generated [-q | --START_COORDS_N] is saved instead. 2) You may add \
arbitrary text (such as notes) to the second and subsequent lines of a \
saved preset, as only the first line is used.'
)
PARSER.add_argument('--LOAD_PRESET', type=str, help=
'A preset file (as first created by --SAVE_PRESET) to use. Empty (none \
used) by default. Not saved to any preset. At this writing only a single \
file name is handled, not a path, and it is assumed the file is in the \
current directory. A .cgp preset file is a plain text file on one line, \
which is a collection of SWITCHES to be passed to this script, written \
literally the way you would pass them to this script. NOTE: you may load \
a preset and override any switches in the preset by using the override \
after --LOAD_PRESET. For example, if a preset contains --RANDOM SEED \
98765 but you want to override it with 12345, pass --LOAD_PRESET \
<preset_filename.cgp> --RANDOM_SEED 12345 to this script.'
)
# START ARGUMENT PARSING
# DEVELOPER NOTE: Throughout the below argument checks, wherever a user does not specify an argument and I use a default (as defaults are defined near the start of working code in this script), add that default switch and switch value pair to argparse, for use by the --SAVE_PRESET feature (which saves everything except for the script path ([0]) to a preset). I take this approach because I can't check if a default value was supplied if I do that in the PARSER.add_argument function --
# http://python.6.x6.nabble.com/argparse-tell-if-arg-was-defaulted-td1528162.html
# -- so what I do is check for None (and then supply a default and add it to argparse if None is found). The check for None isn't literal: it's in the else clause after an if (value) check (if the if check fails, that means the value is None, and else: is used) :
print('')
print('Processing any arguments to script . . .')
# allows me to override parser arguments declared in this namespace:
class ARGUMENTS_NAMESPACE:
pass
argumentsNamespace = ARGUMENTS_NAMESPACE()
# Weirdly, for the behavior I want, I must call parse_args a few times:
# - first to get the --LOAD_PRESET CLI argument if there is any
# - then potentially many times to iterate over arguments got from the
# .cgp config file specified
# - then again to override any of those with options passed via CLI
# which I want to override those.
# DEPRECATED: call parse_args with no parameters (except that it is done before the above out of necessity):
# ARGS = PARSER.parse_args()
# NOW, create a namespace that allows loaded .cgp file parameters to overwrite values in:
# re: https://docs.python.org/3/library/argparse.html#argparse.Namespace
# re: https://docs.python.org/3/library/argparse.html#argparse.ArgumentParser.parse_args
ARGS = PARSER.parse_args(args=sys.argv[1:], namespace=argumentsNamespace)
# Build dictionary from ARGS and use it to build global SCRIPT_ARGS_STR;
# clean it up later (we don't want elements in it with value "None"):
argsDict = vars(ARGS)
# modify like this:
# argsDict['COLOR_MUTATION_BASE'] = '[0,0,0]'
# IF A PRESET file is given, load its contents and make its parameters override anything else that was just parsed through the argument parser:
if ARGS.LOAD_PRESET:
LOAD_PRESET = ARGS.LOAD_PRESET
with open(LOAD_PRESET) as f:
SWITCHES = f.readline()
# Remove spaces from parameters in tuples like (1, 13), because it
# mucks up this parsing:
SWITCHES = re.sub('(\([0-9]*),\s*([0-9]*\))', r'\1,\2', SWITCHES)
# removes any start and end whitespace that can throw off
# the following parsing:
SWITCHES = SWITCHES.strip()
SWITCHES = SWITCHES.split(' ')
for i in range(0, len(SWITCHES), 2):
ARGS = PARSER.parse_args(args=[SWITCHES[i], SWITCHES[i+1]], namespace=argumentsNamespace)
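# Worked example of the loop above (editorial sketch): a .cgp line such as
#   --WIDTH 800 --GROWTH_CLIP (0, 5)
# is first collapsed to '--WIDTH 800 --GROWTH_CLIP (0,5)' by the re.sub above,
# then consumed two tokens at a time as ('--WIDTH', '800') and
# ('--GROWTH_CLIP', '(0,5)').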
# Doing this again here so that anything in the command line overrides:
ARGS = PARSER.parse_args(args=sys.argv[1:], namespace=argumentsNamespace)
# If a user supplied an argument (so that WIDTH has a value (is not None), use that:
if ARGS.WIDTH:
# It is in argsparse already, so it will be used by --WIDTH:
WIDTH = ARGS.WIDTH
else:
# If not, leave the default as it was defined globally, and add to argsDict
# so it can be saved in a .cfg preset:
argsDict['WIDTH'] = WIDTH
if ARGS.HEIGHT:
HEIGHT = ARGS.HEIGHT
else:
argsDict['HEIGHT'] = HEIGHT
if ARGS.RSHIFT:
RSHIFT = ARGS.RSHIFT
else:
argsDict['RSHIFT'] = RSHIFT
if ARGS.BG_COLOR:
    # For preset saving, remove spaces and write back to argparse,
# OR ADD IT (if it was gotten through argparse), so a preset saved by
# --SAVE_PRESET won't cause errors:
BG_COLOR = ARGS.BG_COLOR
BG_COLOR = re.sub(' ', '', BG_COLOR)
argsDict['BG_COLOR'] = BG_COLOR
else:
argsDict['BG_COLOR'] = BG_COLOR
# Convert BG_COLOR (as set from ARGS.BG_COLOR or default) string to python list for use
# by this script, re: https://stackoverflow.com/a/1894296/1397555
BG_COLOR = ast.literal_eval(BG_COLOR)
# See comments in ARGS.BG_COLOR handling; handled the same:
if not ARGS.CUSTOM_COORDS_AND_COLORS:
if ARGS.COLOR_MUTATION_BASE:
COLOR_MUTATION_BASE = ARGS.COLOR_MUTATION_BASE
COLOR_MUTATION_BASE = re.sub(' ', '', COLOR_MUTATION_BASE)
argsDict['COLOR_MUTATION_BASE'] = COLOR_MUTATION_BASE
if ARGS.COLOR_MUTATION_BASE.lower() == 'random':
COLOR_MUTATION_BASE = 'random'
else:
COLOR_MUTATION_BASE = ast.literal_eval(COLOR_MUTATION_BASE)
else: # Write same string as BG_COLOR, after the same silly string manipulation as
# for COLOR_MUTATION_BASE, but more ridiculously now _back_ from that to
# a string again:
BG_COLOR_TMP_STR = str(BG_COLOR)
BG_COLOR_TMP_STR = re.sub(' ', '', BG_COLOR_TMP_STR)
argsDict['COLOR_MUTATION_BASE'] = BG_COLOR_TMP_STR
# In this case we're using a list as already assigned to BG_COLOR:
COLOR_MUTATION_BASE = list(BG_COLOR)
# If I hadn't used list(), COLOR_MUTATION_BASE would be a reference to BG_COLOR (which
# is default Python list handling behavior with the = operator), and when I changed either,
# "both" would change (but they would really just be different names for the same list).
# I want them to be different.
# purple = [255, 0, 255] # Purple. In prior commits of this script, this has been defined
# and unused, just like in real life. Now, it is commented out or not even defined, just
# like it is in real life.
if ARGS.RECLAIM_ORPHANS:
RECLAIM_ORPHANS = ast.literal_eval(ARGS.RECLAIM_ORPHANS)
else:
argsDict['RECLAIM_ORPHANS'] = RECLAIM_ORPHANS
if ARGS.BORDER_BLEND:
BORDER_BLEND = ast.literal_eval(ARGS.BORDER_BLEND)
else:
argsDict['BORDER_BLEND'] = BORDER_BLEND
if ARGS.TILEABLE:
TILEABLE = ast.literal_eval(ARGS.TILEABLE)
else:
argsDict['TILEABLE'] = TILEABLE
if ARGS.STOP_AT_PERCENT:
STOP_AT_PERCENT = ARGS.STOP_AT_PERCENT
else:
argsDict['STOP_AT_PERCENT'] = STOP_AT_PERCENT
if ARGS.SAVE_EVERY_N:
SAVE_EVERY_N = ARGS.SAVE_EVERY_N
else:
argsDict['SAVE_EVERY_N'] = SAVE_EVERY_N
# Conditional override:
if ARGS.SAVE_EVERY_N and not ARGS.RAMP_UP_SAVE_EVERY_N:
RAMP_UP_SAVE_EVERY_N = True
argsDict['RAMP_UP_SAVE_EVERY_N'] = 'True'
if ARGS.RAMP_UP_SAVE_EVERY_N:
RAMP_UP_SAVE_EVERY_N = ast.literal_eval(ARGS.RAMP_UP_SAVE_EVERY_N)
if SAVE_EVERY_N == 0 and RAMP_UP_SAVE_EVERY_N == True:
print('--RAMP_UP_SAVE_EVERY_N is True, but --SAVE_EVERY_N is 0. --SAVE_EVERY_N must be nonzero if --RAMP_UP_SAVE_EVERY_N is True. Either set --SAVE_EVERY_N to something other than 0, or set RAMP_UP_SAVE_EVERY_N to False. Exiting script.')
sys.exit(2)
else:
argsDict['RAMP_UP_SAVE_EVERY_N'] = RAMP_UP_SAVE_EVERY_N
if ARGS.RANDOM_SEED:
RANDOM_SEED = ARGS.RANDOM_SEED
else:
RANDOM_SEED = random.randint(0, 4294967296)
argsDict['RANDOM_SEED'] = RANDOM_SEED
# Use that seed straightway:
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
# BEGIN STATE MACHINE "Megergeberg 5,000."
# DOCUMENTATION.
# Possible combinations of these variables to handle; "coords" means START_COORDS_N, RNDcoords means START_COORDS_RANGE:
# --
# ('coords', 'RNDcoords')     : use coords, delete any RNDcoords.
# ('coords', 'noRNDcoords')   : use coords, no need to delete any RNDcoords.
#     These two cases are handled by: if coords: ... if RNDcoords: ...
# ('noCoords', 'RNDcoords')   : assign user-provided RNDcoords for use (overwrite defaults).
# ('noCoords', 'noRNDcoords') : continue with RNDcoords defaults (don't overwrite defaults).
#     These two by: else: ... if RNDcoords: ... else: ...; both cases then generate coords
#     outside that last if/else (using whatever RNDcoords ends up being, user-provided or default).
# --
# I COULD just have four different, independent "if" checks explicitly for those four pairs and work from that, but this is more compact logic (fewer checks).
# If --START_COORDS_N is provided by the user, use it, unless there is overriding CUSTOM_COORDS_AND_COLORS:
if not ARGS.CUSTOM_COORDS_AND_COLORS:
if ARGS.START_COORDS_N:
START_COORDS_N = ARGS.START_COORDS_N
print('Will use the provided --START_COORDS_N, ', START_COORDS_N)
if ARGS.START_COORDS_RANGE:
            # .. and delete any --START_COORDS_RANGE and its value from argparse (as it will not be used and would best not be stored in the .cgp config file via --SAVE_PRESET):
argsDict.pop('START_COORDS_RANGE', None)
print(
'** NOTE: ** You provided both [-q | --START_COORDS_N] and --START_COORDS_RANGE, \
but the former overrides the latter (the latter will not be used). This program \
disregards the latter from the parameters list.'
)
else: # If --START_COORDS_N is _not_ provided by the user..
if ARGS.START_COORDS_RANGE:
# .. but if --START_COORDS_RANGE _is_ provided, assign from that:
START_COORDS_RANGE = ast.literal_eval(ARGS.START_COORDS_RANGE)
STR_PART = 'from user-supplied range ' + str(START_COORDS_RANGE)
else: # .. otherwise use the default START_COORDS_RANGE:
STR_PART = 'from default range ' + str(START_COORDS_RANGE)
START_COORDS_N = random.randint(START_COORDS_RANGE[0], START_COORDS_RANGE[1])
argsDict['START_COORDS_N'] = START_COORDS_N
print('Using', START_COORDS_N, 'start coordinates, by random selection ' + STR_PART)
# END STATE MACHINE "Megergeberg 5,000."
if ARGS.CUSTOM_COORDS_AND_COLORS:
CUSTOM_COORDS_AND_COLORS = ARGS.CUSTOM_COORDS_AND_COLORS
CUSTOM_COORDS_AND_COLORS = re.sub(' ', '', CUSTOM_COORDS_AND_COLORS)
argsDict['CUSTOM_COORDS_AND_COLORS'] = CUSTOM_COORDS_AND_COLORS
CUSTOM_COORDS_AND_COLORS = ast.literal_eval(ARGS.CUSTOM_COORDS_AND_COLORS)
if ARGS.GROWTH_CLIP: # See comments in ARGS.BG_COLOR handling. Handled the same.
GROWTH_CLIP = ARGS.GROWTH_CLIP
GROWTH_CLIP = re.sub(' ', '', GROWTH_CLIP)
argsDict['GROWTH_CLIP'] = GROWTH_CLIP
GROWTH_CLIP = ast.literal_eval(GROWTH_CLIP)
    # NOTE: VESTIGIAL CODE HERE that will alter pseudorandom determinism if commented vs. not commented out; if a render from a preset doesn't produce the same result as it once did, try uncommenting the next line! :
# zax_blor = ('%03x' % random.randrange(16**6))
else:
temp_str = str(GROWTH_CLIP)
temp_str = re.sub(' ', '', temp_str)
    argsDict['GROWTH_CLIP'] = temp_str  # the space-stripped string, so a saved preset holds a parseable value
if ARGS.SAVE_PRESET:
SAVE_PRESET = ast.literal_eval(ARGS.SAVE_PRESET)
else:
argsDict['SAVE_PRESET'] = SAVE_PRESET
# END ARGUMENT PARSING
# Remove arguments from argsDict whose values are 'None' from that (they cause problems when doing things with the arguments list via CLI, as intended) :
for key in argsDict:
# if the key value is 'None', don't bother saving it; otherwise save it:
if argsDict[key] != None:
keyValStr = '--' + key + ' ' + str(argsDict[key])
SCRIPT_ARGS_STR += keyValStr + ' '
# removes whitespace from start and end that would mess up parse code earlier in the script (if I didn't do this there also) :
SCRIPT_ARGS_STR = SCRIPT_ARGS_STR.strip()
# ADDITIONAL GLOBALS defined here:
allPixelsN = WIDTH * HEIGHT
stopRenderAtPixelsN = int(allPixelsN * STOP_AT_PERCENT)
# If RAMP_UP_SAVE_EVERY_N is True, create list saveFramesAtCoordsPaintedArray with increasing values for when to save N evolved coordinates to animation frames:
saveFramesAtCoordsPaintedArray = []
if SAVE_EVERY_N != 0 and RAMP_UP_SAVE_EVERY_N == True:
allPixelsNdividedBy_SAVE_EVERY_N = allPixelsN / SAVE_EVERY_N
divisor = 1 / allPixelsNdividedBy_SAVE_EVERY_N
saveFramesAtCoordsPaintedMultipliers = [x * divisor for x in range(0, int(allPixelsNdividedBy_SAVE_EVERY_N)+1)]
for multiplier in saveFramesAtCoordsPaintedMultipliers:
mod_w = WIDTH * multiplier
mod_h = HEIGHT * multiplier
mod_area = mod_w * mod_h
saveFramesAtCoordsPaintedArray.append(int(mod_area))
# Deduplicate elements in the list but maintain order:
saveFramesAtCoordsPaintedArray = list(unique_everseen(saveFramesAtCoordsPaintedArray))
# Because that resulting list doesn't include the ending number, add it:
saveFramesAtCoordsPaintedArray.append(stopRenderAtPixelsN)
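# Worked sketch of the ramp above: for a 600x300 canvas with SAVE_EVERY_N 4500,
# allPixelsN / SAVE_EVERY_N = 40, so 41 multipliers from 0 to 1 are squared
# into areas, and frames land at roughly 0, 112, 450, 1012, ... painted
# coordinates -- quadratic spacing (sparser saves later) instead of linear.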
# If RAMP_UP_SAVE_EVERY_N is False, create list saveFramesAtCoordsPaintedArray with values at constant intervals for when to save animation frames:
if SAVE_EVERY_N != 0 and RAMP_UP_SAVE_EVERY_N == False:
saveFramesAtCoordsPaintedArray = [x * SAVE_EVERY_N for x in range(0, int(stopRenderAtPixelsN/SAVE_EVERY_N)+1 )]
    # Because that range doesn't include the ending number, add it:
    saveFramesAtCoordsPaintedArray.append(stopRenderAtPixelsN)
# Values of these used elsewhere:
saveFramesAtCoordsPaintedArrayIDX = 0
saveFramesAtCoordsPaintedArrayMaxIDX = (len(saveFramesAtCoordsPaintedArray) - 1)
def is_coord_in_bounds(y, x):
return y >= 0 and y < HEIGHT and x >= 0 and x < WIDTH
def is_color_valid(y, x, canvas):
return canvas[y][x][0] >= 0 # Negative number used for invalid color
def get_rnd_unallocd_neighbors(y, x, canvas):
"""Returns both a set() of randomly selected empty neighbor coordinates to use
immediately, and a set() of neighbors to use later."""
# init an empty set we'll populate with neighbors (int tuples) and return:
rnd_neighbors_to_ret = []
unallocd_neighbors = set()
for i in range(-1, 2):
for j in range(-1, 2):
if TILEABLE:
if not (i == 0 and j == 0) and not is_color_valid((y+i) % HEIGHT, (x+j) % WIDTH, canvas):
unallocd_neighbors.add(((y+i) % HEIGHT, (x+j) % WIDTH))
else:
if not (i == 0 and j == 0) and is_coord_in_bounds(y+i, x+j) and not is_color_valid(y+i, x+j, canvas):
unallocd_neighbors.add((y+i, x+j))
if unallocd_neighbors: # If there is anything left in unallocd_neighbors:
# START GROWTH_CLIP (VISCOSITY) CONTROL.
# Decide how many to pick:
n_neighbors_to_ret = np.clip(np.random.randint(GROWTH_CLIP[0], GROWTH_CLIP[1] + 1), 0, len(unallocd_neighbors))
# END GROWTH_CLIP (VISCOSITY) CONTROL.
        rnd_neighbors_to_ret = random.sample(list(unallocd_neighbors), n_neighbors_to_ret)  # list(): random.sample() on a set raises on Python 3.11+
for neighbor in rnd_neighbors_to_ret:
unallocd_neighbors.remove(neighbor)
return rnd_neighbors_to_ret, unallocd_neighbors
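# Worked example of the viscosity clip above (editorial sketch): with the
# default GROWTH_CLIP (0,5), np.random.randint(0, 6) proposes 0-5 picks; if
# only 3 unallocated neighbors exist, np.clip(..., 0, 3) caps the pick at 3,
# and random.sample then chooses that many distinct neighbors to grow into
# immediately, returning the rest for later use.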
def find_adjacent_color(y, x, canvas):
allocd_neighbors = []
for i in range(-1, 2):
for j in range(-1, 2):
if TILEABLE:
if not (i == 0 and j == 0) and is_color_valid((y+i) % HEIGHT, (x+j) % WIDTH, canvas):
allocd_neighbors.append(((y+i) % HEIGHT, (x+j) % WIDTH))
else:
if not (i == 0 and j == 0) and is_coord_in_bounds(y+i, x+j) and is_color_valid(y+i, x+j, canvas):
allocd_neighbors.append((y+i, x+j))
if not allocd_neighbors:
return None
else:
y, x = random.choice(allocd_neighbors)
return canvas[y][x]
def coords_set_to_image(canvas, render_target_file_name):
"""Creates and saves image from dict of Coordinate objects, HEIGHT and WIDTH definitions,
and a filename string."""
tmp_array = [[BG_COLOR if x[0] < 0 else x for x in row] for row in canvas]
tmp_array = np.asarray(tmp_array)
image_to_save = Image.fromarray(tmp_array.astype(np.uint8)).convert('RGB')
image_to_save.save(render_target_file_name)
def print_progress(newly_painted_coords):
"""Prints coordinate plotting statistics (progress report)."""
print('newly painted : total painted : target : canvas size : reclaimed orphans')
print(newly_painted_coords, ':', painted_coordinates, ':', \
stopRenderAtPixelsN, ':', allPixelsN, ':', orphans_to_reclaim_n)
def set_img_frame_file_name():
global padFileNameNumbersDigitsWidth
global renderedFrameCounter
global imageFrameFileName
renderedFrameCounter += 1
frameNumberStr = str(renderedFrameCounter)
imageFrameFileName = anim_frames_folder_name + '/' + frameNumberStr.zfill(padFileNameNumbersDigitsWidth) + '.png'
def save_animation_frame():
# Tells the function we are using global variables:
global animationFrameCounter
global saveNextFrameNumber
global saveFramesAtCoordsPaintedArrayIDX
global saveFramesAtCoordsPaintedArrayMaxIDX
# print('animationFrameCounter', animationFrameCounter, 'saveNextFrameNumber', saveNextFrameNumber)
if SAVE_EVERY_N != 0:
if (animationFrameCounter == saveNextFrameNumber):
# only increment the ~IDX if it will be in array bounds:
if (saveFramesAtCoordsPaintedArrayIDX + 1) < saveFramesAtCoordsPaintedArrayMaxIDX:
saveFramesAtCoordsPaintedArrayIDX += 1
saveNextFrameNumber = saveFramesAtCoordsPaintedArray[saveFramesAtCoordsPaintedArrayIDX]
set_img_frame_file_name()
# Only write frame if it does not already exist (allows resume of suspended / crashed renders) :
if os.path.exists(imageFrameFileName) == False:
# print("Animation render frame file does not exist; writing frame.")
coords_set_to_image(canvas, imageFrameFileName)
animationFrameCounter += 1
# END GLOBAL FUNCTIONS
# END OPTIONS AND GLOBALS
"""START MAIN FUNCTIONALITY."""
print('Initializing render script..')
# A nested list of RGB values (HEIGHT rows by WIDTH columns) which is used with tracking sets to fill a "canvas":
canvas = []
# A set of coordinates (tuples, not Coordinate objects) which are free for the taking:
unallocd_coords = set()
# A set of coordinates (again tuples) which are set aside (allocated) for use:
allocd_coords = set()
# A set of coordinates (again tuples) which have been color mutated and may no longer
# coordinate mutate:
filled_coords = set()
coord_queue = []
# Initialize the canvas list and unallocd_coords set (canvas being a nested list of
# [R,G,B] values indexed as canvas[y][x], with [-1,-1,-1] meaning not yet painted):
for y in range(0, HEIGHT): # over the rows (y):
canvas.append([])
for x in range(0, WIDTH): # over the columns, prep and add:
unallocd_coords.add((y, x))
canvas[y].append([-1,-1,-1])
# If ARGS.CUSTOM_COORDS_AND_COLORS was not passed to script, initialize allocd_coords set by random selection from unallocd_coords (and remove from unallocd_coords); structure of coords is (y,x)
if not ARGS.CUSTOM_COORDS_AND_COLORS:
print('no --CUSTOM_COORDS_AND_COLORS argument passed to script, so initializing coordinate locations randomly . . .')
    RNDcoord = random.sample(list(unallocd_coords), START_COORDS_N)  # list(): random.sample() on a set raises on Python 3.11+
for coord in RNDcoord:
coord_queue.append(coord)
if COLOR_MUTATION_BASE == "random":
canvas[coord[0]][coord[1]] = np.random.randint(0, 255, 3)
else:
canvas[coord[0]][coord[1]] = COLOR_MUTATION_BASE
# If ARGS.CUSTOM_COORDS_AND_COLORS was passed to script, init coords and their colors from it:
else:
print('--CUSTOM_COORDS_AND_COLORS argument passed to script, so initializing coords and colors from that. NOTE that this overrides --START_COORDS_N, --START_COORDS_RANGE, and --COLOR_MUTATION_BASE if those were provided.')
print('\n')
for element in CUSTOM_COORDS_AND_COLORS:
# SWAPPING those (on CLI they are x,y; here it wants y,x) ;
        # ALSO, this program kindly allows hoomans to not bother with zero-based indexing, which means 1 for hoomans is 0 for program, so subtracting 1 from both values:
        coord = (element[0][1]-1, element[0][0]-1)
coord_queue.append(coord)
color_values = np.asarray(element[1]) # np.asarray() gets it into same object type as elsewhere done and expected.
# print('adding color to canvas:', color_values) MINDING the x,y swap AND to modify the hooman 1-based index here, too! :
canvas[ element[0][1]-1 ][ element[0][0]-1 ] = color_values # LORF!
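    # Worked example (editorial sketch): the CLI element [(50,40),[255,0,255]]
    # becomes queue coordinate (39, 49) -- (y, x), zero-based -- and
    # canvas[39][49] is seeded with color [255, 0, 255].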
report_stats_every_n = 5000
report_stats_nth_counter = 0
# Render target file name generation; differs in different scenarios:
# If a preset was loaded, base the render target file name on it.
if ARGS.LOAD_PRESET:
    # take the trailing .cgp extension off it (anchored regex, because str.rstrip('.cgp')
    # would strip any run of trailing '.', 'c', 'g', 'p' characters, not the suffix):
    render_target_file_base_name = re.sub(r'\.cgp$', '', LOAD_PRESET)
else:
# Otherwise, create render target file name based on time painting began.
now = datetime.datetime.now()
time_stamp = now.strftime('%Y_%m_%d__%H_%M_%S__')
# VESTIGIAL CODE; most versions of this script altered the pseudorandom sequence of --RANDOM_SEED with the following line of code (that makes an rndStr); this had been commented out around v2.3.6 - v2.5.5 (maybe?), which broke with pseudorandom continuity as originally developed in the script. For continuity (and because output seemed randomly better _with_ this code), it is left here;
# ALSO NOTE:
# in trying to track down this issue some versions of the script had the following line of code before the above if ARGS.LOAD_PRESET; but now I think it _would_ have been here (also git history isn't complete on versions, I think, so I'm speculating); if you can't duplicate the rnd state of a render, you may want to try copying it up there.
rndStr = ('%03x' % random.randrange(16**6))
render_target_file_base_name = time_stamp + '__' + rndStr + '_colorGrowthPy'
# Check whether a render target file with the same name (plus '.png' extension) exists. This logic is very slightly risky: if render_target_file_base_name does not exist, I will assume that the state image file name and anim frames folder names also do not exist; if I am wrong, those may get overwritten (by other logic in this script).
target_render_file_exists = os.path.exists(render_target_file_base_name + '.png')
# If it does not exist, set the render target file name to that ( + '.png'); in that case, the following "while" block will never execute. BUT if it does exist, the following "while" block _will_ execute, and do this: rename the render target file name by appending an incrementing, zero-padded variant number (e.g. '__variant_0002') to the base name, and keep checking and doing that over again until there's no target name conflict:
cgp_rename_count = 1
while target_render_file_exists:
    # Append an incrementing, zero-padded variant number to the base name:
    cgp_rename_count += 1
    variantNameStr = str(cgp_rename_count).zfill(4)
    tst_str = render_target_file_base_name + '__variant_' + variantNameStr
    target_render_file_exists = os.path.exists(tst_str + '.png')
if cgp_rename_count > 10000:
print(
"Encountered 10,000 naming collisions making new render target file \
names. Please make a copy of and rename the source .cgp file before \
            continuing, Sparkles McSparkly. Exiting."
)
sys.exit(1)
    if not target_render_file_exists:
render_target_file_base_name = tst_str
render_target_file_name = render_target_file_base_name + '.png'
anim_frames_folder_name = render_target_file_base_name + '_frames'
print('\nrender_target_file_name: ', render_target_file_name)
print('anim_frames_folder_name: ', anim_frames_folder_name)
# If SAVE_EVERY_N has a value greater than zero, create a subfolder to write frames to; Also, initialize a variable which is how many zeros to pad animation save frame file (numbers) to, based on how many frames will be rendered:
if SAVE_EVERY_N > 0:
padFileNameNumbersDigitsWidth = len(str(stopRenderAtPixelsN))
# Only create the anim frames folder if it does not exist:
if os.path.exists(anim_frames_folder_name) == False:
os.mkdir(anim_frames_folder_name)
# If bool set saying so, save arguments to this script to a .cgp file with the target render base file name:
if SAVE_PRESET:
    # strip the --LOAD_PRESET parameter and value from SCRIPT_ARGS_STR before writing it to the preset file (saving the result in a new variable), as it would be redundant (and, if the parameters are based on loading another preset and overriding some of them, it would moreover be wrong):
SCRIPT_ARGS_WRITE_STR = re.sub('--LOAD_PRESET [^ ]*', r'', SCRIPT_ARGS_STR)
file = open(render_target_file_base_name + '.cgp', "w")
file.write(SCRIPT_ARGS_WRITE_STR + '\n\n')
if ARGS.LOAD_PRESET:
file.write('# Derived of preset: ' + LOAD_PRESET + '\n')
file.write('# Created with color_growth.py ' + ColorGrowthPyVersionString + '\n')
file.write('# Python version: ' + sys.version + '\n')
file.write('# Platform: ' + platform.platform() + '\n')
file.close()
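# (A saved .cgp preset can be replayed on a later run via --LOAD_PRESET.)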
# ----
# START IMAGE MAPPING
painted_coordinates = 0
# With higher VISCOSITY, some coordinates can be painted around (by other coordinates on all sides) while coordinate mutation never actually moves into them. The result is that some coordinates may never be "born." This set and associated code revives such orphan coordinates:
potential_orphan_coords_two = set()
# Used to reclaim orphan coordinates every N iterations through the `while coord_queue` loop;
# these counters also ramp up the orphan coordinate reclamation rate as the render proceeds:
base_orphan_reclaim_multiplier = 0.015
orphans_to_reclaim_n = 0
coords_painted_since_reclaim = 0
print('Generating image . . . ')
newly_painted_coords = 0 # This is reset at every call of print_progress()
continue_painting = True
while coord_queue:
    if not continue_painting:
break
while coord_queue:
index = np.random.randint(0, len(coord_queue))
y, x = coord_queue[index]
if index == len(coord_queue) - 1:
coord_queue.pop()
else:
coord_queue[index] = coord_queue.pop()
# Mutate color--! and assign it to the color variable (list) in the Coordinate object:
canvas[y][x] = canvas[y][x] + np.random.randint(-RSHIFT, RSHIFT + 1, size=3) / 2
# print('Colored coordinate (y, x)', coord)
new_allocd_coords_color = canvas[y][x] = np.clip(canvas[y][x], 0, 255)
painted_coordinates += 1
newly_painted_coords += 1
coords_painted_since_reclaim += 1
# The first returned set is used straightway, the second optionally shuffles into the first after the first is depleted:
rnd_new_coords_set, potential_orphan_coords_one = get_rnd_unallocd_neighbors(y, x, canvas)
for new_y, new_x in rnd_new_coords_set:
coord_queue.append((new_y, new_x))
if BORDER_BLEND and is_coord_in_bounds(2*new_y-y, 2*new_x-x) and is_color_valid(2*new_y-y, 2*new_x-x, canvas):
canvas[new_y][new_x] = (np.array(new_allocd_coords_color) + np.array(canvas[2*new_y-y][2*new_x-x])) / 2
else:
canvas[new_y][new_x] = new_allocd_coords_color
# Save an animation frame (function only does if SAVE_EVERY_N True):
save_animation_frame()
# Print progress:
if report_stats_nth_counter == 0 or report_stats_nth_counter == report_stats_every_n:
print_progress(newly_painted_coords)
newly_painted_coords = 0
report_stats_nth_counter = 0
report_stats_nth_counter += 1
        # Terminate all coordinate and color mutation at an arbitrary number of mutations:
if painted_coordinates > stopRenderAtPixelsN:
print('Painted coordinate termination count', painted_coordinates, 'exceeded. Ending paint algorithm.')
continue_painting = False
break
if RECLAIM_ORPHANS:
for y in range(0, HEIGHT):
for x in range(0, WIDTH):
if not is_color_valid(y, x, canvas):
adj_color = find_adjacent_color(y, x, canvas)
if adj_color is not None:
coord_queue.append((y, x))
canvas[y][x] = adj_color + np.random.randint(-RSHIFT, RSHIFT + 1, size=3) / 2
canvas[y][x] = np.clip(canvas[y][x], 0, 255)
orphans_to_reclaim_n += 1
# END IMAGE MAPPING
# ----
# Works around a quirk: this setup can (always does?) save everything _except_ a last frame with every coordinate painted, if painted_coordinates >= stopRenderAtPixelsN and STOP_AT_PERCENT == 1. Is there a better-engineered way to fix this? Perhaps, but this works:
if SAVE_EVERY_N != 0:
set_img_frame_file_name()
coords_set_to_image(canvas, imageFrameFileName)
# Save final image file:
print('Saving image ', render_target_file_name, ' . . .')
coords_set_to_image(canvas, render_target_file_name)
print('Render complete and image saved.')
# END MAIN FUNCTIONALITY.
|
r-alex-hall/fontDevTools
|
scripts/imgAndVideo/color_growth.py
|
Python
|
gpl-3.0
| 45,584 | 0.005484 |
#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# SLEPc - Scalable Library for Eigenvalue Problem Computations
# Copyright (c) 2002-2015, Universitat Politecnica de Valencia, Spain
#
# This file is part of SLEPc.
#
# SLEPc is free software: you can redistribute it and/or modify it under the
# terms of version 3 of the GNU Lesser General Public License as published by
# the Free Software Foundation.
#
# SLEPc is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
# more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with SLEPc. If not, see <http://www.gnu.org/licenses/>.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
import package, os, sys, commands
class PETSc(package.Package):
def __init__(self,argdb,log):
self.packagename = 'petsc'
self.downloadable = False
self.log = log
def Check(self):
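    # Probe for a usable PETSc by attempting a trivial link (Link() is
    # presumably provided by the package.Package base class):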
self.havepackage = self.Link([],[],[])
def InitDir(self):
if 'PETSC_DIR' in os.environ:
self.dir = os.environ['PETSC_DIR']
if not os.path.exists(self.dir):
        sys.exit('ERROR: PETSC_DIR environment variable is not valid')
else:
if slepc.prefixdir:
self.dir = slepc.prefixdir
os.environ['PETSC_DIR'] = self.dir
else:
        sys.exit('ERROR: PETSC_DIR environment variable is not set')
def LoadVersion(self):
try:
f = open(os.path.join(self.dir,'include','petscversion.h'))
for l in f.readlines():
l = l.split()
if len(l) == 3:
if l[1] == 'PETSC_VERSION_RELEASE':
self.release = l[2]
if l[1] == 'PETSC_VERSION_MAJOR':
major = l[2]
elif l[1] == 'PETSC_VERSION_MINOR':
minor = l[2]
elif l[1] == 'PETSC_VERSION_SUBMINOR':
subminor = l[2]
elif l[1] == 'PETSC_VERSION_PATCH':
patchlevel = l[2]
f.close()
self.version = major + '.' + minor
self.lversion = major + '.' + minor + '.' + subminor
except:
self.log.Exit('ERROR: File error while reading PETSc version')
# Check whether this is a working copy of the repository
self.isrepo = False
if os.path.exists(os.path.join(self.dir,'.git')):
(status, output) = commands.getstatusoutput('cd '+self.dir+';git rev-parse')
if not status:
self.isrepo = True
(status, self.gitrev) = commands.getstatusoutput('cd '+self.dir+';git log -1 --pretty=format:%H')
(status, self.gitdate) = commands.getstatusoutput('cd '+self.dir+';git log -1 --pretty=format:%ci')
(status, self.branch) = commands.getstatusoutput('cd '+self.dir+';git describe --contains --all HEAD')
def LoadConf(self):
if 'PETSC_ARCH' in os.environ and os.environ['PETSC_ARCH']:
self.isinstall = False
self.arch = os.environ['PETSC_ARCH']
petscvariables = os.path.join(self.dir,self.arch,'lib','petsc','conf','petscvariables')
petscconf_h = os.path.join(self.dir,self.arch,'include','petscconf.h')
else:
self.isinstall = True
petscvariables = os.path.join(self.dir,'lib','petsc','conf','petscvariables')
petscconf_h = os.path.join(self.dir,'include','petscconf.h')
self.build_using_cmake = 0
self.make_is_gnumake = 0
self.language = 'c'
self.bfort = 'nobfortinpetsc'
try:
f = open(petscvariables)
for l in f.readlines():
r = l.split('=',1)
if len(r)!=2: continue
k = r[0].strip()
v = r[1].strip()
if k == 'PETSC_SCALAR':
self.scalar = v
elif k == 'PETSC_PRECISION':
self.precision = v
elif k == 'MAKE':
self.make = v
elif k == 'DESTDIR':
self.destdir = v
elif k == 'BFORT':
self.bfort = v
elif k == 'TEST_RUNS':
self.test_runs = v
elif k == 'CC':
self.cc = v
elif k == 'CC_FLAGS':
self.cc_flags = v
elif k == 'FC' and not v=='':
self.fc = v
elif k == 'AR':
self.ar = v
elif k == 'AR_FLAGS':
self.ar_flags = v
elif k == 'AR_LIB_SUFFIX':
self.ar_lib_suffix = v
elif k == 'CC_LINKER_SLFLAG':
self.slflag = v
elif k == 'RANLIB':
self.ranlib = v
elif k == 'PETSC_BUILD_USING_CMAKE':
self.build_using_cmake = v
elif k == 'MAKE_IS_GNUMAKE':
self.make_is_gnumake = v
elif k == 'PETSC_LANGUAGE' and v=='CXXONLY':
self.language = 'c++'
f.close()
except:
self.log.Exit('ERROR: cannot process file ' + petscvariables)
self.ind64 = False
self.mpiuni = False
self.debug = False
self.singlelib = False
try:
f = open(petscconf_h)
for l in f.readlines():
l = l.split()
if len(l)==3 and l[0]=='#define' and l[1]=='PETSC_USE_64BIT_INDICES' and l[2]=='1':
self.ind64 = True
elif len(l)==3 and l[0]=='#define' and l[1]=='PETSC_HAVE_MPIUNI' and l[2]=='1':
self.mpiuni = True
elif len(l)==3 and l[0]=='#define' and l[1]=='PETSC_USE_DEBUG' and l[2]=='1':
self.debug = True
elif len(l)==3 and l[0]=='#define' and l[1]=='PETSC_USE_SINGLE_LIBRARY' and l[2]=='1':
self.singlelib = True
elif self.isinstall and len(l)==3 and l[0]=='#define' and l[1]=='PETSC_ARCH':
self.arch = l[2].strip('"')
f.close()
except:
if self.isinstall:
self.log.Exit('ERROR: cannot process file ' + petscconf_h + ', maybe you forgot to set PETSC_ARCH')
else:
self.log.Exit('ERROR: cannot process file ' + petscconf_h)
# empty PETSC_ARCH, guess an arch name
if self.isinstall and not self.arch:
self.arch = 'arch-' + sys.platform.replace('cygwin','mswin')+ '-' + self.language
if self.debug:
self.arch += '-debug'
else:
self.arch += '-opt'
if not 'real' in self.scalar:
self.arch += '-' + self.scalar
|
OpenCMISS-Dependencies/slepc
|
config/packages/petsc.py
|
Python
|
lgpl-3.0
| 6,234 | 0.022137 |
"""
Embeddable, generic, virtual, tabular display.
"""
from numpy import array, compress, zeros
import wx
from wx.lib.mixins.listctrl import ListCtrlAutoWidthMixin
from spacq.interface.list_columns import ListParser
class VirtualListCtrl(wx.ListCtrl, ListCtrlAutoWidthMixin):
"""
A generic virtual list.
"""
max_value_len = 250 # Characters.
@staticmethod
def find_type(value):
"""
Determine the type of a column based on a single value.
The type is one of: scalar, list, string.
"""
try:
float(value)
except ValueError:
pass
else:
return 'scalar'
try:
ListParser()(value)
except ValueError:
pass
else:
return 'list'
return 'string'
def __init__(self, parent, *args, **kwargs):
wx.ListCtrl.__init__(self, parent,
style=wx.LC_REPORT|wx.LC_VIRTUAL|wx.LC_HRULES|wx.LC_VRULES,
*args, **kwargs)
ListCtrlAutoWidthMixin.__init__(self)
self.reset()
def reset(self):
self.headings = []
self.data = array([])
self.filtered_data = None
self.display_data = array([])
self.types = []
def refresh_with_values(self, data):
self.ItemCount = len(data)
if self.ItemCount > 0:
self.display_data = zeros(data.shape, dtype='|S{0}'.format(self.max_value_len))
for i, _ in enumerate(self.headings):
# Truncate for display.
self.display_data[:,i] = [str(x)[:self.max_value_len] for x in data[:,i]]
self.Refresh()
def apply_filter(self, f, afresh=False):
"""
Set the data to be the old data, along with the application of a filter.
f is a function of two parameters: the index of the row and the row itself.
f must return True if the row is to be kept and False otherwise.
If afresh is True, all old filtered data is discarded.
Otherwise, a new filter can be quickly applied.
"""
if afresh:
self.filtered_data = None
if self.filtered_data is not None:
original_set = self.filtered_data
else:
original_set = self.data
self.filtered_data = compress([f(i, x) for i, x in enumerate(original_set)], original_set, axis=0)
self.refresh_with_values(self.filtered_data)
def GetValue(self, types=None):
# Get all types by default.
if types is None:
types = set(self.types)
else:
types = set(types)
# Find column indices of the correct type.
idxs = [i for i, t in enumerate(self.types) if t in types]
if self.filtered_data is not None:
data = self.filtered_data
else:
data = self.data
return ([self.headings[i] for i in idxs], data[:,idxs], [self.types[i] for i in idxs])
def SetValue(self, headings, data):
"""
headings: A list of strings.
data: A 2D NumPy array.
"""
self.ClearAll()
self.reset()
self.headings = headings
self.data = data
self.refresh_with_values(self.data)
if self.ItemCount > 0:
width, height = self.GetSize()
# Give some room for the scrollbar.
col_width = (width - 50) / len(self.headings)
for i, heading in enumerate(self.headings):
self.InsertColumn(i, heading, width=col_width)
                col_type = self.find_type(data[0,i])
                self.types.append(col_type)
def OnGetItemText(self, item, col):
"""
Return cell value for LC_VIRTUAL.
"""
return self.display_data[item,col]
class TabularDisplayPanel(wx.Panel):
"""
A panel to display arbitrary tabular data.
"""
def __init__(self, parent, *args, **kwargs):
wx.Panel.__init__(self, parent, *args, **kwargs)
# Panel.
panel_box = wx.BoxSizer(wx.VERTICAL)
## Table.
self.table = VirtualListCtrl(self)
panel_box.Add(self.table, proportion=1, flag=wx.EXPAND)
self.SetSizer(panel_box)
def __len__(self):
return self.table.ItemCount
def from_csv_data(self, has_header, values):
"""
Import the given CSV data into the table.
If has_header is True, the first row is treated specially.
"""
if has_header:
headers, rows = values[0], array(values[1:])
else:
headers, rows = [''] * len(values[0]), array(values)
# Ensure that all columns have a header.
for i, header in enumerate(headers):
if not header:
headers[i] = 'Column {0}'.format(i + 1)
self.SetValue(headers, rows)
def GetValue(self, *args, **kwargs):
return self.table.GetValue(*args, **kwargs)
def SetValue(self, headings, values):
self.table.SetValue(headings, values)
class TabularDisplayFrame(wx.Frame):
def __init__(self, parent, *args, **kwargs):
wx.Frame.__init__(self, parent, *args, **kwargs)
# Frame.
frame_box = wx.BoxSizer(wx.VERTICAL)
## Display panel.
self.display_panel = TabularDisplayPanel(self)
frame_box.Add(self.display_panel, proportion=1, flag=wx.EXPAND)
self.SetSizer(frame_box)
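# Minimal usage sketch (illustrative; requires a display and a wx main loop):
if __name__ == '__main__':
    demo_app = wx.App(False)
    demo_frame = TabularDisplayFrame(None, title='Tabular display demo')
    demo_frame.display_panel.from_csv_data(True, [['x', 'y'], ['1', '2'], ['3', '4']])
    demo_frame.Show()
    demo_app.MainLoop()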
|
ghwatson/SpanishAcquisitionIQC
|
spacq/gui/display/table/generic.py
|
Python
|
bsd-2-clause
| 4,604 | 0.03258 |
ranged_attacker = "ranged attacker"
melee_attacker = "melee attacker"
healer = 'healer'
dismantling_attacker = 'dismantler'
general_attacker = 'general attacker'
tough_attacker = 'tough guy'
work_and_carry_attacker = 'multi-purpose attacker'
civilian = 'civilian'
scout = 'scout'
|
daboross/screeps-warreport
|
warreport/constants.py
|
Python
|
mit
| 280 | 0 |
import time
import tensorflow as tf
import numpy as np
import pandas as pd
from scipy.misc import imread
from alexnet import AlexNet
sign_names = pd.read_csv('signnames.csv')
nb_classes = 43
x = tf.placeholder(tf.float32, (None, 32, 32, 3))
resized = tf.image.resize_images(x, (227, 227))
# NOTE: By setting `feature_extract` to `True` we return
# the second to last layer.
fc7 = AlexNet(resized, feature_extract=True)
# TODO: Define a new fully connected layer followed by a softmax activation to classify
# the traffic signs. Assign the result of the softmax activation to `probs` below.
shape = (fc7.get_shape().as_list()[-1], nb_classes) # use this shape for the weight matrix
fc8W = tf.Variable(tf.truncated_normal(shape, stddev=1e-2))
fc8b = tf.Variable(tf.zeros(nb_classes))
logits = tf.nn.xw_plus_b(fc7, fc8W, fc8b)
probs = tf.nn.softmax(logits)
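# NOTE: fc8W and fc8b are the only newly initialized (trainable) variables here;
# the layers up through fc7 presumably come pre-trained from the alexnet module.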
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
# Read Images
im1 = imread("construction.jpg").astype(np.float32)
im1 = im1 - np.mean(im1)
im2 = imread("stop.jpg").astype(np.float32)
im2 = im2 - np.mean(im2)
# Run Inference
t = time.time()
output = sess.run(probs, feed_dict={x: [im1, im2]})
# Print Output
for input_im_ind in range(output.shape[0]):
inds = np.argsort(output)[input_im_ind, :]
print("Image", input_im_ind)
for i in range(5):
print("%s: %.3f" % (sign_names.ix[inds[-1 - i]][1], output[input_im_ind, inds[-1 - i]]))
print()
print("Time: %.3f seconds" % (time.time() - t))
|
Raag079/self-driving-car
|
Term01-Computer-Vision-and-Deep-Learning/Labs/05-CarND-Alexnet-Feature-Extraction/feature_extraction.py
|
Python
|
mit
| 1,499 | 0.002668 |
import json
import bottle
from pyrouted.util import make_spec
def route(method, path):
def decorator(f):
f.http_route = path
f.http_method = method
return f
return decorator
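# Note: the decorated methods only carry routing metadata (http_method/http_route);
# a dispatcher elsewhere is expected to bind them to a bottle app. A minimal,
# hypothetical registration helper (a sketch, not part of this module) might be:
def register_api(app, api):
    for name in dir(api):
        handler = getattr(api, name)
        if callable(handler) and hasattr(handler, 'http_route'):
            app.route(api.prefix + handler.http_route,
                      method=handler.http_method, callback=handler)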
class APIv1(object):
prefix = '/v1'
def __init__(self, ndb, config):
self.ndb = ndb
self.config = config
@route('GET', '/sources')
def sources_list(self, mode='short'):
ret = {}
mode = bottle.request.query.mode or mode
for name, spec in self.ndb.sources.items():
ret[name] = {'class': spec.nl.__class__.__name__,
'status': spec.status}
if mode == 'full':
ret[name]['config'] = spec.nl_kwarg
return bottle.template('{{!ret}}', ret=json.dumps(ret))
@route('PUT', '/sources')
def sources_restart(self):
node = bottle.request.body.getvalue().decode('utf-8')
self.ndb.sources[node].start()
@route('POST', '/sources')
def sources_add(self):
data = bottle.request.body.getvalue().decode('utf-8')
node, spec = make_spec(data, self.config)
self.config['sources'].append(node)
self.ndb.connect_source(node, spec)
@route('DELETE', '/sources')
def sources_del(self):
node = bottle.request.body.getvalue().decode('utf-8')
self.config['sources'].remove(node)
self.ndb.disconnect_source(node)
@route('GET', '/config')
def config_get(self):
return bottle.template('{{!ret}}',
ret=json.dumps(self.config))
@route('PUT', '/config')
def config_dump(self):
path = bottle.request.body.getvalue().decode('utf-8')
self.config.dump(path)
@route('GET', '/<name:re:(%s|%s|%s|%s|%s|%s)>' % ('interfaces',
'addresses',
'routes',
'neighbours',
'vlans',
'bridges'))
def view(self, name):
ret = []
obj = getattr(self.ndb, name)
for line in obj.dump():
ret.append(line)
return bottle.template('{{!ret}}', ret=json.dumps(ret))
@route('GET', '/query/<name:re:(%s|%s|%s|%s)>' % ('nodes',
'p2p_edges',
'l2_edges',
'l3_edges'))
def query(self, name):
ret = []
obj = getattr(self.ndb.query, name)
for line in obj():
ret.append(line)
return bottle.template('{{!ret}}', ret=json.dumps(ret))
|
svinota/pyrouted
|
pyrouted/api.py
|
Python
|
gpl-2.0
| 2,803 | 0 |
from ert.test import TestRun
from ert.test import path_exists
from ert.test import SourceEnumerator
from ert.test import TestArea , TestAreaContext
from ert.test import ErtTestRunner
from ert.test import PathContext
from ert.test import LintTestCase
from ert.test import ImportTestCase
from tests import EclTest
class ErtLegacyTestTest(EclTest):
pass
|
Statoil/libecl
|
python/tests/legacy_tests/test_test.py
|
Python
|
gpl-3.0
| 358 | 0.002793 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2015, Tony Asleson <tasleson@redhat.com>
import dbus
import cfg
from utils import get_properties, add_properties, get_object_property_diff
from state import State
# noinspection PyPep8Naming
class AutomatedProperties(dbus.service.Object):
"""
This class implements the needed interfaces for:
org.freedesktop.DBus.Properties
Other classes inherit from it to get the same behavior
"""
DBUS_INTERFACE = ''
def __init__(self, object_path, interface, search_method=None):
dbus.service.Object.__init__(self, cfg.bus, object_path)
self._ap_interface = interface
self._ap_o_path = object_path
self._ap_search_method = search_method
self.state = None
def dbus_object_path(self):
return self._ap_o_path
def emit_data(self):
props = {}
for i in self.interface():
props[i] = self.GetAll(i)
return self._ap_o_path, props
def interface(self, all_interfaces=False):
return [self._ap_interface]
# Properties
# noinspection PyUnusedLocal
@dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,
in_signature='ss', out_signature='v')
def Get(self, interface_name, property_name):
value = getattr(self, property_name)
# Note: If we get an exception in this handler we won't know about it,
# only the side effect of no returned value!
print 'Get (%s), type (%s), value(%s)' % \
(property_name, str(type(value)), str(value))
return value
@dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,
in_signature='s', out_signature='a{sv}')
def GetAll(self, interface_name):
if interface_name in self.interface():
# Using introspection, lets build this dynamically
return get_properties(self, interface_name)[1]
raise dbus.exceptions.DBusException(
self._ap_interface,
'The object %s does not implement the %s interface'
% (self.__class__, interface_name))
@dbus.service.method(dbus_interface=dbus.PROPERTIES_IFACE,
in_signature='ssv')
def Set(self, interface_name, property_name, new_value):
setattr(self, property_name, new_value)
self.PropertiesChanged(interface_name,
{property_name: new_value}, [])
# As dbus-python does not support introspection for properties we will
# get the autogenerated xml and then add our wanted properties to it.
@dbus.service.method(dbus_interface=dbus.INTROSPECTABLE_IFACE,
out_signature='s')
def Introspect(self):
r = dbus.service.Object.Introspect(self, self._ap_o_path, cfg.bus)
# Look at the properties in the class
return add_properties(r, self._ap_interface, get_properties(self)[0])
@dbus.service.signal(dbus_interface=dbus.PROPERTIES_IFACE,
signature='sa{sv}as')
def PropertiesChanged(self, interface_name, changed_properties,
invalidated_properties):
print('SIGNAL: PropertiesChanged(%s, %s, %s, %s)' %
(str(self._ap_o_path), str(interface_name),
str(changed_properties), str(invalidated_properties)))
def refresh(self, search_key=None, object_state=None):
"""
Take the values (properties) of an object and update them with what
lvm currently has. You can either fetch the new ones or supply the
new state to be updated with
:param search_key: The value to use to search for
:param object_state: Use this as the new object state
"""
num_changed = 0
# If we can't do a lookup, bail now, this happens if we blindly walk
# through all dbus objects as some don't have a search method, like
# 'Manager' object.
if not self._ap_search_method:
return
search = self.lvm_id
if search_key:
search = search_key
# Either we have the new object state or we need to go fetch it
if object_state:
new_state = object_state
else:
new_state = self._ap_search_method([search])[0]
assert isinstance(new_state, State)
assert new_state
# When we refresh an object the object identifiers might have changed
# because LVM allows the user to change them (name & uuid), thus if
# they have changed we need to update the object manager so that
# look-ups will happen correctly
old_id = self.state.identifiers()
new_id = new_state.identifiers()
if old_id[0] != new_id[0] or old_id[1] != new_id[1]:
cfg.om.lookup_update(self)
# Grab the properties values, then replace the state of the object
# and retrieve the new values
# TODO: We need to add locking to prevent concurrent access to the
# properties so that a client is not accessing while we are
# replacing.
o_prop = get_properties(self)[1]
self.state = new_state
n_prop = get_properties(self)[1]
changed = get_object_property_diff(o_prop, n_prop)
if changed:
self.PropertiesChanged(self._ap_interface, changed, [])
num_changed += 1
return num_changed
|
vpodzime/lvm-dubstep
|
lvmdbus/automatedproperties.py
|
Python
|
gpl-3.0
| 6,035 | 0 |
from RPi import GPIO as gpio
from robotics.actors.redbot_motor_actor import RedbotMotorActor
from robotics.interfaces.spi.mcp3008_spi_interface import MCP3008SpiInterface
from robotics.robots.aizek_robot import AizekRobot
from robotics.sensors.redbot_wheel_encoder_sensor import RedbotWheelEncoderSensor
from robotics.sensors.sharp_ir_distance_sensor import SharpIrDistanceSensor
class RobotFactory(object):
@staticmethod
def createAizekRobot():
gpio.setmode(gpio.BOARD)
lmotor = RedbotMotorActor(gpio, 8, 10, 12)
rmotor = RedbotMotorActor(gpio, 11, 13, 15)
spi = MCP3008SpiInterface(0)
wencoder = RedbotWheelEncoderSensor(spi)
lsensor = SharpIrDistanceSensor(spi, 5)
fsensor = SharpIrDistanceSensor(spi, 4)
rsensor = SharpIrDistanceSensor(spi, 3)
wheel_radius = 0.032
wheel_distance = 0.1
robot = AizekRobot(
left_motor=lmotor,
right_motor=rmotor,
wheel_encoder=wencoder,
left_distance_sensor=lsensor,
front_distance_sensor=fsensor,
right_distance_sensor=rsensor,
wheel_radius=wheel_radius,
wheel_distance=wheel_distance,
)
return robot
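# Example usage (a sketch; assumes the gpio.BOARD pin wiring above on a Raspberry Pi):
if __name__ == '__main__':
    demo_robot = RobotFactory.createAizekRobot()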
|
asydorchuk/robotics
|
python/robotics/robots/factory.py
|
Python
|
mit
| 1,253 | 0.000798 |
"""Tests for chebyshev module.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.chebyshev as cheb
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
)
def trim(x):
return cheb.chebtrim(x, tol=1e-6)
T0 = [1]
T1 = [0, 1]
T2 = [-1, 0, 2]
T3 = [0, -3, 0, 4]
T4 = [1, 0, -8, 0, 8]
T5 = [0, 5, 0, -20, 0, 16]
T6 = [-1, 0, 18, 0, -48, 0, 32]
T7 = [0, -7, 0, 56, 0, -112, 0, 64]
T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128]
T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
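# Tlist[i] holds the power-basis coefficients of the Chebyshev polynomial T_i,
# ordered lowest degree first (e.g. T2(x) = -1 + 2*x**2).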
class TestPrivate(object):
def test__cseries_to_zseries(self):
for i in range(5):
inp = np.array([2] + [1]*i, np.double)
tgt = np.array([.5]*i + [2] + [.5]*i, np.double)
res = cheb._cseries_to_zseries(inp)
assert_equal(res, tgt)
def test__zseries_to_cseries(self):
for i in range(5):
inp = np.array([.5]*i + [2] + [.5]*i, np.double)
tgt = np.array([2] + [1]*i, np.double)
res = cheb._zseries_to_cseries(inp)
assert_equal(res, tgt)
class TestConstants(object):
def test_chebdomain(self):
assert_equal(cheb.chebdomain, [-1, 1])
def test_chebzero(self):
assert_equal(cheb.chebzero, [0])
def test_chebone(self):
assert_equal(cheb.chebone, [1])
def test_chebx(self):
assert_equal(cheb.chebx, [0, 1])
class TestArithmetic(object):
def test_chebadd(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = cheb.chebadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebsub(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = cheb.chebsub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebmulx(self):
assert_equal(cheb.chebmulx([0]), [0])
assert_equal(cheb.chebmulx([1]), [0, 1])
for i in range(1, 5):
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [.5, 0, .5]
assert_equal(cheb.chebmulx(ser), tgt)
def test_chebmul(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(i + j + 1)
tgt[i + j] += .5
tgt[abs(i - j)] += .5
res = cheb.chebmul([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebdiv(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = cheb.chebadd(ci, cj)
quo, rem = cheb.chebdiv(tgt, ci)
res = cheb.chebadd(cheb.chebmul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2.5, 2., 1.5])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
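    # c1d is the Chebyshev-series form of 1 + 2*x + 3*x**2 (cf. y below); c2d and
    # c3d are its separable outer products, so the expected 2D/3D values factor
    # into products of 1D evaluations in the chebval2d/chebval3d tests.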
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = polyval(x, [1., 2., 3.])
def test_chebval(self):
#check empty input
assert_equal(cheb.chebval([], [1]).size, 0)
#check normal input)
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Tlist]
for i in range(10):
msg = "At i=%d" % i
tgt = y[i]
res = cheb.chebval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
#check that shape is preserved
for i in range(3):
dims = [2]*i
x = np.zeros(dims)
assert_equal(cheb.chebval(x, [1]).shape, dims)
assert_equal(cheb.chebval(x, [1, 0]).shape, dims)
assert_equal(cheb.chebval(x, [1, 0, 0]).shape, dims)
def test_chebval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, cheb.chebval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
res = cheb.chebval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = cheb.chebval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_chebval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, cheb.chebval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = cheb.chebval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = cheb.chebval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_chebgrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = cheb.chebgrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = cheb.chebgrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_chebgrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = cheb.chebgrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = cheb.chebgrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
class TestIntegral(object):
def test_chebint(self):
# check exceptions
assert_raises(ValueError, cheb.chebint, [0], .5)
assert_raises(ValueError, cheb.chebint, [0], -1)
assert_raises(ValueError, cheb.chebint, [0], 1, [0, 0])
assert_raises(ValueError, cheb.chebint, [0], lbnd=[0])
assert_raises(ValueError, cheb.chebint, [0], scl=[0])
assert_raises(ValueError, cheb.chebint, [0], axis=.5)
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = cheb.chebint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
chebpol = cheb.poly2cheb(pol)
chebint = cheb.chebint(chebpol, m=1, k=[i])
res = cheb.cheb2poly(chebint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
chebpol = cheb.poly2cheb(pol)
chebint = cheb.chebint(chebpol, m=1, k=[i], lbnd=-1)
assert_almost_equal(cheb.chebval(-1, chebint), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
chebpol = cheb.poly2cheb(pol)
chebint = cheb.chebint(chebpol, m=1, k=[i], scl=2)
res = cheb.cheb2poly(chebint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = cheb.chebint(tgt, m=1)
res = cheb.chebint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = cheb.chebint(tgt, m=1, k=[k])
res = cheb.chebint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = cheb.chebint(tgt, m=1, k=[k], lbnd=-1)
res = cheb.chebint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = cheb.chebint(tgt, m=1, k=[k], scl=2)
res = cheb.chebint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_chebint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([cheb.chebint(c) for c in c2d.T]).T
res = cheb.chebint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([cheb.chebint(c) for c in c2d])
res = cheb.chebint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([cheb.chebint(c, k=3) for c in c2d])
res = cheb.chebint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
class TestDerivative(object):
def test_chebder(self):
# check exceptions
assert_raises(ValueError, cheb.chebder, [0], .5)
assert_raises(ValueError, cheb.chebder, [0], -1)
# check that zeroth derivative does nothing
for i in range(5):
tgt = [0]*i + [1]
res = cheb.chebder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = cheb.chebder(cheb.chebint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = cheb.chebder(cheb.chebint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_chebder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([cheb.chebder(c) for c in c2d.T]).T
res = cheb.chebder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([cheb.chebder(c) for c in c2d])
res = cheb.chebder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander(object):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
def test_chebvander(self):
# check for 1d x
x = np.arange(3)
v = cheb.chebvander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], cheb.chebval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = cheb.chebvander(x, 3)
assert_(v.shape == (3, 2, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], cheb.chebval(x, coef))
def test_chebvander2d(self):
# also tests chebval2d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3))
van = cheb.chebvander2d(x1, x2, [1, 2])
tgt = cheb.chebval2d(x1, x2, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = cheb.chebvander2d([x1], [x2], [1, 2])
assert_(van.shape == (1, 5, 6))
def test_chebvander3d(self):
# also tests chebval3d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3, 4))
van = cheb.chebvander3d(x1, x2, x3, [1, 2, 3])
tgt = cheb.chebval3d(x1, x2, x3, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = cheb.chebvander3d([x1], [x2], [x3], [1, 2, 3])
assert_(van.shape == (1, 5, 24))
class TestFitting(object):
def test_chebfit(self):
def f(x):
return x*(x - 1)*(x - 2)
def f2(x):
return x**4 + x**2 + 1
# Test exceptions
assert_raises(ValueError, cheb.chebfit, [1], [1], -1)
assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0)
assert_raises(TypeError, cheb.chebfit, [], [1], 0)
assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0)
assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0)
assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0)
assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1])
assert_raises(ValueError, cheb.chebfit, [1], [1], [-1,])
assert_raises(ValueError, cheb.chebfit, [1], [1], [2, -1, 6])
assert_raises(TypeError, cheb.chebfit, [1], [1], [])
# Test fit
x = np.linspace(0, 2)
y = f(x)
#
coef3 = cheb.chebfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(cheb.chebval(x, coef3), y)
coef3 = cheb.chebfit(x, y, [0, 1, 2, 3])
assert_equal(len(coef3), 4)
assert_almost_equal(cheb.chebval(x, coef3), y)
#
coef4 = cheb.chebfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(cheb.chebval(x, coef4), y)
coef4 = cheb.chebfit(x, y, [0, 1, 2, 3, 4])
assert_equal(len(coef4), 5)
assert_almost_equal(cheb.chebval(x, coef4), y)
# check things still work if deg is not in strict increasing
coef4 = cheb.chebfit(x, y, [2, 3, 4, 1, 0])
assert_equal(len(coef4), 5)
assert_almost_equal(cheb.chebval(x, coef4), y)
#
coef2d = cheb.chebfit(x, np.array([y, y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
coef2d = cheb.chebfit(x, np.array([y, y]).T, [0, 1, 2, 3])
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
y[0::2] = 0
wcoef3 = cheb.chebfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
wcoef3 = cheb.chebfit(x, yw, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
        # test scaling with complex x points whose squares sum to zero.
x = [1, 1j, -1, -1j]
assert_almost_equal(cheb.chebfit(x, x, 1), [0, 1])
assert_almost_equal(cheb.chebfit(x, x, [0, 1]), [0, 1])
# test fitting only even polynomials
x = np.linspace(-1, 1)
y = f2(x)
coef1 = cheb.chebfit(x, y, 4)
assert_almost_equal(cheb.chebval(x, coef1), y)
coef2 = cheb.chebfit(x, y, [0, 2, 4])
assert_almost_equal(cheb.chebval(x, coef2), y)
assert_almost_equal(coef1, coef2)
class TestInterpolate(object):
def f(self, x):
return x * (x - 1) * (x - 2)
def test_raises(self):
assert_raises(ValueError, cheb.chebinterpolate, self.f, -1)
assert_raises(TypeError, cheb.chebinterpolate, self.f, 10.)
def test_dimensions(self):
for deg in range(1, 5):
assert_(cheb.chebinterpolate(self.f, deg).shape == (deg + 1,))
def test_approximation(self):
def powx(x, p):
return x**p
x = np.linspace(-1, 1, 10)
for deg in range(0, 10):
for p in range(0, deg + 1):
c = cheb.chebinterpolate(powx, deg, (p,))
assert_almost_equal(cheb.chebval(x, c), powx(x, p), decimal=12)
class TestCompanion(object):
def test_raises(self):
assert_raises(ValueError, cheb.chebcompanion, [])
assert_raises(ValueError, cheb.chebcompanion, [1])
def test_dimensions(self):
for i in range(1, 5):
coef = [0]*i + [1]
assert_(cheb.chebcompanion(coef).shape == (i, i))
def test_linear_root(self):
assert_(cheb.chebcompanion([1, 2])[0, 0] == -.5)
class TestGauss(object):
def test_100(self):
x, w = cheb.chebgauss(100)
# test orthogonality. Note that the results need to be normalized,
# otherwise the huge values that can arise from fast growing
# functions like Laguerre can be very confusing.
v = cheb.chebvander(x, 99)
vv = np.dot(v.T * w, v)
vd = 1/np.sqrt(vv.diagonal())
vv = vd[:, None] * vv * vd
assert_almost_equal(vv, np.eye(100))
# check that the integral of 1 is correct
tgt = np.pi
assert_almost_equal(w.sum(), tgt)
class TestMisc(object):
def test_chebfromroots(self):
res = cheb.chebfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
tgt = [0]*i + [1]
res = cheb.chebfromroots(roots)*2**(i-1)
assert_almost_equal(trim(res), trim(tgt))
def test_chebroots(self):
assert_almost_equal(cheb.chebroots([1]), [])
assert_almost_equal(cheb.chebroots([1, 2]), [-.5])
for i in range(2, 5):
tgt = np.linspace(-1, 1, i)
res = cheb.chebroots(cheb.chebfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_chebtrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, cheb.chebtrim, coef, -1)
# Test results
assert_equal(cheb.chebtrim(coef), coef[:-1])
assert_equal(cheb.chebtrim(coef, 1), coef[:-3])
assert_equal(cheb.chebtrim(coef, 2), [0])
def test_chebline(self):
assert_equal(cheb.chebline(3, 4), [3, 4])
def test_cheb2poly(self):
for i in range(10):
assert_almost_equal(cheb.cheb2poly([0]*i + [1]), Tlist[i])
def test_poly2cheb(self):
for i in range(10):
assert_almost_equal(cheb.poly2cheb(Tlist[i]), [0]*i + [1])
def test_weight(self):
x = np.linspace(-1, 1, 11)[1:-1]
tgt = 1./(np.sqrt(1 + x) * np.sqrt(1 - x))
res = cheb.chebweight(x)
assert_almost_equal(res, tgt)
def test_chebpts1(self):
#test exceptions
assert_raises(ValueError, cheb.chebpts1, 1.5)
assert_raises(ValueError, cheb.chebpts1, 0)
#test points
tgt = [0]
assert_almost_equal(cheb.chebpts1(1), tgt)
tgt = [-0.70710678118654746, 0.70710678118654746]
assert_almost_equal(cheb.chebpts1(2), tgt)
tgt = [-0.86602540378443871, 0, 0.86602540378443871]
assert_almost_equal(cheb.chebpts1(3), tgt)
tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325]
assert_almost_equal(cheb.chebpts1(4), tgt)
def test_chebpts2(self):
#test exceptions
assert_raises(ValueError, cheb.chebpts2, 1.5)
assert_raises(ValueError, cheb.chebpts2, 1)
#test points
tgt = [-1, 1]
assert_almost_equal(cheb.chebpts2(2), tgt)
tgt = [-1, 0, 1]
assert_almost_equal(cheb.chebpts2(3), tgt)
tgt = [-1, -0.5, .5, 1]
assert_almost_equal(cheb.chebpts2(4), tgt)
tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0]
assert_almost_equal(cheb.chebpts2(5), tgt)
|
tynn/numpy
|
numpy/polynomial/tests/test_chebyshev.py
|
Python
|
bsd-3-clause
| 20,348 | 0.000934 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-09-18 19:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('coursedashboards', '0005_auto_20170915_2036'),
]
operations = [
migrations.CreateModel(
name='CourseOfferingMajor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('count', models.IntegerField()),
],
options={
'db_table': 'CourseOfferingMajor',
},
),
migrations.AlterUniqueTogether(
name='coursemajor',
unique_together=set([]),
),
migrations.RemoveField(
model_name='coursemajor',
name='course',
),
migrations.RemoveField(
model_name='coursemajor',
name='major',
),
migrations.AlterField(
model_name='course',
name='curriculum',
field=models.CharField(max_length=20),
),
migrations.DeleteModel(
name='CourseMajor',
),
migrations.AddField(
model_name='courseofferingmajor',
name='course',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='coursedashboards.Course'),
),
migrations.AddField(
model_name='courseofferingmajor',
name='major',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='coursedashboards.Major'),
),
migrations.AddField(
model_name='courseofferingmajor',
name='term',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='coursedashboards.Term'),
),
migrations.AlterUniqueTogether(
name='courseofferingmajor',
unique_together=set([('major', 'term', 'course')]),
),
]
|
uw-it-aca/course-dashboards
|
coursedashboards/migrations/0006_auto_20170918_1954.py
|
Python
|
apache-2.0
| 2,064 | 0.001938 |
"""
WSGI config for django_model_deploy project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_project.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
juanpex/django-model-deploy
|
test_project/wsgi.py
|
Python
|
bsd-3-clause
| 1,153 | 0.000867 |
# Copyright 2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla.functions as F
from nbla_test_utils import list_context
ctxs = list_context('Div2')
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("inplace", [False, True])
def test_div2_double_backward(inplace, seed, ctx, func_name):
from nbla_test_utils import backward_function_tester
rng = np.random.RandomState(seed)
inputs = [rng.randn(2, 3).astype(np.float32),
rng.randn(2, 3).astype(np.float32) * 2]
backward_function_tester(rng, F.div2,
inputs=inputs,
func_args=[inplace], func_kwargs={},
atol_accum=1e-1,
dstep=1e-4,
ctx=ctx)
|
sony/nnabla
|
python/test/function/test_div2.py
|
Python
|
apache-2.0
| 1,404 | 0 |
"""Utility to compare (Numpy) version strings.
The NumpyVersion class allows properly comparing numpy version strings.
The LooseVersion and StrictVersion classes that distutils provides don't
work; they don't recognize anything like alpha/beta/rc/dev versions.
"""
import re
from scipy._lib.six import string_types
__all__ = ['NumpyVersion']
class NumpyVersion(object):
"""Parse and compare numpy version strings.
Numpy has the following versioning scheme (numbers given are examples; they
    can be > 9 in principle):
- Released version: '1.8.0', '1.8.1', etc.
- Alpha: '1.8.0a1', '1.8.0a2', etc.
- Beta: '1.8.0b1', '1.8.0b2', etc.
- Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
- Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
- Development versions after a1: '1.8.0a1.dev-f1234afa',
'1.8.0b2.dev-f1234afa',
'1.8.1rc1.dev-f1234afa', etc.
- Development versions (no git hash available): '1.8.0.dev-Unknown'
Comparing needs to be done against a valid version string or other
`NumpyVersion` instance.
Parameters
----------
vstring : str
Numpy version string (``np.__version__``).
Notes
-----
All dev versions of the same (pre-)release compare equal.
Examples
--------
>>> from scipy._lib._version import NumpyVersion
>>> if NumpyVersion(np.__version__) < '1.7.0':
... print('skip')
skip
>>> NumpyVersion('1.7') # raises ValueError, add ".0"
"""
def __init__(self, vstring):
self.vstring = vstring
ver_main = re.match(r'\d[.]\d+[.]\d+', vstring)
if not ver_main:
raise ValueError("Not a valid numpy version string")
self.version = ver_main.group()
self.major, self.minor, self.bugfix = [int(x) for x in
self.version.split('.')]
if len(vstring) == ver_main.end():
self.pre_release = 'final'
else:
alpha = re.match(r'a\d', vstring[ver_main.end():])
beta = re.match(r'b\d', vstring[ver_main.end():])
rc = re.match(r'rc\d', vstring[ver_main.end():])
pre_rel = [m for m in [alpha, beta, rc] if m is not None]
if pre_rel:
self.pre_release = pre_rel[0].group()
else:
self.pre_release = ''
self.is_devversion = bool(re.search(r'.dev', vstring))
def _compare_version(self, other):
"""Compare major.minor.bugfix"""
if self.major == other.major:
if self.minor == other.minor:
if self.bugfix == other.bugfix:
vercmp = 0
elif self.bugfix > other.bugfix:
vercmp = 1
else:
vercmp = -1
elif self.minor > other.minor:
vercmp = 1
else:
vercmp = -1
elif self.major > other.major:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare_pre_release(self, other):
"""Compare alpha/beta/rc/final."""
if self.pre_release == other.pre_release:
vercmp = 0
elif self.pre_release == 'final':
vercmp = 1
elif other.pre_release == 'final':
vercmp = -1
elif self.pre_release > other.pre_release:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare(self, other):
if not isinstance(other, (string_types, NumpyVersion)):
raise ValueError("Invalid object to compare with NumpyVersion.")
if isinstance(other, string_types):
other = NumpyVersion(other)
vercmp = self._compare_version(other)
if vercmp == 0:
# Same x.y.z version, check for alpha/beta/rc
vercmp = self._compare_pre_release(other)
if vercmp == 0:
# Same version and same pre-release, check if dev version
if self.is_devversion is other.is_devversion:
vercmp = 0
elif self.is_devversion:
vercmp = -1
else:
vercmp = 1
return vercmp
def __lt__(self, other):
return self._compare(other) < 0
def __le__(self, other):
return self._compare(other) <= 0
def __eq__(self, other):
return self._compare(other) == 0
def __ne__(self, other):
return self._compare(other) != 0
def __gt__(self, other):
return self._compare(other) > 0
def __ge__(self, other):
return self._compare(other) >= 0
def __repr__(self):
return "NumpyVersion(%s)" % self.vstring
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/scipy/_lib/_version.py
|
Python
|
mit
| 4,793 | 0.000209 |
"""
Django settings for practica4 project.
Generated by 'django-admin startproject' using Django 1.9.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os, django
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-vsi*yr21o3)d%-u%ho28+tdci&afj5-lk4sqo#c%-9(itd!v@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
TEMPLATE_PATH = os.path.join(BASE_DIR, 'templates')
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bares',
'easy_maps',
'bootstrap_toolkit',
]
if django.VERSION < (1, 7):
INSTALLED_APPS += (
'south',
)
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'practica4.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_PATH],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'practica4.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_PATH = os.path.join(BASE_DIR,'static')
STATIC_ROOT = 'staticfiles'
STATICFILES_DIRS = (
STATIC_PATH,
)
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
REGISTRATION_OPEN = True # If True, users can register
ACCOUNT_ACTIVATION_DAYS = 7 # One-week activation window; you may, of course, use a different value.
REGISTRATION_AUTO_LOGIN = True # If True, the user will be automatically logged in.
LOGIN_REDIRECT_URL = '/bares/' # The page you want users to arrive at after they successful log in
LOGIN_URL = '/accounts/login/' # The page users are directed to if they are not logged in,
# and are trying to access pages requiring authentication
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media') # Absolute path to the media directory
|
acasadoquijada/bares
|
practica4/settings.py
|
Python
|
gpl-3.0
| 4,257 | 0.003993 |
from django import forms
from .models import Join
class EmailForm(forms.Form):
email = forms.EmailField()
class JoinForm(forms.ModelForm):
class Meta:
model = Join
fields = ["email",]
|
loafbaker/django_launch_with_code
|
joins/forms.py
|
Python
|
mit
| 211 | 0.018957 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for StringToHashBucket op from string_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
class StringToHashBucketOpTest(test.TestCase):
def testStringToOneHashBucketFast(self):
with self.test_session():
input_string = array_ops.placeholder(dtypes.string)
output = string_ops.string_to_hash_bucket_fast(input_string, 1)
result = output.eval(feed_dict={input_string: ['a', 'b', 'c']})
self.assertAllEqual([0, 0, 0], result)
def testStringToHashBucketsFast(self):
with self.test_session():
input_string = array_ops.placeholder(dtypes.string)
output = string_ops.string_to_hash_bucket_fast(input_string, 10)
result = output.eval(feed_dict={input_string: ['a', 'b', 'c', 'd']})
# Fingerprint64('a') -> 12917804110809363939 -> mod 10 -> 9
# Fingerprint64('b') -> 11795596070477164822 -> mod 10 -> 2
# Fingerprint64('c') -> 11430444447143000872 -> mod 10 -> 2
# Fingerprint64('d') -> 4470636696479570465 -> mod 10 -> 5
self.assertAllEqual([9, 2, 2, 5], result)
def testStringToOneHashBucketLegacyHash(self):
with self.test_session():
input_string = array_ops.placeholder(dtypes.string)
output = string_ops.string_to_hash_bucket(input_string, 1)
result = output.eval(feed_dict={input_string: ['a', 'b', 'c']})
self.assertAllEqual([0, 0, 0], result)
def testStringToHashBucketsLegacyHash(self):
with self.test_session():
input_string = array_ops.placeholder(dtypes.string)
output = string_ops.string_to_hash_bucket(input_string, 10)
result = output.eval(feed_dict={input_string: ['a', 'b', 'c']})
# Hash64('a') -> 2996632905371535868 -> mod 10 -> 8
# Hash64('b') -> 5795986006276551370 -> mod 10 -> 0
# Hash64('c') -> 14899841994519054197 -> mod 10 -> 7
self.assertAllEqual([8, 0, 7], result)
def testStringToOneHashBucketStrongOneHashBucket(self):
with self.test_session():
input_string = constant_op.constant(['a', 'b', 'c'])
output = string_ops.string_to_hash_bucket_strong(
input_string, 1, key=[123, 345])
self.assertAllEqual([0, 0, 0], output.eval())
def testStringToHashBucketsStrong(self):
with self.test_session():
input_string = constant_op.constant(['a', 'b', 'c'])
output = string_ops.string_to_hash_bucket_strong(
input_string, 10, key=[98765, 132])
# key = [98765, 132]
# StrongKeyedHash(key, 'a') -> 7157389809176466784 -> mod 10 -> 4
# StrongKeyedHash(key, 'b') -> 15805638358933211562 -> mod 10 -> 2
# StrongKeyedHash(key, 'c') -> 18100027895074076528 -> mod 10 -> 8
self.assertAllEqual([4, 2, 8], output.eval())
def testStringToHashBucketsStrongInvalidKey(self):
with self.test_session():
input_string = constant_op.constant(['a', 'b', 'c'])
with self.assertRaisesOpError('Key must have 2 elements'):
string_ops.string_to_hash_bucket_strong(
input_string, 10, key=[98765]).eval()
if __name__ == '__main__':
test.main()
|
npuichigo/ttsflow
|
third_party/tensorflow/tensorflow/python/kernel_tests/string_to_hash_bucket_op_test.py
|
Python
|
apache-2.0
| 4,034 | 0.010659 |
import os
from setuptools import find_packages, setup
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='orgwolf',
version='0.1',
packages=find_packages(),
include_package_data=True,
license='BSD License', # example license
description='A simple Django app to conduct Web-based polls.',
long_description=README,
url='https://www.example.com/',
author='Your Name',
author_email='yourname@example.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: X.Y', # replace "X.Y" as appropriate
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License', # example license
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
m3wolf/orgwolf
|
setup.py
|
Python
|
gpl-3.0
| 1,198 | 0 |
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='pygments-red',
version='0.2',
description='Pygments lexer for Ruby + Red.',
keywords='pygments ruby red lexer',
license='MIT',
author='Aleksandar Milicevic',
author_email='aleks@csail.mit.edu',
url='https://github.com/aleksandarmilicevic/pygments-red',
packages=find_packages(),
install_requires=['pygments >= 1.4'],
entry_points='''[pygments.lexers]
ruby193=pygments_red:Ruby193Lexer
arby=pygments_red:ARbyLexer
red=pygments_red:RedLexer
sunny=pygments_red:SunnyLexer
handlebars=pygments_red:HandlebarsLexer
html+handlebars=pygments_red:HandlebarsHtmlLexer
slang=pygments_red:SlangLexer
errb=pygments_red:ErrbLexer
ered=pygments_red:EredLexer
redhtml=pygments_red:RedHtmlLexer
[pygments.styles]
redstyle=pygments_red:RedStyle
github=pygments_red:GithubStyle
githubcustom=pygments_red:GithubCustom1Style''',
classifiers=[
],
)
|
aleksandarmilicevic/pygments-red
|
setup.py
|
Python
|
mit
| 1,246 | 0 |
from cardboard import types
from cardboard.ability import (
AbilityNotImplemented, spell, activated, triggered, static
)
from cardboard.cards import card, common, keywords, match
@card("Crovax the Cursed")
def crovax_the_cursed(card, abilities):
def crovax_the_cursed():
return AbilityNotImplemented
def crovax_the_cursed():
return AbilityNotImplemented
def crovax_the_cursed():
return AbilityNotImplemented
return crovax_the_cursed, crovax_the_cursed, crovax_the_cursed,
@card("Intruder Alarm")
def intruder_alarm(card, abilities):
def intruder_alarm():
return AbilityNotImplemented
def intruder_alarm():
return AbilityNotImplemented
return intruder_alarm, intruder_alarm,
@card("Cannibalize")
def cannibalize(card, abilities):
def cannibalize():
return AbilityNotImplemented
return cannibalize,
@card("Spike Worker")
def spike_worker(card, abilities):
def spike_worker():
return AbilityNotImplemented
def spike_worker():
return AbilityNotImplemented
return spike_worker, spike_worker,
@card("Contemplation")
def contemplation(card, abilities):
def contemplation():
return AbilityNotImplemented
return contemplation,
@card("Megrim")
def megrim(card, abilities):
def megrim():
return AbilityNotImplemented
return megrim,
@card("Shifting Wall")
def shifting_wall(card, abilities):
def shifting_wall():
return AbilityNotImplemented
def shifting_wall():
return AbilityNotImplemented
return shifting_wall, shifting_wall,
@card("Dauthi Trapper")
def dauthi_trapper(card, abilities):
def dauthi_trapper():
return AbilityNotImplemented
return dauthi_trapper,
@card("Rabid Rats")
def rabid_rats(card, abilities):
def rabid_rats():
return AbilityNotImplemented
return rabid_rats,
@card("Morgue Thrull")
def morgue_thrull(card, abilities):
def morgue_thrull():
return AbilityNotImplemented
return morgue_thrull,
@card("Shard Phoenix")
def shard_phoenix(card, abilities):
def shard_phoenix():
return AbilityNotImplemented
def shard_phoenix():
return AbilityNotImplemented
def shard_phoenix():
return AbilityNotImplemented
return shard_phoenix, shard_phoenix, shard_phoenix,
@card("Skyshroud Archer")
def skyshroud_archer(card, abilities):
def skyshroud_archer():
return AbilityNotImplemented
return skyshroud_archer,
@card("Mask of the Mimic")
def mask_of_the_mimic(card, abilities):
def mask_of_the_mimic():
return AbilityNotImplemented
def mask_of_the_mimic():
return AbilityNotImplemented
return mask_of_the_mimic, mask_of_the_mimic,
@card("Provoke")
def provoke(card, abilities):
def provoke():
return AbilityNotImplemented
def provoke():
return AbilityNotImplemented
return provoke, provoke,
@card("Duct Crawler")
def duct_crawler(card, abilities):
def duct_crawler():
return AbilityNotImplemented
return duct_crawler,
@card("Nomads en-Kor")
def nomads_enkor(card, abilities):
def nomads_enkor():
return AbilityNotImplemented
return nomads_enkor,
@card("Change of Heart")
def change_of_heart(card, abilities):
def change_of_heart():
return AbilityNotImplemented
def change_of_heart():
return AbilityNotImplemented
return change_of_heart, change_of_heart,
@card("Overgrowth")
def overgrowth(card, abilities):
def overgrowth():
return AbilityNotImplemented
def overgrowth():
return AbilityNotImplemented
return overgrowth, overgrowth,
@card("Pursuit of Knowledge")
def pursuit_of_knowledge(card, abilities):
def pursuit_of_knowledge():
return AbilityNotImplemented
def pursuit_of_knowledge():
return AbilityNotImplemented
return pursuit_of_knowledge, pursuit_of_knowledge,
@card("Sift")
def sift(card, abilities):
def sift():
return AbilityNotImplemented
return sift,
@card("Portcullis")
def portcullis(card, abilities):
def portcullis():
return AbilityNotImplemented
return portcullis,
@card("Dream Halls")
def dream_halls(card, abilities):
def dream_halls():
return AbilityNotImplemented
return dream_halls,
@card("Ruination")
def ruination(card, abilities):
def ruination():
return AbilityNotImplemented
return ruination,
@card("Horn of Greed")
def horn_of_greed(card, abilities):
def horn_of_greed():
return AbilityNotImplemented
return horn_of_greed,
@card("Hermit Druid")
def hermit_druid(card, abilities):
def hermit_druid():
return AbilityNotImplemented
return hermit_druid,
@card("Spined Sliver")
def spined_sliver(card, abilities):
def spined_sliver():
return AbilityNotImplemented
return spined_sliver,
@card("Dream Prowler")
def dream_prowler(card, abilities):
def dream_prowler():
return AbilityNotImplemented
return dream_prowler,
@card("Spike Soldier")
def spike_soldier(card, abilities):
def spike_soldier():
return AbilityNotImplemented
def spike_soldier():
return AbilityNotImplemented
def spike_soldier():
return AbilityNotImplemented
return spike_soldier, spike_soldier, spike_soldier,
@card("Tortured Existence")
def tortured_existence(card, abilities):
def tortured_existence():
return AbilityNotImplemented
return tortured_existence,
@card("Mana Leak")
def mana_leak(card, abilities):
def mana_leak():
return AbilityNotImplemented
return mana_leak,
@card("Mob Justice")
def mob_justice(card, abilities):
def mob_justice():
return AbilityNotImplemented
return mob_justice,
@card("Primal Rage")
def primal_rage(card, abilities):
def primal_rage():
return AbilityNotImplemented
return primal_rage,
@card("Constant Mists")
def constant_mists(card, abilities):
def constant_mists():
return AbilityNotImplemented
def constant_mists():
return AbilityNotImplemented
return constant_mists, constant_mists,
@card("Crystalline Sliver")
def crystalline_sliver(card, abilities):
def crystalline_sliver():
return AbilityNotImplemented
return crystalline_sliver,
@card("Conviction")
def conviction(card, abilities):
def conviction():
return AbilityNotImplemented
def conviction():
return AbilityNotImplemented
def conviction():
return AbilityNotImplemented
return conviction, conviction, conviction,
@card("Death Stroke")
def death_stroke(card, abilities):
def death_stroke():
return AbilityNotImplemented
return death_stroke,
@card("Mindwarper")
def mindwarper(card, abilities):
def mindwarper():
return AbilityNotImplemented
def mindwarper():
return AbilityNotImplemented
return mindwarper, mindwarper,
@card("Silver Wyvern")
def silver_wyvern(card, abilities):
def silver_wyvern():
return AbilityNotImplemented
def silver_wyvern():
return AbilityNotImplemented
return silver_wyvern, silver_wyvern,
@card("Mind Peel")
def mind_peel(card, abilities):
def mind_peel():
return AbilityNotImplemented
def mind_peel():
return AbilityNotImplemented
return mind_peel, mind_peel,
@card("Scapegoat")
def scapegoat(card, abilities):
def scapegoat():
return AbilityNotImplemented
def scapegoat():
return AbilityNotImplemented
return scapegoat, scapegoat,
@card("Mind Games")
def mind_games(card, abilities):
def mind_games():
return AbilityNotImplemented
def mind_games():
return AbilityNotImplemented
return mind_games, mind_games,
@card("Flame Wave")
def flame_wave(card, abilities):
def flame_wave():
return AbilityNotImplemented
return flame_wave,
@card("Dungeon Shade")
def dungeon_shade(card, abilities):
def dungeon_shade():
return AbilityNotImplemented
def dungeon_shade():
return AbilityNotImplemented
return dungeon_shade, dungeon_shade,
@card("Convulsing Licid")
def convulsing_licid(card, abilities):
def convulsing_licid():
return AbilityNotImplemented
def convulsing_licid():
return AbilityNotImplemented
return convulsing_licid, convulsing_licid,
@card("Hesitation")
def hesitation(card, abilities):
def hesitation():
return AbilityNotImplemented
return hesitation,
@card("Lab Rats")
def lab_rats(card, abilities):
def lab_rats():
return AbilityNotImplemented
def lab_rats():
return AbilityNotImplemented
return lab_rats, lab_rats,
@card("Samite Blessing")
def samite_blessing(card, abilities):
def samite_blessing():
return AbilityNotImplemented
def samite_blessing():
return AbilityNotImplemented
return samite_blessing, samite_blessing,
@card("Lancers en-Kor")
def lancers_enkor(card, abilities):
def lancers_enkor():
return AbilityNotImplemented
def lancers_enkor():
return AbilityNotImplemented
return lancers_enkor, lancers_enkor,
@card("Heat of Battle")
def heat_of_battle(card, abilities):
def heat_of_battle():
return AbilityNotImplemented
return heat_of_battle,
@card("Contempt")
def contempt(card, abilities):
def contempt():
return AbilityNotImplemented
def contempt():
return AbilityNotImplemented
return contempt, contempt,
@card("Verdant Touch")
def verdant_touch(card, abilities):
def verdant_touch():
return AbilityNotImplemented
def verdant_touch():
return AbilityNotImplemented
return verdant_touch, verdant_touch,
@card("Torment")
def torment(card, abilities):
def torment():
return AbilityNotImplemented
def torment():
return AbilityNotImplemented
return torment, torment,
@card("Lowland Basilisk")
def lowland_basilisk(card, abilities):
def lowland_basilisk():
return AbilityNotImplemented
return lowland_basilisk,
@card("Skeleton Scavengers")
def skeleton_scavengers(card, abilities):
def skeleton_scavengers():
return AbilityNotImplemented
def skeleton_scavengers():
return AbilityNotImplemented
return skeleton_scavengers, skeleton_scavengers,
@card("Ransack")
def ransack(card, abilities):
def ransack():
return AbilityNotImplemented
return ransack,
@card("Mox Diamond")
def mox_diamond(card, abilities):
def mox_diamond():
return AbilityNotImplemented
def mox_diamond():
return AbilityNotImplemented
return mox_diamond, mox_diamond,
@card("Elven Rite")
def elven_rite(card, abilities):
def elven_rite():
return AbilityNotImplemented
return elven_rite,
@card("Hammerhead Shark")
def hammerhead_shark(card, abilities):
def hammerhead_shark():
return AbilityNotImplemented
return hammerhead_shark,
@card("Mortuary")
def mortuary(card, abilities):
def mortuary():
return AbilityNotImplemented
return mortuary,
@card("Jinxed Ring")
def jinxed_ring(card, abilities):
def jinxed_ring():
return AbilityNotImplemented
def jinxed_ring():
return AbilityNotImplemented
return jinxed_ring, jinxed_ring,
@card("Carnassid")
def carnassid(card, abilities):
def carnassid():
return AbilityNotImplemented
def carnassid():
return AbilityNotImplemented
return carnassid, carnassid,
@card("Heartstone")
def heartstone(card, abilities):
def heartstone():
return AbilityNotImplemented
return heartstone,
@card("Rebound")
def rebound(card, abilities):
def rebound():
return AbilityNotImplemented
return rebound,
@card("Mulch")
def mulch(card, abilities):
def mulch():
return AbilityNotImplemented
return mulch,
@card("Skyshroud Falcon")
def skyshroud_falcon(card, abilities):
def skyshroud_falcon():
return AbilityNotImplemented
return skyshroud_falcon,
@card("Rolling Stones")
def rolling_stones(card, abilities):
def rolling_stones():
return AbilityNotImplemented
return rolling_stones,
@card("Spindrift Drake")
def spindrift_drake(card, abilities):
def spindrift_drake():
return AbilityNotImplemented
def spindrift_drake():
return AbilityNotImplemented
return spindrift_drake, spindrift_drake,
@card("Hornet Cannon")
def hornet_cannon(card, abilities):
def hornet_cannon():
return AbilityNotImplemented
return hornet_cannon,
@card("Mogg Bombers")
def mogg_bombers(card, abilities):
def mogg_bombers():
return AbilityNotImplemented
return mogg_bombers,
@card("Smite")
def smite(card, abilities):
def smite():
return AbilityNotImplemented
return smite,
@card("Victual Sliver")
def victual_sliver(card, abilities):
def victual_sliver():
return AbilityNotImplemented
return victual_sliver,
@card("Volrath's Laboratory")
def volraths_laboratory(card, abilities):
def volraths_laboratory():
return AbilityNotImplemented
def volraths_laboratory():
return AbilityNotImplemented
return volraths_laboratory, volraths_laboratory,
@card("Spike Feeder")
def spike_feeder(card, abilities):
def spike_feeder():
return AbilityNotImplemented
def spike_feeder():
return AbilityNotImplemented
def spike_feeder():
return AbilityNotImplemented
return spike_feeder, spike_feeder, spike_feeder,
@card("Wall of Tears")
def wall_of_tears(card, abilities):
def wall_of_tears():
return AbilityNotImplemented
def wall_of_tears():
return AbilityNotImplemented
return wall_of_tears, wall_of_tears,
@card("Evacuation")
def evacuation(card, abilities):
def evacuation():
return AbilityNotImplemented
return evacuation,
@card("Bullwhip")
def bullwhip(card, abilities):
def bullwhip():
return AbilityNotImplemented
return bullwhip,
@card("Mogg Flunkies")
def mogg_flunkies(card, abilities):
def mogg_flunkies():
return AbilityNotImplemented
return mogg_flunkies,
@card("Ensnaring Bridge")
def ensnaring_bridge(card, abilities):
def ensnaring_bridge():
return AbilityNotImplemented
return ensnaring_bridge,
@card("Skyshroud Troopers")
def skyshroud_troopers(card, abilities):
def skyshroud_troopers():
return AbilityNotImplemented
return skyshroud_troopers,
@card("Sword of the Chosen")
def sword_of_the_chosen(card, abilities):
def sword_of_the_chosen():
return AbilityNotImplemented
return sword_of_the_chosen,
@card("Spike Breeder")
def spike_breeder(card, abilities):
def spike_breeder():
return AbilityNotImplemented
def spike_breeder():
return AbilityNotImplemented
def spike_breeder():
return AbilityNotImplemented
return spike_breeder, spike_breeder, spike_breeder,
@card("Reins of Power")
def reins_of_power(card, abilities):
def reins_of_power():
return AbilityNotImplemented
return reins_of_power,
@card("Warrior en-Kor")
def warrior_enkor(card, abilities):
def warrior_enkor():
return AbilityNotImplemented
return warrior_enkor,
@card("Mogg Infestation")
def mogg_infestation(card, abilities):
def mogg_infestation():
return AbilityNotImplemented
return mogg_infestation,
@card("Youthful Knight")
def youthful_knight(card, abilities):
def youthful_knight():
return AbilityNotImplemented
return youthful_knight,
@card("Fanning the Flames")
def fanning_the_flames(card, abilities):
def fanning_the_flames():
return AbilityNotImplemented
def fanning_the_flames():
return AbilityNotImplemented
return fanning_the_flames, fanning_the_flames,
@card("Foul Imp")
def foul_imp(card, abilities):
def foul_imp():
return AbilityNotImplemented
def foul_imp():
return AbilityNotImplemented
return foul_imp, foul_imp,
@card("Hibernation Sliver")
def hibernation_sliver(card, abilities):
def hibernation_sliver():
return AbilityNotImplemented
return hibernation_sliver,
@card("Tidal Warrior")
def tidal_warrior(card, abilities):
def tidal_warrior():
return AbilityNotImplemented
return tidal_warrior,
@card("Spitting Hydra")
def spitting_hydra(card, abilities):
def spitting_hydra():
return AbilityNotImplemented
def spitting_hydra():
return AbilityNotImplemented
return spitting_hydra, spitting_hydra,
@card("Corrupting Licid")
def corrupting_licid(card, abilities):
def corrupting_licid():
return AbilityNotImplemented
def corrupting_licid():
return AbilityNotImplemented
return corrupting_licid, corrupting_licid,
@card("Volrath's Gardens")
def volraths_gardens(card, abilities):
def volraths_gardens():
return AbilityNotImplemented
return volraths_gardens,
@card("Volrath's Shapeshifter")
def volraths_shapeshifter(card, abilities):
def volraths_shapeshifter():
return AbilityNotImplemented
def volraths_shapeshifter():
return AbilityNotImplemented
return volraths_shapeshifter, volraths_shapeshifter,
@card("Mogg Maniac")
def mogg_maniac(card, abilities):
def mogg_maniac():
return AbilityNotImplemented
return mogg_maniac,
@card("Hidden Retreat")
def hidden_retreat(card, abilities):
def hidden_retreat():
return AbilityNotImplemented
return hidden_retreat,
@card("Stronghold Assassin")
def stronghold_assassin(card, abilities):
def stronghold_assassin():
return AbilityNotImplemented
return stronghold_assassin,
@card("Tempting Licid")
def tempting_licid(card, abilities):
def tempting_licid():
return AbilityNotImplemented
def tempting_licid():
return AbilityNotImplemented
return tempting_licid, tempting_licid,
@card("Wall of Blossoms")
def wall_of_blossoms(card, abilities):
def wall_of_blossoms():
return AbilityNotImplemented
def wall_of_blossoms():
return AbilityNotImplemented
return wall_of_blossoms, wall_of_blossoms,
@card("Awakening")
def awakening(card, abilities):
def awakening():
return AbilityNotImplemented
return awakening,
@card("Leap")
def leap(card, abilities):
def leap():
return AbilityNotImplemented
def leap():
return AbilityNotImplemented
return leap, leap,
@card("Revenant")
def revenant(card, abilities):
def revenant():
return AbilityNotImplemented
def revenant():
return AbilityNotImplemented
return revenant, revenant,
@card("Soltari Champion")
def soltari_champion(card, abilities):
def soltari_champion():
return AbilityNotImplemented
def soltari_champion():
return AbilityNotImplemented
return soltari_champion, soltari_champion,
@card("Honor Guard")
def honor_guard(card, abilities):
def honor_guard():
return AbilityNotImplemented
return honor_guard,
@card("Wall of Essence")
def wall_of_essence(card, abilities):
def wall_of_essence():
return AbilityNotImplemented
def wall_of_essence():
return AbilityNotImplemented
return wall_of_essence, wall_of_essence,
@card("Flowstone Mauler")
def flowstone_mauler(card, abilities):
def flowstone_mauler():
return AbilityNotImplemented
def flowstone_mauler():
return AbilityNotImplemented
return flowstone_mauler, flowstone_mauler,
@card("Shaman en-Kor")
def shaman_enkor(card, abilities):
def shaman_enkor():
return AbilityNotImplemented
def shaman_enkor():
return AbilityNotImplemented
return shaman_enkor, shaman_enkor,
@card("Calming Licid")
def calming_licid(card, abilities):
def calming_licid():
return AbilityNotImplemented
def calming_licid():
return AbilityNotImplemented
return calming_licid, calming_licid,
@card("Flowstone Hellion")
def flowstone_hellion(card, abilities):
def flowstone_hellion():
return AbilityNotImplemented
def flowstone_hellion():
return AbilityNotImplemented
return flowstone_hellion, flowstone_hellion,
@card("Wall of Souls")
def wall_of_souls(card, abilities):
def wall_of_souls():
return AbilityNotImplemented
def wall_of_souls():
return AbilityNotImplemented
return wall_of_souls, wall_of_souls,
@card("Flowstone Blade")
def flowstone_blade(card, abilities):
def flowstone_blade():
return AbilityNotImplemented
def flowstone_blade():
return AbilityNotImplemented
return flowstone_blade, flowstone_blade,
@card("Flowstone Shambler")
def flowstone_shambler(card, abilities):
def flowstone_shambler():
return AbilityNotImplemented
return flowstone_shambler,
@card("Bandage")
def bandage(card, abilities):
def bandage():
return AbilityNotImplemented
def bandage():
return AbilityNotImplemented
return bandage, bandage,
@card("Amok")
def amok(card, abilities):
def amok():
return AbilityNotImplemented
return amok,
@card("Spirit en-Kor")
def spirit_enkor(card, abilities):
def spirit_enkor():
return AbilityNotImplemented
def spirit_enkor():
return AbilityNotImplemented
return spirit_enkor, spirit_enkor,
@card("Sliver Queen")
def sliver_queen(card, abilities):
def sliver_queen():
return AbilityNotImplemented
return sliver_queen,
@card("Gliding Licid")
def gliding_licid(card, abilities):
def gliding_licid():
return AbilityNotImplemented
def gliding_licid():
return AbilityNotImplemented
return gliding_licid, gliding_licid,
@card("Stronghold Taskmaster")
def stronghold_taskmaster(card, abilities):
def stronghold_taskmaster():
return AbilityNotImplemented
return stronghold_taskmaster,
@card("Brush with Death")
def brush_with_death(card, abilities):
def brush_with_death():
return AbilityNotImplemented
def brush_with_death():
return AbilityNotImplemented
return brush_with_death, brush_with_death,
@card("Grave Pact")
def grave_pact(card, abilities):
def grave_pact():
return AbilityNotImplemented
return grave_pact,
@card("Wall of Razors")
def wall_of_razors(card, abilities):
def wall_of_razors():
return AbilityNotImplemented
def wall_of_razors():
return AbilityNotImplemented
return wall_of_razors, wall_of_razors,
@card("Temper")
def temper(card, abilities):
def temper():
return AbilityNotImplemented
return temper,
@card("Walking Dream")
def walking_dream(card, abilities):
def walking_dream():
return AbilityNotImplemented
def walking_dream():
return AbilityNotImplemented
return walking_dream, walking_dream,
@card("Invasion Plans")
def invasion_plans(card, abilities):
def invasion_plans():
return AbilityNotImplemented
def invasion_plans():
return AbilityNotImplemented
return invasion_plans, invasion_plans,
@card("Fling")
def fling(card, abilities):
def fling():
return AbilityNotImplemented
def fling():
return AbilityNotImplemented
return fling, fling,
@card("Sacred Ground")
def sacred_ground(card, abilities):
def sacred_ground():
return AbilityNotImplemented
return sacred_ground,
@card("Volrath's Stronghold")
def volraths_stronghold(card, abilities):
def volraths_stronghold():
return AbilityNotImplemented
def volraths_stronghold():
return AbilityNotImplemented
return volraths_stronghold, volraths_stronghold,
@card("Spike Colony")
def spike_colony(card, abilities):
def spike_colony():
return AbilityNotImplemented
def spike_colony():
return AbilityNotImplemented
return spike_colony, spike_colony,
@card("Thalakos Deceiver")
def thalakos_deceiver(card, abilities):
def thalakos_deceiver():
return AbilityNotImplemented
def thalakos_deceiver():
return AbilityNotImplemented
return thalakos_deceiver, thalakos_deceiver,
@card("Warrior Angel")
def warrior_angel(card, abilities):
def warrior_angel():
return AbilityNotImplemented
def warrior_angel():
return AbilityNotImplemented
return warrior_angel, warrior_angel,
@card("Furnace Spirit")
def furnace_spirit(card, abilities):
def furnace_spirit():
return AbilityNotImplemented
def furnace_spirit():
return AbilityNotImplemented
return furnace_spirit, furnace_spirit,
@card("Burgeoning")
def burgeoning(card, abilities):
def burgeoning():
return AbilityNotImplemented
return burgeoning,
@card("Bottomless Pit")
def bottomless_pit(card, abilities):
def bottomless_pit():
return AbilityNotImplemented
return bottomless_pit,
@card("Endangered Armodon")
def endangered_armodon(card, abilities):
def endangered_armodon():
return AbilityNotImplemented
return endangered_armodon,
@card("Acidic Sliver")
def acidic_sliver(card, abilities):
def acidic_sliver():
return AbilityNotImplemented
return acidic_sliver,
@card("Crossbow Ambush")
def crossbow_ambush(card, abilities):
def crossbow_ambush():
return AbilityNotImplemented
return crossbow_ambush,
@card("Shock")
def shock(card, abilities):
def shock():
return AbilityNotImplemented
return shock,
@card("Seething Anger")
def seething_anger(card, abilities):
def seething_anger():
return AbilityNotImplemented
def seething_anger():
return AbilityNotImplemented
return seething_anger, seething_anger,
|
Julian/cardboard
|
cardboard/cards/sets/stronghold.py
|
Python
|
mit
| 26,294 | 0.000038 |
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
UNESCO-IHE 2017
Contact: t.hessels@unesco-ihe.org
Repository: https://github.com/wateraccounting/wa
Module: Collect/JRC
Description:
This module downloads JRC water occurrence data from http://storage.googleapis.com/global-surface-water/downloads/.
Use the JRC.Occurrence function to
download and create a water occurrence image in GeoTIFF format.
The data represents the period 1984-2015.
Examples:
from wa.Collect import JRC
JRC.Occurrence(Dir='C:/Temp3/', latlim=[41, 45], lonlim=[-8, -5])
"""
from .Occurrence import main as Occurrence
__all__ = ['Occurrence']
__version__ = '0.1'
|
wateraccounting/wa
|
Collect/JRC/__init__.py
|
Python
|
apache-2.0
| 647 | 0.003091 |
drunkenDoodling = {
'ghost': "Salt and iron, and don't forget to burn the corpse",
'wendigo': 'Burn it to death',
'phoenix': 'Use the colt',
'angel': 'Use the angelic blade',
'werewolf': 'Silver knife or bullet to the heart',
'shapeshifter': 'Silver knife or bullet to the heart',
'rugaru': 'Burn it alive',
'reaper': "If it's nasty, you should gank who controls it",
'demon': "Use Ruby's knife, or some Jesus-juice",
'vampire': 'Behead it with a machete',
'dragon': 'You have to find the excalibur for that',
'leviathan': 'Use some Borax, then kill Dick',
'witch': 'They are humans',
'djinn': "Stab it with silver knife dipped in a lamb's blood",
'pagan god': 'It depends on which one it is',
'skinwalker': 'A silver bullet will do it',
'jefferson starship': 'Behead it with a silver blade',
'ghoul': 'Behead it'
}
def bob(what):
return '{}, idjits!'.format(drunkenDoodling.get(
what, 'I have friggin no idea yet'))
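# Illustrative usage (not part of the kata tests):
#   bob('ghost')   -> "Salt and iron, and don't forget to burn the corpse, idjits!"
#   bob('unicorn') -> 'I have friggin no idea yet, idjits!'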
|
the-zebulan/CodeWars
|
katas/kyu_7/supernatural.py
|
Python
|
mit
| 1,003 | 0 |
"""
Created on 9 Nov 2012
@author: plish
"""
class TrelloObject(object):
"""
This class is a base object that should be used by all trello objects;
Board, List, Card, etc. It contains methods needed and used by all those
objects and masks the client calls as methods belonging to the object.
"""
def __init__(self, trello_client):
"""
A Trello client, OAuth or HTTP client, is required for each object.
"""
super(TrelloObject, self).__init__()
self.client = trello_client
def fetch_json(self, uri_path, http_method='GET', query_params=None, body=None, headers=None):
return self.client.fetch_json(
uri_path=uri_path,
http_method=http_method,
query_params=query_params or {},
body=body,
headers=headers or {}
)
def get_organisations_json(self, base_uri):
return self.fetch_json(base_uri + '/organization')
def get_boards_json(self, base_uri):
return self.fetch_json(base_uri + '/boards')
def get_board_json(self, base_uri):
return self.fetch_json(base_uri + '/board')
def get_lists_json(self, base_uri):
return self.fetch_json(base_uri + '/lists')
def get_list_json(self, base_uri):
return self.fetch_json(base_uri + '/list')
def get_cards_json(self, base_uri):
return self.fetch_json(base_uri + '/cards')
def get_checklist_json(self, base_uri):
return self.fetch_json(base_uri + '/checklists')
def get_members_json(self, base_uri):
return self.fetch_json(base_uri + '/members')
def create_organisation(self, organisation_json, **kwargs):
return self.client.create_organisation(organisation_json, **kwargs)
def create_board(self, board_json, **kwargs):
return self.client.create_board(board_json, **kwargs)
def create_list(self, list_json, **kwargs):
return self.client.create_list(list_json, **kwargs)
def create_card(self, card_json, **kwargs):
return self.client.create_card(card_json, **kwargs)
def create_checklist(self, checklist_json, **kwargs):
return self.client.create_checklist(checklist_json, **kwargs)
def create_member(self, member_json, **kwargs):
return self.client.create_member(member_json, **kwargs)
# Deprecated method names
def fetchJson(self, uri_path, http_method='GET', query_params=None, body=None, headers=None):
return self.fetch_json(uri_path, http_method, query_params or {}, body, headers or {})
def getOrganisationsJson(self, base_uri):
return self.get_organisations_json(base_uri)
def getBoardsJson(self, base_uri):
return self.get_boards_json(base_uri)
def getBoardJson(self, base_uri):
return self.get_board_json(base_uri)
def getListsJson(self, base_uri):
return self.get_lists_json(base_uri)
def getListJson(self, base_uri):
return self.get_list_json(base_uri)
def getCardsJson(self, base_uri):
return self.get_cards_json(base_uri)
def getChecklistsJson(self, base_uri):
return self.get_checklist_json(base_uri)
def getMembersJson(self, base_uri):
return self.get_members_json(base_uri)
def createOrganisation(self, organisation_json, **kwargs):
return self.create_organisation(organisation_json, **kwargs)
def createBoard(self, board_json, **kwargs):
return self.create_board(board_json, **kwargs)
def createList(self, list_json, **kwargs):
return self.create_list(list_json, **kwargs)
def createCard(self, card_json, **kwargs):
return self.create_card(card_json, **kwargs)
def createChecklist(self, checklist_json, **kwargs):
return self.create_checklist(checklist_json, **kwargs)
def createMember(self, member_json, **kwargs):
return self.create_member(member_json, **kwargs)
|
Oksisane/RSS-Bot
|
Trolly-master/trolly/trelloobject.py
|
Python
|
gpl-3.0
| 3,944 | 0.000761 |
# -*- coding: iso-8859-1 -*-
#
# Copyright (C) 2001 - 2020 Massimo Gerardi all rights reserved.
#
# Author: Massimo Gerardi massimo.gerardi@gmail.com
#
# Copyright (c) 2020 Qsistemi.com. All rights reserved.
#
# Viale Giorgio Ribotta, 11 (Roma)
# 00144 Roma (RM) - Italy
# Phone: (+39) 06.87.163
#
#
# See the COPYING file for the software licensing terms.
#
# www.qsistemi.com - italy@qsistemi.com
import wx
from cfg import *
import cfg
ttl=_("Ricerca Anagrafica")
def create(parent,cnt):
return SrcTblCF(parent,cnt)
#---------------------------------------------------------------------------
class SrcTblCF(wx.MiniFrame):
def __init__(self, prnt, cnt):
wx.MiniFrame.__init__(self, id=wx.NewId(), name='',
parent=prnt, pos=wx.Point(0, 0),
style=wx.DEFAULT_FRAME_STYLE, title=ttl)
self.SetIcon(wx.Icon(cfg.path_img+"/null.ico", wx.BITMAP_TYPE_ICO))
#self.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL, False))
self.cnt = cnt
self.tcpart = cnt[0]
Nid = wx.NewId()
self.CnAz = prnt.CnAz
#self.font = self.GetFont()
self.__FRM__ = prnt.__FRM__
self.__MDI__ = wx.GetApp().GetPhasisMdi()
self.font=self.__MDI__.font
self.SetFont(self.font)
self.pnl = wx.Panel(id = wx.NewId(), name = 'panel',
parent = self, pos = wx.Point(0, 0))
self.pnl.SetFont(self.font)
self.lc = wx.ListCtrl(self.pnl , Nid,
wx.DLG_PNT(self, 5,5), wx.DLG_SZE(self.pnl , 335,110),
wx.LC_REPORT|wx.LC_HRULES|wx.LC_VRULES)
self.lc.InsertColumn(0, _("Codice"))
self.lc.InsertColumn(1, _("Rag. Soc.1 Cognome"))
self.lc.InsertColumn(2, _("Rag. Soc.2 Nome"))
self.lc.InsertColumn(3, _("Indirizzo"))
self.lc.InsertColumn(4, _("Telefono"))
self.lc.InsertColumn(5, _("Ufficio"))
self.lc.InsertColumn(6, _("Fax"))
self.lc.SetColumnWidth(0, wx.DLG_SZE(self, 30,-1).width)
self.lc.SetColumnWidth(1, wx.DLG_SZE(self, 70,-1).width)
self.lc.SetColumnWidth(2, wx.DLG_SZE(self, 70,-1).width)
self.lc.SetColumnWidth(3, wx.DLG_SZE(self, 100,-1).width)
self.lc.SetColumnWidth(4, wx.DLG_SZE(self, 60,-1).width)
self.lc.SetColumnWidth(5, wx.DLG_SZE(self, 60,-1).width)
self.lc.SetColumnWidth(6, wx.DLG_SZE(self, 60,-1).width)
#self.lc.SetFont(self.font)
rowlc=0
codcf = self.cnt[4].GetValue()
val=self.cnt[2].GetValue().upper()
sql = """ SELECT * FROM tblcf WHERE rag_soc1 like '%s'
AND t_cpart = '%s' AND CODCF = '%s'
ORDER BY rag_soc1 DESC """
valueSql = '%'+val.title()+'%', self.tcpart, int(codcf)
try:
cr = self.CnAz.cursor ()
cr.execute(sql % valueSql)
rows = cr.fetchall()
for row in rows:
for rowlc in range(1):
row_lc = self.lc.GetItemCount()
t_cpart = str(row[0])
cod = str(row[1])
ragsoc1 = str(row[3]).title()
ragsoc2 = str(row[4]).title()
indiriz = str(row[6]).title()
tel_abi = str(row[12])
tel_uff = str(row[13])
fax = str(row[14])
self.lc.InsertStringItem(rowlc, cod)
self.lc.SetStringItem(rowlc, 1, ragsoc1)
self.lc.SetStringItem(rowlc, 2, ragsoc2)
self.lc.SetStringItem(rowlc, 3, indiriz)
self.lc.SetStringItem(rowlc, 4, tel_abi)
self.lc.SetStringItem(rowlc, 5, tel_uff)
self.lc.SetStringItem(rowlc, 6, fax)
self.lc.SetItemData(1,0)
except StandardError, msg:
self.__FRM__.MsgErr("scrtblcf"," Cerca tblcf Error %s" % (msg))
self.CnAz.commit()
self.currentItem = 0
wx.StaticLine(self.pnl , -1, wx.DLG_PNT(self.pnl , 5,115),
wx.DLG_SZE(self.pnl , 300,-1))
self.ok = wx.Button(self.pnl , Nid, cfg.vcconf,
wx.DLG_PNT(self.pnl , 195,120),
wx.DLG_SZE(self.pnl,cfg.btnSzeLH,cfg.btnSzeV))
self.canc = wx.Button(self.pnl , Nid, cfg.vccanc,
wx.DLG_PNT(self.pnl , 255,120),
wx.DLG_SZE(self.pnl,cfg.btnSzeLH,cfg.btnSzeV))
for x in self.pnl.GetChildren(): x.SetFont(self.font)
self.canc.SetFocus()
#self.SetFont(self.font)
box_sizer = wx.BoxSizer(wx.VERTICAL)
box_sizer.Add(self.pnl, 0, wx.EXPAND|wx.ALL,0)
self.SetAutoLayout(1)
self.SetSizer(box_sizer)
box_sizer.Fit(self)
self.canc.Bind(wx.EVT_BUTTON, self.Close)
self.ok.Bind(wx.EVT_BUTTON, self.Ok)
self.lc.Bind(wx.EVT_LEFT_DCLICK, self.DblClick)
self.lc.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.LstAct)
self.lc.Bind(wx.EVT_LIST_ITEM_SELECTED, self.LstSlct)
self.Bind(wx.EVT_CLOSE, self.Close)
def Ok(self, event):
self.DblClick(self.currentItem)
def Close(self, event):
#wx.GetApp().GetPhasisMdi().CloseTabObj(self)
self.Destroy()
def getColTxt(self, index, col):
item = self.lc.GetItem(index, col)
return item.GetText()
def DblClick(self, event):
self.cnt[1].SetValue(self.lc.GetItemText(self.currentItem))
self.cnt[2].SetValue(self.getColTxt(self.currentItem, 1))
self.cnt[3](self)
self.Destroy()
def LstSlct(self, event):
self.currentItem = event.m_itemIndex
def LstAct(self, event):
self.currentItem = event.m_itemIndex
self.DblClick(self)
|
phasis/phasis
|
phasis/base/srctblcf.py
|
Python
|
gpl-2.0
| 5,891 | 0.016466 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Multi Store',
'version': '8.0.1.0.0',
'category': 'Accounting',
'sequence': 14,
'summary': '',
'description': """
Multi Store
===========
The main purpose of this module is to restrict journal access for users on different stores.
This module adds a new concept, "stores", in some ways similar to multicompany.
Similar to multicompany:
* A user can have multiple stores available (store_ids)
* A user can be active in only one store (store_id), which can be set up in his own preferences
* There is a group "multi store" that gives users the ability to see multi store fields
This module also adds a store_id field on journals:
* If store_id = False then the journal can be seen by everyone
* If store_id is set, then the journal can be seen by users on that store and parent stores
It also restricts editing, creation and unlinking on account.move, account.invoice and account.voucher,
with the same logic as for journals. We do not limit "read" on these models because users may need to access those documents, for example, to see a partner's due amounts.
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'license': 'AGPL-3',
'images': [
],
'depends': [
'account_voucher',
],
'data': [
'views/res_store_view.xml',
'views/res_users_view.xml',
'views/account_view.xml',
'security/multi_store_security.xml',
'security/ir.model.access.csv',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sysadminmatmoz/ingadhoc
|
multi_store/__openerp__.py
|
Python
|
agpl-3.0
| 2,617 | 0.003821 |
from xmcam import *
from xmconst import *
from time import sleep
CAM_IP = '192.168.1.10'
CAM_PORT = 34567
if __name__ == '__main__':
xm = XMCam(CAM_IP, CAM_PORT, 'admin', 'admin')
login = xm.cmd_login()
print(login)
print(xm.cmd_system_function())
print(xm.cmd_system_info())
print(xm.cmd_channel_title())
print(xm.cmd_OEM_info())
print(xm.cmd_storage_info())
print(xm.cmd_users())
print(xm.cmd_ptz_control(PTZ_LEFT))
sleep(1)
print(xm.cmd_ptz_control(PTZ_LEFT, True))
cfg = xm.cmd_config_export('export.cfg')
print('Config ==>', cfg)
snap = xm.cmd_snap('test.jpg')
print('SNAP ==>', snap)
|
janglapuk/xiongmai-cam-api
|
example.py
|
Python
|
mit
| 660 | 0 |
"""
Store status messages in the database.
"""
from django.db import models
from django.contrib import admin
from django.core.cache import cache
from xmodule_django.models import CourseKeyField
from config_models.models import ConfigurationModel
from config_models.admin import ConfigurationModelAdmin
class GlobalStatusMessage(ConfigurationModel):
"""
Model that represents the current status message.
"""
message = models.TextField(blank=True, null=True)
def full_message(self, course_key):
""" Returns the full status message, including any course-specific status messages. """
cache_key = "status_message.{course_id}".format(course_id=unicode(course_key))
if cache.get(cache_key):
return cache.get(cache_key)
msg = self.message
if course_key:
try:
course_message = self.coursemessage_set.get(course_key=course_key)
# Don't add the message if course_message is blank.
if course_message:
msg = u"{} <br /> {}".format(msg, course_message.message)
except CourseMessage.DoesNotExist:
# We don't have a course-specific message, so pass.
pass
cache.set(cache_key, msg)
return msg
def __unicode__(self):
return "{} - {} - {}".format(self.change_date, self.enabled, self.message)
class CourseMessage(models.Model):
"""
Model that allows the user to specify messages for individual courses.
This is not a ConfigurationModel because using it's not designed to support multiple configurations at once,
which would be problematic if separate courses need separate error messages.
"""
global_message = models.ForeignKey(GlobalStatusMessage)
course_key = CourseKeyField(max_length=255, blank=True, db_index=True)
message = models.TextField(blank=True, null=True)
def __unicode__(self):
return unicode(self.course_key)
admin.site.register(GlobalStatusMessage, ConfigurationModelAdmin)
admin.site.register(CourseMessage)
|
ahmadiga/min_edx
|
common/djangoapps/status/models.py
|
Python
|
agpl-3.0
| 2,090 | 0.002871 |
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print(f"X_train.shape = {X_train.shape}")
print(f"X_train.format = {X_train.format}")
print(f"X_train.dtype = {X_train.dtype}")
print(f"X_train density = {X_train.nnz / np.product(X_train.shape)}")
print(f"y_train {y_train.shape}")
print(f"X_test {X_test.shape}")
print(f"X_test.format = {X_test.format}")
print(f"X_test.dtype = {X_test.dtype}")
print(f"y_test {y_test.shape}")
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
|
kevin-intel/scikit-learn
|
benchmarks/bench_20newsgroups.py
|
Python
|
bsd-3-clause
| 3,292 | 0 |
"""Queue item for deep analysis by irwin"""
from default_imports import *
from modules.queue.Origin import Origin
from modules.game.Game import PlayerID
from datetime import datetime
import pymongo
from pymongo.collection import Collection
IrwinQueue = NamedTuple('IrwinQueue', [
('id', PlayerID),
('origin', Origin)
])
class IrwinQueueBSONHandler:
@staticmethod
def reads(bson: Dict) -> IrwinQueue:
return IrwinQueue(
id=bson['_id'],
origin=bson['origin'])
@staticmethod
def writes(irwinQueue: IrwinQueue) -> Dict:
return {
'_id': irwinQueue.id,
'origin': irwinQueue.origin,
'date': datetime.now()
}
class IrwinQueueDB(NamedTuple('IrwinQueueDB', [
('irwinQueueColl', Collection)
])):
def write(self, irwinQueue: IrwinQueue):
self.irwinQueueColl.update_one(
{'_id': irwinQueue.id},
{'$set': IrwinQueueBSONHandler.writes(irwinQueue)},
upsert=True)
def removePlayerId(self, playerId: PlayerID):
self.irwinQueueColl.remove({'_id': playerId})
def nextUnprocessed(self) -> Opt[IrwinQueue]:
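        # FIFO pop: atomically fetch and delete the oldest entry (sorted by 'date'); returns None when empty.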
irwinQueueBSON = self.irwinQueueColl.find_one_and_delete(
filter={},
sort=[("date", pymongo.ASCENDING)])
return None if irwinQueueBSON is None else IrwinQueueBSONHandler.reads(irwinQueueBSON)
|
clarkerubber/irwin
|
modules/queue/IrwinQueue.py
|
Python
|
agpl-3.0
| 1,421 | 0.004222 |
#!/usr/bin/env python
import unittest
from SDDetector.Entities.Gene import Gene
from SDDetector.Entities.Transcript import Transcript
from SDDetector.Entities.CDS import CDS
from SDDetector.Parser.Gff.GffGeneParser import GffGeneParser
class TestGffGeneParser(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_getAllGenes(self):
"""Test getAllGenes method"""
iGffGeneParser = GffGeneParser("test-data/gene.gff3")
lGenes = [Gene('G00001','Chr1',23988,24919,-1,[Transcript('G00001.1','Chr1',23988,24919,-1,'G00001',[CDS('G00001.1_cds_1','Chr1',23988,24083, -1, 'G00001.1'),CDS('G00001.1_cds_1','Chr1',24274,24427,-1,'G00001.1'),CDS('G00001.1_cds_1','Chr1',24489,24919,-1,'G00001.1')])])]
self.assertEqual(iGffGeneParser.getAllGenes()[0],lGenes[0])
    def test_getAllGenesTAIR10(self):
        """Test getAllGenes method on the TAIR10 annotation (currently disabled)"""
# iGffGeneParser = GffGeneParser("/home/nlapalu/Workspace/Github/SDDetector/data/arabidopsis/TAIR10.new.gff3")
pass
# lGenes = [Gene('G00001','Chr1',23988,24919,-1,[Transcript('G00001.1','Chr1',23988,24919,-1,'G00001',[CDS('G00001.1_cds_1','Chr1',23988,24083, -1, 'G00001.1'),CDS('G00001.1_cds_1','Chr1',24274,24427,-1,'G00001.1'),CDS('G00001.1_cds_1','Chr1',24489,24919,-1,'G00001.1')])])]
# self.assertEqual(iGffGeneParser.getAllGenes()[0],lGenes[0])
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestGffGeneParser)
unittest.TextTestRunner(verbosity=2).run(suite)
|
nlapalu/SDDetector
|
tests/test_GffGeneParser.py
|
Python
|
gpl-3.0
| 1,549 | 0.020013 |
import numbers
import numpy
import cupy
###############################################################################
# Private utility functions.
def _round_if_needed(arr, dtype):
"""Rounds arr inplace if the destination dtype is an integer.
"""
if cupy.issubdtype(dtype, cupy.integer):
        arr.round(out=arr)  # round in place (see cupy/cupy#2330)
def _slice_at_axis(sl, axis):
"""Constructs a tuple of slices to slice an array in the given dimension.
Args:
sl(slice): The slice for the given dimension.
axis(int): The axis to which `sl` is applied. All other dimensions are
left "unsliced".
Returns:
tuple of slices: A tuple with slices matching `shape` in length.
"""
return (slice(None),) * axis + (sl,) + (Ellipsis,)
def _view_roi(array, original_area_slice, axis):
"""Gets a view of the current region of interest during iterative padding.
When padding multiple dimensions iteratively corner values are
unnecessarily overwritten multiple times. This function reduces the
working area for the first dimensions so that corners are excluded.
Args:
array(cupy.ndarray): The array with the region of interest.
original_area_slice(tuple of slices): Denotes the area with original
values of the unpadded array.
axis(int): The currently padded dimension assuming that `axis` is padded
before `axis` + 1.
    Returns:
        cupy.ndarray: The region of interest of the original `array`.
    """
axis += 1
sl = (slice(None),) * axis + original_area_slice[axis:]
return array[sl]
def _pad_simple(array, pad_width, fill_value=None):
"""Pads an array on all sides with either a constant or undefined values.
Args:
array(cupy.ndarray): Array to grow.
pad_width(sequence of tuple[int, int]): Pad width on both sides for each
dimension in `arr`.
fill_value(scalar, optional): If provided the padded area is
filled with this value, otherwise the pad area left undefined.
(Default value = None)
"""
# Allocate grown array
new_shape = tuple(
left + size + right
for size, (left, right) in zip(array.shape, pad_width)
)
order = 'F' if array.flags.fnc else 'C' # Fortran and not also C-order
padded = cupy.empty(new_shape, dtype=array.dtype, order=order)
if fill_value is not None:
padded.fill(fill_value)
# Copy old array into correct space
original_area_slice = tuple(
slice(left, left + size)
for size, (left, right) in zip(array.shape, pad_width)
)
padded[original_area_slice] = array
return padded, original_area_slice
def _set_pad_area(padded, axis, width_pair, value_pair):
"""Set an empty-padded area in given dimension.
"""
left_slice = _slice_at_axis(slice(None, width_pair[0]), axis)
padded[left_slice] = value_pair[0]
right_slice = _slice_at_axis(
slice(padded.shape[axis] - width_pair[1], None), axis
)
padded[right_slice] = value_pair[1]
def _get_edges(padded, axis, width_pair):
"""Retrieves edge values from an empty-padded array along a given axis.
Args:
padded(cupy.ndarray): Empty-padded array.
axis(int): Dimension in which the edges are considered.
width_pair((int, int)): Pair of widths that mark the pad area on both
sides in the given dimension.
"""
left_index = width_pair[0]
left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis)
left_edge = padded[left_slice]
right_index = padded.shape[axis] - width_pair[1]
right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis)
right_edge = padded[right_slice]
return left_edge, right_edge
def _get_linear_ramps(padded, axis, width_pair, end_value_pair):
"""Constructs linear ramps for an empty-padded array along a given axis.
Args:
padded(cupy.ndarray): Empty-padded array.
axis(int): Dimension in which the ramps are constructed.
width_pair((int, int)): Pair of widths that mark the pad area on both
sides in the given dimension.
end_value_pair((scalar, scalar)): End values for the linear ramps which
form the edge of the fully padded array. These values are included in
the linear ramps.
"""
edge_pair = _get_edges(padded, axis, width_pair)
left_ramp = cupy.linspace(
start=end_value_pair[0],
# squeeze axis replaced by linspace
stop=edge_pair[0].squeeze(axis),
num=width_pair[0],
endpoint=False,
dtype=padded.dtype,
axis=axis,
)
right_ramp = cupy.linspace(
start=end_value_pair[1],
# squeeze axis replaced by linspace
stop=edge_pair[1].squeeze(axis),
num=width_pair[1],
endpoint=False,
dtype=padded.dtype,
axis=axis,
)
# Reverse linear space in appropriate dimension
right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)]
return left_ramp, right_ramp
def _get_stats(padded, axis, width_pair, length_pair, stat_func):
"""Calculates a statistic for an empty-padded array along a given axis.
Args:
padded(cupy.ndarray): Empty-padded array.
axis(int): Dimension in which the statistic is calculated.
width_pair((int, int)): Pair of widths that mark the pad area on both
sides in the given dimension.
length_pair(2-element sequence of None or int): Gives the number of
values in valid area from each side that is taken into account when
calculating the statistic. If None the entire valid area in `padded`
is considered.
stat_func(function): Function to compute statistic. The expected
signature is
``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``.
"""
# Calculate indices of the edges of the area with original values
left_index = width_pair[0]
right_index = padded.shape[axis] - width_pair[1]
# as well as its length
max_length = right_index - left_index
# Limit stat_lengths to max_length
left_length, right_length = length_pair
if left_length is None or max_length < left_length:
left_length = max_length
if right_length is None or max_length < right_length:
right_length = max_length
# Calculate statistic for the left side
left_slice = _slice_at_axis(
slice(left_index, left_index + left_length), axis
)
left_chunk = padded[left_slice]
left_stat = stat_func(left_chunk, axis=axis, keepdims=True)
_round_if_needed(left_stat, padded.dtype)
if left_length == right_length == max_length:
# return early as right_stat must be identical to left_stat
return left_stat, left_stat
# Calculate statistic for the right side
right_slice = _slice_at_axis(
slice(right_index - right_length, right_index), axis
)
right_chunk = padded[right_slice]
right_stat = stat_func(right_chunk, axis=axis, keepdims=True)
_round_if_needed(right_stat, padded.dtype)
return left_stat, right_stat
def _set_reflect_both(padded, axis, width_pair, method, include_edge=False):
"""Pads an `axis` of `arr` using reflection.
Args:
padded(cupy.ndarray): Input array of arbitrary shape.
axis(int): Axis along which to pad `arr`.
width_pair((int, int)): Pair of widths that mark the pad area on both
sides in the given dimension.
method(str): Controls method of reflection; options are 'even' or 'odd'.
include_edge(bool, optional): If true, edge value is included in
reflection, otherwise the edge value forms the symmetric axis to the
reflection. (Default value = False)
"""
left_pad, right_pad = width_pair
old_length = padded.shape[axis] - right_pad - left_pad
if include_edge:
# Edge is included, we need to offset the pad amount by 1
edge_offset = 1
else:
edge_offset = 0 # Edge is not included, no need to offset pad amount
old_length -= 1 # but must be omitted from the chunk
if left_pad > 0:
# Pad with reflected values on left side:
# First limit chunk size which can't be larger than pad area
chunk_length = min(old_length, left_pad)
# Slice right to left, stop on or next to edge, start relative to stop
stop = left_pad - edge_offset
start = stop + chunk_length
left_slice = _slice_at_axis(slice(start, stop, -1), axis)
left_chunk = padded[left_slice]
if method == 'odd':
# Negate chunk and align with edge
edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis)
left_chunk = 2 * padded[edge_slice] - left_chunk
# Insert chunk into padded area
start = left_pad - chunk_length
stop = left_pad
pad_area = _slice_at_axis(slice(start, stop), axis)
padded[pad_area] = left_chunk
# Adjust pointer to left edge for next iteration
left_pad -= chunk_length
if right_pad > 0:
# Pad with reflected values on right side:
# First limit chunk size which can't be larger than pad area
chunk_length = min(old_length, right_pad)
# Slice right to left, start on or next to edge, stop relative to start
start = -right_pad + edge_offset - 2
stop = start - chunk_length
right_slice = _slice_at_axis(slice(start, stop, -1), axis)
right_chunk = padded[right_slice]
if method == 'odd':
# Negate chunk and align with edge
edge_slice = _slice_at_axis(
slice(-right_pad - 1, -right_pad), axis
)
right_chunk = 2 * padded[edge_slice] - right_chunk
# Insert chunk into padded area
start = padded.shape[axis] - right_pad
stop = start + chunk_length
pad_area = _slice_at_axis(slice(start, stop), axis)
padded[pad_area] = right_chunk
# Adjust pointer to right edge for next iteration
right_pad -= chunk_length
return left_pad, right_pad
def _set_wrap_both(padded, axis, width_pair):
"""Pads an `axis` of `arr` with wrapped values.
Args:
padded(cupy.ndarray): Input array of arbitrary shape.
axis(int): Axis along which to pad `arr`.
width_pair((int, int)): Pair of widths that mark the pad area on both
sides in the given dimension.
"""
left_pad, right_pad = width_pair
period = padded.shape[axis] - right_pad - left_pad
# If the current dimension of `arr` doesn't contain enough valid values
# (not part of the undefined pad area) we need to pad multiple times.
# Each time the pad area shrinks on both sides which is communicated with
# these variables.
new_left_pad = 0
new_right_pad = 0
if left_pad > 0:
# Pad with wrapped values on left side
# First slice chunk from right side of the non-pad area.
# Use min(period, left_pad) to ensure that chunk is not larger than
# pad area
right_slice = _slice_at_axis(
slice(
-right_pad - min(period, left_pad),
-right_pad if right_pad != 0 else None,
),
axis,
)
right_chunk = padded[right_slice]
if left_pad > period:
# Chunk is smaller than pad area
pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis)
new_left_pad = left_pad - period
else:
# Chunk matches pad area
pad_area = _slice_at_axis(slice(None, left_pad), axis)
padded[pad_area] = right_chunk
if right_pad > 0:
# Pad with wrapped values on right side
# First slice chunk from left side of the non-pad area.
# Use min(period, right_pad) to ensure that chunk is not larger than
# pad area
left_slice = _slice_at_axis(
slice(left_pad, left_pad + min(period, right_pad)), axis
)
left_chunk = padded[left_slice]
if right_pad > period:
# Chunk is smaller than pad area
pad_area = _slice_at_axis(
slice(-right_pad, -right_pad + period), axis
)
new_right_pad = right_pad - period
else:
# Chunk matches pad area
pad_area = _slice_at_axis(slice(-right_pad, None), axis)
padded[pad_area] = left_chunk
return new_left_pad, new_right_pad
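# Illustrative sketch: wrapping repeats the original period, and the caller
# iterates when the pad is wider than the input. Expected result, mirroring
# ``numpy.pad``:
#
#     >>> cupy.pad(cupy.arange(3), 5, mode='wrap')
#     array([1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1])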
def _as_pairs(x, ndim, as_index=False):
"""Broadcasts `x` to an array with shape (`ndim`, 2).
A helper function for `pad` that prepares and validates arguments like
`pad_width` for iteration in pairs.
Args:
x(scalar or array-like, optional): The object to broadcast to the shape
(`ndim`, 2).
ndim(int): Number of pairs the broadcasted `x` will have.
as_index(bool, optional): If `x` is not None, try to round each
element of `x` to an integer (dtype `cupy.intp`) and ensure every
element is positive. (Default value = False)
Returns:
nested iterables, shape (`ndim`, 2): The broadcasted version of `x`.
"""
if x is None:
# Pass through None as a special case, otherwise cupy.round(x) fails
# with an AttributeError
return ((None, None),) * ndim
elif isinstance(x, numbers.Number):
if as_index:
x = round(x)
return ((x, x),) * ndim
x = numpy.array(x)
if as_index:
x = numpy.asarray(numpy.round(x), dtype=numpy.intp)
if x.ndim < 3:
# Optimization: Possibly use faster paths for cases where `x` has
# only 1 or 2 elements. `numpy.broadcast_to` could handle these as well
# but is currently slower
if x.size == 1:
# x was supplied as a single value
x = x.ravel() # Ensure x[0] works for x.ndim == 0, 1, 2
if as_index and x < 0:
raise ValueError("index can't contain negative values")
return ((x[0], x[0]),) * ndim
if x.size == 2 and x.shape != (2, 1):
# x was supplied with a single value for each side
# but except case when each dimension has a single value
# which should be broadcasted to a pair,
# e.g. [[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]]
x = x.ravel() # Ensure x[0], x[1] works
if as_index and (x[0] < 0 or x[1] < 0):
raise ValueError("index can't contain negative values")
return ((x[0], x[1]),) * ndim
if as_index and x.min() < 0:
raise ValueError("index can't contain negative values")
# Converting the array with `tolist` seems to improve performance
# when iterating and indexing the result (see usage in `pad`)
    return numpy.broadcast_to(x, (ndim, 2)).tolist()
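# Illustrative sketch of the broadcasting rules implemented above:
#
#     >>> _as_pairs(3, ndim=2, as_index=True)
#     ((3, 3), (3, 3))
#     >>> _as_pairs((1, 2), ndim=2)
#     ((1, 2), (1, 2))
#     >>> _as_pairs([[1], [2]], ndim=2)
#     [[1, 1], [2, 2]]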
# def _pad_dispatcher(array, pad_width, mode=None, **kwargs):
# return (array,)
###############################################################################
# Public functions
# @array_function_dispatch(_pad_dispatcher, module='numpy')
def pad(array, pad_width, mode='constant', **kwargs):
"""Pads an array with specified widths and values.
Args:
array(cupy.ndarray): The array to pad.
pad_width(sequence, array_like or int): Number of values padded to the
edges of each axis. ((before_1, after_1), ... (before_N, after_N))
unique pad widths for each axis. ((before, after),) yields same
before and after pad for each axis. (pad,) or int is a shortcut for
before = after = pad width for all axes. You cannot specify
``cupy.ndarray``.
mode(str or function, optional): One of the following string values or a
user supplied function
'constant' (default)
Pads with a constant value.
'edge'
Pads with the edge values of array.
'linear_ramp'
Pads with the linear ramp between end_value and the array edge
value.
'maximum'
Pads with the maximum value of all or part of the vector along
each axis.
'mean'
Pads with the mean value of all or part of the vector along each
axis.
'median'
Pads with the median value of all or part of the vector along
each axis. (Not Implemented)
'minimum'
Pads with the minimum value of all or part of the vector along
each axis.
'reflect'
Pads with the reflection of the vector mirrored on the first and
last values of the vector along each axis.
'symmetric'
Pads with the reflection of the vector mirrored along the edge
of the array.
'wrap'
Pads with the wrap of the vector along the axis. The first
values are used to pad the end and the end values are used to
pad the beginning.
'empty'
Pads with undefined values.
<function>
Padding function, see Notes.
stat_length(sequence or int, optional): Used in 'maximum', 'mean',
'median', and 'minimum'. Number of values at edge of each axis used
to calculate the statistic value.
((before_1, after_1), ... (before_N, after_N)) unique statistic
lengths for each axis. ((before, after),) yields same before and
after statistic lengths for each axis. (stat_length,) or int is a
shortcut for before = after = statistic length for all axes.
Default is ``None``, to use the entire axis. You cannot specify
``cupy.ndarray``.
constant_values(sequence or scalar, optional): Used in 'constant'. The
values to set the padded values for each axis.
((before_1, after_1), ... (before_N, after_N)) unique pad constants
for each axis.
((before, after),) yields same before and after constants for each
axis.
(constant,) or constant is a shortcut for before = after = constant
for all axes.
Default is 0. You cannot specify ``cupy.ndarray``.
end_values(sequence or scalar, optional): Used in 'linear_ramp'. The
values used for the ending value of the linear_ramp and that will
form the edge of the padded array.
((before_1, after_1), ... (before_N, after_N)) unique end values
for each axis.
((before, after),) yields same before and after end
values for each axis.
(constant,) or constant is a shortcut for before = after = constant
for all axes.
Default is 0. You cannot specify ``cupy.ndarray``.
reflect_type({'even', 'odd'}, optional): Used in 'reflect', and
'symmetric'. The 'even' style is the default with an unaltered
reflection around the edge value. For the 'odd' style, the extended
part of the array is created by subtracting the reflected values from
two times the edge value.
Returns:
cupy.ndarray: Padded array with shape extended by ``pad_width``.
.. note::
For an array with rank greater than 1, some of the padding of later
axes is calculated from padding of previous axes. This is easiest to
think about with a rank 2 array where the corners of the padded array
are calculated by using padded values from the first axis.
The padding function, if used, should modify a rank 1 array in-place.
It has the following signature:
``padding_func(vector, iaxis_pad_width, iaxis, kwargs)``
where
vector (cupy.ndarray)
A rank 1 array already padded with zeros. Padded values are
``vector[:iaxis_pad_width[0]]`` and
``vector[-iaxis_pad_width[1]:]``.
iaxis_pad_width (tuple)
A 2-tuple of ints, ``iaxis_pad_width[0]`` represents the number of
values padded at the beginning of vector where
``iaxis_pad_width[1]`` represents the number of values padded at
the end of vector.
iaxis (int)
The axis currently being calculated.
kwargs (dict)
Any keyword arguments the function requires.
Examples
--------
>>> a = cupy.array([1, 2, 3, 4, 5])
>>> cupy.pad(a, (2, 3), 'constant', constant_values=(4, 6))
array([4, 4, 1, ..., 6, 6, 6])
>>> cupy.pad(a, (2, 3), 'edge')
array([1, 1, 1, ..., 5, 5, 5])
>>> cupy.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4))
array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4])
>>> cupy.pad(a, (2,), 'maximum')
array([5, 5, 1, 2, 3, 4, 5, 5, 5])
>>> cupy.pad(a, (2,), 'mean')
array([3, 3, 1, 2, 3, 4, 5, 3, 3])
>>> a = cupy.array([[1, 2], [3, 4]])
>>> cupy.pad(a, ((3, 2), (2, 3)), 'minimum')
array([[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[3, 3, 3, 4, 3, 3, 3],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1]])
>>> a = cupy.array([1, 2, 3, 4, 5])
>>> cupy.pad(a, (2, 3), 'reflect')
array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])
>>> cupy.pad(a, (2, 3), 'reflect', reflect_type='odd')
array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8])
>>> cupy.pad(a, (2, 3), 'symmetric')
array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])
>>> cupy.pad(a, (2, 3), 'symmetric', reflect_type='odd')
array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7])
>>> cupy.pad(a, (2, 3), 'wrap')
array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])
>>> def pad_with(vector, pad_width, iaxis, kwargs):
... pad_value = kwargs.get('padder', 10)
... vector[:pad_width[0]] = pad_value
... vector[-pad_width[1]:] = pad_value
>>> a = cupy.arange(6)
>>> a = a.reshape((2, 3))
>>> cupy.pad(a, 2, pad_with)
array([[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 0, 1, 2, 10, 10],
[10, 10, 3, 4, 5, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10]])
>>> cupy.pad(a, 2, pad_with, padder=100)
array([[100, 100, 100, 100, 100, 100, 100],
[100, 100, 100, 100, 100, 100, 100],
[100, 100, 0, 1, 2, 100, 100],
[100, 100, 3, 4, 5, 100, 100],
[100, 100, 100, 100, 100, 100, 100],
[100, 100, 100, 100, 100, 100, 100]])
"""
if isinstance(pad_width, numbers.Integral):
pad_width = ((pad_width, pad_width),) * array.ndim
else:
pad_width = numpy.asarray(pad_width)
if not pad_width.dtype.kind == 'i':
raise TypeError('`pad_width` must be of integral type.')
# Broadcast to shape (array.ndim, 2)
pad_width = _as_pairs(pad_width, array.ndim, as_index=True)
if callable(mode):
# Old behavior: Use user-supplied function with numpy.apply_along_axis
function = mode
# Create a new zero padded array
padded, _ = _pad_simple(array, pad_width, fill_value=0)
# And apply along each axis
for axis in range(padded.ndim):
# Iterate using ndindex as in apply_along_axis, but assuming that
# function operates inplace on the padded array.
# view with the iteration axis at the end
view = cupy.moveaxis(padded, axis, -1)
# compute indices for the iteration axes, and append a trailing
# ellipsis to prevent 0d arrays decaying to scalars (gh-8642)
inds = numpy.ndindex(view.shape[:-1])
inds = (ind + (Ellipsis,) for ind in inds)
for ind in inds:
function(view[ind], pad_width[axis], axis, kwargs)
return padded
# Make sure that no unsupported keywords were passed for the current mode
allowed_kwargs = {
'empty': [],
'edge': [],
'wrap': [],
'constant': ['constant_values'],
'linear_ramp': ['end_values'],
'maximum': ['stat_length'],
'mean': ['stat_length'],
# 'median': ['stat_length'],
'minimum': ['stat_length'],
'reflect': ['reflect_type'],
'symmetric': ['reflect_type'],
}
try:
unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode])
except KeyError:
raise ValueError("mode '{}' is not supported".format(mode))
if unsupported_kwargs:
raise ValueError(
"unsupported keyword arguments for mode '{}': {}".format(
mode, unsupported_kwargs
)
)
if mode == 'constant':
values = kwargs.get('constant_values', 0)
if isinstance(values, numbers.Number) and values == 0 and (
array.ndim == 1 or array.size < 4e6):
# faster path for 1d arrays or small n-dimensional arrays
return _pad_simple(array, pad_width, 0)[0]
stat_functions = {
'maximum': cupy.max,
'minimum': cupy.min,
'mean': cupy.mean,
# 'median': cupy.median,
}
# Create array with final shape and original values
# (padded area is undefined)
padded, original_area_slice = _pad_simple(array, pad_width)
# And prepare iteration over all dimensions
# (zipping may be more readable than using enumerate)
axes = range(padded.ndim)
if mode == 'constant':
values = _as_pairs(values, padded.ndim)
for axis, width_pair, value_pair in zip(axes, pad_width, values):
roi = _view_roi(padded, original_area_slice, axis)
_set_pad_area(roi, axis, width_pair, value_pair)
elif mode == 'empty':
pass # Do nothing as _pad_simple already returned the correct result
elif array.size == 0:
# Only modes 'constant' and 'empty' can extend empty axes, all other
# modes depend on `array` not being empty
# -> ensure every empty axis is only 'padded with 0'
for axis, width_pair in zip(axes, pad_width):
if array.shape[axis] == 0 and any(width_pair):
raise ValueError(
"can't extend empty axis {} using modes other than "
"'constant' or 'empty'".format(axis)
)
# passed, don't need to do anything more as _pad_simple already
# returned the correct result
elif mode == 'edge':
for axis, width_pair in zip(axes, pad_width):
roi = _view_roi(padded, original_area_slice, axis)
edge_pair = _get_edges(roi, axis, width_pair)
_set_pad_area(roi, axis, width_pair, edge_pair)
elif mode == 'linear_ramp':
end_values = kwargs.get('end_values', 0)
end_values = _as_pairs(end_values, padded.ndim)
for axis, width_pair, value_pair in zip(axes, pad_width, end_values):
roi = _view_roi(padded, original_area_slice, axis)
ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair)
_set_pad_area(roi, axis, width_pair, ramp_pair)
elif mode in stat_functions:
func = stat_functions[mode]
length = kwargs.get('stat_length', None)
length = _as_pairs(length, padded.ndim, as_index=True)
for axis, width_pair, length_pair in zip(axes, pad_width, length):
roi = _view_roi(padded, original_area_slice, axis)
stat_pair = _get_stats(roi, axis, width_pair, length_pair, func)
_set_pad_area(roi, axis, width_pair, stat_pair)
elif mode in {'reflect', 'symmetric'}:
method = kwargs.get('reflect_type', 'even')
        include_edge = (mode == 'symmetric')
for axis, (left_index, right_index) in zip(axes, pad_width):
if array.shape[axis] == 1 and (left_index > 0 or right_index > 0):
# Extending singleton dimension for 'reflect' is legacy
# behavior; it really should raise an error.
edge_pair = _get_edges(padded, axis, (left_index, right_index))
_set_pad_area(
padded, axis, (left_index, right_index), edge_pair
)
continue
roi = _view_roi(padded, original_area_slice, axis)
while left_index > 0 or right_index > 0:
# Iteratively pad until dimension is filled with reflected
# values. This is necessary if the pad area is larger than
# the length of the original values in the current dimension.
left_index, right_index = _set_reflect_both(
roi, axis, (left_index, right_index), method, include_edge
)
elif mode == 'wrap':
for axis, (left_index, right_index) in zip(axes, pad_width):
roi = _view_roi(padded, original_area_slice, axis)
while left_index > 0 or right_index > 0:
# Iteratively pad until dimension is filled with wrapped
# values. This is necessary if the pad area is larger than
# the length of the original values in the current dimension.
left_index, right_index = _set_wrap_both(
roi, axis, (left_index, right_index)
)
return padded
|
cupy/cupy
|
cupy/_padding/pad.py
|
Python
|
mit
| 29,422 | 0 |
from django.core.management.base import BaseCommand, CommandError
from corehq.elastic import get_es_new
from corehq.pillows.utils import get_all_expected_es_indices
class Command(BaseCommand):
help = "Update dynamic settings for existing elasticsearch indices."
def add_arguments(self, parser):
parser.add_argument(
'--noinput',
action='store_true',
dest='noinput',
default=False,
help='Skip important confirmation warnings.'
)
def handle(self, **options):
noinput = options.pop('noinput')
es_indices = list(get_all_expected_es_indices())
to_update = []
es = get_es_new()
for index_info in es_indices:
old_settings = es.indices.get_settings(index=index_info.index)
old_number_of_replicas = int(
old_settings[index_info.index]['settings']['index']['number_of_replicas']
)
new_number_of_replicas = index_info.meta['settings']['number_of_replicas']
if old_number_of_replicas != new_number_of_replicas:
print("{} [{}]:\n Number of replicas changing from {!r} to {!r}".format(
index_info.alias, index_info.index, old_number_of_replicas, new_number_of_replicas))
to_update.append((index_info, {
'number_of_replicas': new_number_of_replicas,
}))
if not to_update:
print("There is nothing to update.")
return
if (noinput or _confirm(
"Confirm that you want to update all the settings above?")):
for index_info, settings in to_update:
mapping_res = es.indices.put_settings(index=index_info.index, body=settings)
if mapping_res.get('acknowledged', False):
print("{} [{}]:\n Index settings successfully updated".format(
index_info.alias, index_info.index))
else:
print(mapping_res)
def _confirm(message):
if input(
'{} [y/n]'.format(message)
).lower() == 'y':
return True
else:
raise CommandError('abort')
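# Example invocation (sketch; the command name comes from this module's
# filename, as with any Django management command):
#
#     $ ./manage.py update_es_settings            # prompt before applying
#     $ ./manage.py update_es_settings --noinput  # apply without prompting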
|
dimagi/commcare-hq
|
corehq/ex-submodules/pillowtop/management/commands/update_es_settings.py
|
Python
|
bsd-3-clause
| 2,244 | 0.003119 |
# Copyright 2019 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Composer models of Kinova robots."""
from dm_control.entities.manipulators.kinova.jaco_arm import JacoArm
from dm_control.entities.manipulators.kinova.jaco_hand import JacoHand
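# A minimal composition sketch (constructor defaults and the ``attach`` call
# are assumed from composer's Entity API):
#
#     arm = JacoArm()
#     hand = JacoHand()
#     arm.attach(hand)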
|
deepmind/dm_control
|
dm_control/entities/manipulators/kinova/__init__.py
|
Python
|
apache-2.0
| 848 | 0 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ["preview"],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_target_vpn_gateway
description:
- Represents a VPN gateway running in GCP. This virtual device is managed by Google,
but used only by you.
short_description: Creates a GCP TargetVpnGateway
version_added: 2.7
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
description:
description:
- An optional description of this resource.
required: false
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
network:
description:
- The network this VPN gateway is accepting traffic for.
- 'This field represents a link to a Network resource in GCP. It can be specified
in two ways. You can add `register: name-of-resource` to a gcp_compute_network
task and then set this network field to "{{ name-of-resource }}" Alternatively,
you can set this network to a dictionary with the selfLink key where the value
is the selfLink of your Network'
required: true
region:
description:
- The region this gateway should sit in.
required: true
extends_documentation_fragment: gcp
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/rest/v1/targetVpnGateways)'
'''
EXAMPLES = '''
- name: create a address
gcp_compute_address:
name: "address-vpngateway"
region: us-west1
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: address
- name: create a network
gcp_compute_network:
name: "network-vpngateway"
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: network
- name: create a target vpn gateway
gcp_compute_target_vpn_gateway:
name: "test_object"
region: us-west1
network: "{{ network }}"
project: "test_project"
auth_kind: "serviceaccount"
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
network:
description:
- The network this VPN gateway is accepting traffic for.
returned: success
type: dict
tunnels:
description:
  - A list of references to VpnTunnel resources associated with this VPN gateway.
returned: success
type: list
forwardingRules:
description:
  - A list of references to the ForwardingRule resources associated with this VPN gateway.
returned: success
type: list
region:
description:
- The region this gateway should sit in.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
description=dict(type='str'),
name=dict(required=True, type='str'),
network=dict(required=True, type='dict'),
region=dict(required=True, type='str')
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/compute']
state = module.params['state']
kind = 'compute#targetVpnGateway'
fetch = fetch_resource(module, self_link(module), kind)
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module), kind)
fetch = fetch_resource(module, self_link(module), kind)
changed = True
else:
delete(module, self_link(module), kind)
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module), kind)
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.post(link, resource_to_request(module)))
def update(module, link, kind):
module.fail_json(msg="TargetVpnGateway cannot be edited")
def delete(module, link, kind):
auth = GcpSession(module, 'compute')
return wait_for_operation(module, auth.delete(link))
def resource_to_request(module):
request = {
u'kind': 'compute#targetVpnGateway',
u'description': module.params.get('description'),
u'name': module.params.get('name'),
u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink')
}
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
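# For illustration only (values are hypothetical): with name='gw-1' and a
# network dict whose selfLink resolves to '.../global/networks/net-1', the
# function above would return roughly
#     {'kind': 'compute#targetVpnGateway',
#      'name': 'gw-1',
#      'network': 'https://www.googleapis.com/compute/v1/projects/p/global/networks/net-1'}
# Falsy fields such as an empty description are filtered out.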
def fetch_resource(module, link, kind, allow_not_found=True):
auth = GcpSession(module, 'compute')
return return_if_object(module, auth.get(link), kind, allow_not_found)
def self_link(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetVpnGateways/{name}".format(**module.params)
def collection(module):
return "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/targetVpnGateways".format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'creationTimestamp': response.get(u'creationTimestamp'),
u'description': module.params.get('description'),
u'name': module.params.get('name'),
u'id': response.get(u'id'),
u'network': replace_resource_dict(module.params.get(u'network', {}), 'selfLink'),
u'tunnels': response.get(u'tunnels'),
u'forwardingRules': response.get(u'forwardingRules')
}
def async_op_url(module, extra_data=None):
if extra_data is None:
extra_data = {}
url = "https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/operations/{op_id}"
combined = extra_data.copy()
combined.update(module.params)
return url.format(**combined)
def wait_for_operation(module, response):
op_result = return_if_object(module, response, 'compute#operation')
if op_result is None:
return {}
status = navigate_hash(op_result, ['status'])
wait_done = wait_for_completion(status, op_result, module)
return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#targetVpnGateway')
def wait_for_completion(status, op_result, module):
op_id = navigate_hash(op_result, ['name'])
op_uri = async_op_url(module, {'op_id': op_id})
while status != 'DONE':
        raise_if_errors(op_result, ['error', 'errors'], module)
time.sleep(1.0)
op_result = fetch_resource(module, op_uri, 'compute#operation')
status = navigate_hash(op_result, ['status'])
return op_result
def raise_if_errors(response, err_path, module):
errors = navigate_hash(response, err_path)
if errors is not None:
module.fail_json(msg=errors)
if __name__ == '__main__':
main()
|
dlazz/ansible
|
lib/ansible/modules/cloud/google/gcp_compute_target_vpn_gateway.py
|
Python
|
gpl-3.0
| 11,232 | 0.003205 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub implementation of the modules service."""
from google.appengine.api import apiproxy_stub
from google.appengine.api import request_info
from google.appengine.api.modules import modules_service_pb
from google.appengine.runtime import apiproxy_errors
class ModulesServiceStub(apiproxy_stub.APIProxyStub):
_ACCEPTS_REQUEST_ID = True
THREADSAFE = True
def __init__(self, request_data):
super(ModulesServiceStub, self).__init__('modules',
request_data=request_data)
def _GetModuleFromRequest(self, request, request_id):
dispatcher = self.request_data.get_dispatcher()
if request.has_module():
module = request.module()
else:
module = self.request_data.get_module(request_id)
return module, dispatcher
def _GetModuleAndVersionFromRequest(self, request, request_id):
module, dispatcher = self._GetModuleFromRequest(request, request_id)
if request.has_version():
version = request.version()
else:
version = self.request_data.get_version(request_id)
if version not in dispatcher.get_versions(module):
version = dispatcher.get_default_version(module)
return module, version, dispatcher
def _Dynamic_GetModules(self, request, response, request_id):
dispatcher = self.request_data.get_dispatcher()
for module in dispatcher.get_module_names():
response.add_module(module)
def _Dynamic_GetVersions(self, request, response, request_id):
module, dispatcher = self._GetModuleFromRequest(request, request_id)
try:
for version in dispatcher.get_versions(module):
response.add_version(version)
except request_info.ModuleDoesNotExistError:
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_MODULE)
def _Dynamic_GetDefaultVersion(self, request, response, request_id):
module, dispatcher = self._GetModuleFromRequest(request, request_id)
try:
response.set_version(dispatcher.get_default_version(module))
except request_info.ModuleDoesNotExistError:
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_MODULE)
def _Dynamic_GetNumInstances(self, request, response, request_id):
try:
module, version, dispatcher = self._GetModuleAndVersionFromRequest(
request, request_id)
response.set_instances(dispatcher.get_num_instances(module, version))
except (request_info.ModuleDoesNotExistError,
request_info.VersionDoesNotExistError,
request_info.NotSupportedWithAutoScalingError):
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_VERSION)
def _Dynamic_SetNumInstances(self, request, response, request_id):
try:
module, version, dispatcher = self._GetModuleAndVersionFromRequest(
request, request_id)
dispatcher.set_num_instances(module, version, request.instances())
except (request_info.ModuleDoesNotExistError,
request_info.VersionDoesNotExistError,
request_info.NotSupportedWithAutoScalingError):
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_VERSION)
def _Dynamic_StartModule(self, request, response, request_id):
module = request.module()
version = request.version()
dispatcher = self.request_data.get_dispatcher()
try:
dispatcher.start_module(module, version)
except (request_info.ModuleDoesNotExistError,
request_info.VersionDoesNotExistError,
request_info.NotSupportedWithAutoScalingError):
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_VERSION)
except request_info.ModuleAlreadyStartedError:
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.UNEXPECTED_STATE)
def _Dynamic_StopModule(self, request, response, request_id):
try:
module, version, dispatcher = self._GetModuleAndVersionFromRequest(
request, request_id)
dispatcher.stop_module(module, version)
except (request_info.ModuleDoesNotExistError,
request_info.VersionDoesNotExistError,
request_info.NotSupportedWithAutoScalingError):
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_VERSION)
except request_info.ModuleAlreadyStoppedError:
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.UNEXPECTED_STATE)
def _Dynamic_GetHostname(self, request, response, request_id):
if request.has_instance():
instance = request.instance()
else:
instance = None
try:
module, version, dispatcher = self._GetModuleAndVersionFromRequest(
request, request_id)
response.set_hostname(dispatcher.get_hostname(module, version, instance))
except (request_info.ModuleDoesNotExistError,
request_info.VersionDoesNotExistError):
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_MODULE)
except request_info.InvalidInstanceIdError:
raise apiproxy_errors.ApplicationError(
modules_service_pb.ModulesServiceError.INVALID_INSTANCES)
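# A minimal registration sketch, as performed by the dev_appserver
# (``request_data`` is assumed to be a request_info-compatible object):
#
#     from google.appengine.api import apiproxy_stub_map
#     apiproxy_stub_map.apiproxy.RegisterStub(
#         'modules', ModulesServiceStub(request_data))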
|
yencarnacion/jaikuengine
|
.google_appengine/google/appengine/api/modules/modules_stub.py
|
Python
|
apache-2.0
| 5,933 | 0.007079 |
# -*- coding: utf-8 -*-
#
# adlib.py
#
# A lot of help from:
# http://marcitland.blogspot.com/2011/02/python-active-directory-linux.html
# import sys is my friend!
import sys
import logging
import ldap
from person import Person
#import netrc
import base64,zlib
import ldap.modlist as modlist
from secure import ADurl, adusername, adpassword
import time
#~ Create an AD connection with cleanup. Must be used in a
#~ 'with' statement.
#~ usage: with ADconnection() as adc
# setting module logger
logger = logging.getLogger('pyADAP.adlib')
class ADconnection(object):
def __enter__(self):
#LDAP Connection
try:
# Fix MS Issues
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
ldap.set_option(ldap.OPT_REFERRALS,0)
self.ldap_connection = ldap.initialize(ADurl)
self.ldap_connection.simple_bind_s(adusername, adpassword)
except ldap.LDAPError, e:
sys.stderr.write('Error connecting to LDAP server: ' + str(e) + '\n')
# Needs to fail gracefully such as just dump to bit bucket
#sys.exit(1)
logger.info('Error connecting to LDAP server')
raise RuntimeError('Error connecting to LDAP server')
logger.debug("Connected to AD")
return self.ldap_connection
def __init__(self):
return None
def __exit__(self, type, value, traceback):
self.close()
def close(self):
logger.debug("Disconnecting from AD")
self.ldap_connection.unbind_s()
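# A minimal usage sketch (credentials come from the secure module; the
# account name below is hypothetical):
#
#     with ADconnection() as ldapconn:
#         ldapconn.search_s('dc=sbcc,dc=local', ldap.SCOPE_SUBTREE,
#                           '(sAMAccountName=jdoe)')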
class adlib(object):
def __init__(self,imsperson):
self.perrec = imsperson
#Base dn. Outside config????
self.base_dn = 'dc=sbcc,dc=local'
self.dn = None
self.inADFlag = None
def inAD(self,cn=None):
if cn is None:
cn=self.perrec.userid
        # instantiate the ADconnection helper
ad = ADconnection()
with ad as ldapconn:
try:
searchfilter = ('(&(objectCategory=person)(&(objectClass=user)(sAMAccountName=%s)))' % cn)
user_results = ldapconn.search_s(self.base_dn, ldap.SCOPE_SUBTREE,searchfilter)
dn = user_results[0][0]
if dn is None:
return False
except ldap.LDAPError, error_message:
#print "error finding username: %S" % error_message
self.inADFlag = False
return False
except:
self.inADFlag = False
return False
self.inADFlag = True
return True
def chgPwd(self,cn=None):
if cn is None:
cn=self.perrec.userid
        # instantiate the ADconnection helper
ad = ADconnection()
with ad as ldapconn:
try:
searchfilter = ('(&(objectCategory=person)(&(objectClass=user)(sAMAccountName=%s)))' % cn)
logger.debug(searchfilter)
user_results=ldapconn.search_s(self.base_dn,ldap.SCOPE_SUBTREE,searchfilter)
logger.debug(user_results)
dn = user_results[0][0]
#~ print dn
                if dn is not None:
#placeholder for logging
#print 'updating ' + user['username'],time.ctime()
adpass = ('"%s"' % self.perrec.password).encode("utf-16-le")
#adpass = base64.b64encode(adpass)
# Update Password
mod_attrs = [( ldap.MOD_REPLACE, 'unicodePwd', adpass ),( ldap.MOD_REPLACE, 'unicodePwd', adpass)]
# Update Role
mod_attrs.append( (ldap.MOD_REPLACE, 'employeeType', str(self.perrec.primaryRole)) )
#Update Knumber
mod_attrs.append( (ldap.MOD_REPLACE, 'employeeID', str(self.perrec.knumber)) )
mod_attrs.append( (ldap.MOD_REPLACE, 'employeeNumber', str(self.perrec.knumber).strip('K')) )
mod_attrs.append( (ldap.MOD_REPLACE, 'mail', str(self.perrec.userid) + '@pipeline.sbcc.edu') )
#Reenable user
#print user_results[0][1]['userAccountControl'][0]
UC = int(user_results[0][1]['userAccountControl'][0])
if UC & (1<<1):
UC = UC & ~(1 << 1)
UCattrib = (ldap.MOD_REPLACE, 'userAccountControl', str(UC))
#mod_attrs.append(UCattrib)
#print mod_attrs
ldapconn.modify_s( dn, mod_attrs )
logger.info('Updated password for %s', str(cn))
#work on logging
except ldap.LDAPError, error_message:
#~ print "error finding username: %s" % error_message
return False
def enableUser(self,cn=None):
if cn is None:
cn=self.perrec.userid
        # instantiate the ADconnection helper
ad = ADconnection()
with ad as ldapconn:
try:
searchfilter = ('(&(objectCategory=person)(&(objectClass=user)(sAMAccountName=%s)))' % cn)
logger.debug(searchfilter)
user_results=ldapconn.search_s(self.base_dn,ldap.SCOPE_SUBTREE,searchfilter)
logger.debug(user_results)
dn = user_results[0][0]
#~ print dn
                if dn is not None:
#placeholder for logging
#print 'updating ' + user['username'],time.ctime()
#print user_results[0][1]['userAccountControl'][0]
UC = int(user_results[0][1]['userAccountControl'][0])
if UC & (1<<1):
UC = UC & ~(1 << 1)
UCattrib = (ldap.MOD_REPLACE, 'userAccountControl', str(UC))
#mod_attrs.append(UCattrib)
mod_attrs = [(UCattrib)]
#print mod_attrs
ldapconn.modify_s( dn, mod_attrs )
logger.info('Enabled: %s', str(cn))
#work on logging
except ldap.LDAPError, error_message:
#~ print "error finding username: %s" % error_message
return False
def addUser(self):
# Build User
if self.perrec.ADContainer == 'noOU':
logger.debug("User does not have container: %s" % self.perrec.userid)
logger.error("AD Account not created for: %s" % self.perrec.userid)
#raise ValueError('User not create')
return False
user_dn = 'cn=' + self.perrec.userid + ',' + self.perrec.ADContainer
logger.info('User DN for new user: %s', user_dn)
user_attrs = {}
user_attrs['objectClass'] = \
['top', 'person', 'organizationalPerson', 'user']
user_attrs['cn'] = str(self.perrec.userid)
user_attrs['userPrincipalName'] = str(self.perrec.userid) + '@' + 'sbcc.local'
user_attrs['sAMAccountName'] = str(self.perrec.userid)
user_attrs['givenName'] = str(self.perrec.fname.encode("utf-8"))
user_attrs['sn'] = str(self.perrec.lname.encode("utf-8"))
user_attrs['displayName'] = str(self.perrec.displayName.encode("utf-8"))
user_attrs['userAccountControl'] = '514'
user_attrs['mail'] = str(self.perrec.userid) + '@pipeline.sbcc.edu'
user_attrs['employeeID'] = str(self.perrec.knumber)
user_attrs['employeeNumber'] = str(self.perrec.knumber).strip('K')
user_ldif = modlist.addModlist(user_attrs)
ad = ADconnection()
with ad as ldapconn:
logger.info('Adding users: %s', user_dn)
ldapconn.add_s(user_dn,user_ldif)
time.sleep(1)
logger.info('Adding membership: %s', user_dn)
add_member = [(ldap.MOD_ADD, 'member', str(user_dn))]
ldapconn.modify_s(self.perrec.ADMemberOf,add_member)
time.sleep(1)
adpass = ('"%s"' % self.perrec.password).encode("utf-16-le")
#adpass = base64.b64encode(adpass)
# Update Password
mod_attrs = [( ldap.MOD_REPLACE, 'unicodePwd', adpass ),( ldap.MOD_REPLACE, 'unicodePwd', adpass)]
logger.info('Setting pass: %s', user_dn)
ldapconn.modify_s(user_dn,mod_attrs)
time.sleep(1)
# 512 will set user account to enabled
mod_acct = [(ldap.MOD_REPLACE, 'userAccountControl', '512')]
logger.info('Trying to enable user: %s', user_dn)
logger.info('userAccountControl: %s', mod_acct)
ldapconn.modify_s(user_dn,mod_acct)
#Enable Account
#self.chgPwd()
#self.enableUser()
logger.info('User added to AD: %s', user_dn)
|
msghens/pyADAP
|
adlib.py
|
Python
|
mit
| 8,216 | 0.041504 |
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib import messages
from models import UserVote
from forms import UserVoteForm
def vote(request):
if request.method == "POST":
form = UserVoteForm(request.POST)
if form.is_valid():
vote = form.save(commit=False)
vote = UserVote.objects.vote(request.user, vote.vote)
messages.info(request, "Your mood is %s" % vote.get_vote_display())
else:
form = UserVoteForm()
return HttpResponseRedirect(reverse('dashboard'))
|
django-stars/dash2011
|
presence/apps/vote/views.py
|
Python
|
bsd-3-clause
| 684 | 0 |
# coding: utf-8
"""`MemoryFS` opener definition.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import typing
from .base import Opener
from .registry import registry
if typing.TYPE_CHECKING:
from typing import Text
from .parse import ParseResult
from ..memoryfs import MemoryFS # noqa: F401
@registry.install
class MemOpener(Opener):
"""`MemoryFS` opener."""
protocols = ["mem"]
def open_fs(
self,
fs_url, # type: Text
parse_result, # type: ParseResult
writeable, # type: bool
create, # type: bool
cwd, # type: Text
):
# type: (...) -> MemoryFS
from ..memoryfs import MemoryFS
mem_fs = MemoryFS()
return mem_fs
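# A minimal sketch of what this opener enables (assuming the standard
# pyfilesystem2 entry point):
#
#     import fs
#     mem_fs = fs.open_fs('mem://')
#     mem_fs.writetext('hello.txt', 'Hello, World!')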
|
PyFilesystem/pyfilesystem2
|
fs/opener/memoryfs.py
|
Python
|
mit
| 808 | 0 |
#!/usr/bin/env python
# coding=utf-8
__author__ = 'Jayin Ton'
from flask import Flask
app = Flask(__name__)
host = '127.0.0.1'
port = 8000
@app.route('/')
def index():
return 'welcome'
if __name__ == '__main__':
app.run(host=host, port=port, debug=True)
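# Quick check (sketch): with the server running,
#     $ curl http://127.0.0.1:8000/
#     welcome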
|
jayinton/FlaskDemo
|
simple/index.py
|
Python
|
mit
| 269 | 0 |
#!/usr/bin/env python
from distutils.core import setup,Extension
from distutils.command.build_py import build_py
dist = setup(name='PyMobot',
version='0.1',
description='Mobot Control Python Library',
author='David Ko',
author_email='david@barobo.com',
url='http://www.barobo.com',
packages=['barobo'],
ext_modules=[Extension('barobo._mobot',
['barobo/mobot.i'],
swig_opts=['-c++', '-I../'],
include_dirs=['../', '../BaroboConfigFile', '../BaroboConfigFile/mxml-2.7'],
define_macros=[('NONRELEASE','1')],
extra_compile_args=['-fpermissive'],
library_dirs=['../', '../BaroboConfigFile', '../BaroboConfigFile/mxml-2.7'],
libraries=['baroboStatic', 'baroboconfigfile', 'mxml', 'pthread', 'ws2_32'],
)],
)
build_py = build_py(dist)
build_py.ensure_finalized()
build_py.run()
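# Typical invocation (sketch; distutils reads the command from sys.argv,
# and the manual build_py step above runs afterwards):
#     python setup_win32.py build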
|
BaroboRobotics/libbarobo
|
PyMobot/setup_win32.py
|
Python
|
gpl-3.0
| 855 | 0.016374 |
#!/usr/bin/env python
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
from tinycss.css21 import CSS21Parser
from tinycss.parsing import remove_whitespace, split_on_comma, ParseError
class MediaQuery(object):
__slots__ = 'media_type', 'expressions', 'negated'
def __init__(self, media_type='all', expressions=(), negated=False):
self.media_type = media_type
self.expressions = expressions
self.negated = negated
def __repr__(self):
return '<MediaQuery type=%s negated=%s expressions=%s>' % (
self.media_type, self.negated, self.expressions)
def __eq__(self, other):
return self.media_type == getattr(other, 'media_type', None) and \
self.negated == getattr(other, 'negated', None) and \
self.expressions == getattr(other, 'expressions', None)
class MalformedExpression(Exception):
def __init__(self, tok, msg):
Exception.__init__(self, msg)
self.tok = tok
class CSSMedia3Parser(CSS21Parser):
''' Parse media queries as defined by the CSS 3 media module '''
def parse_media(self, tokens, errors):
if not tokens:
return [MediaQuery('all')]
queries = []
for part in split_on_comma(remove_whitespace(tokens)):
negated = False
media_type = None
expressions = []
try:
for i, tok in enumerate(part):
if i == 0 and tok.type == 'IDENT':
val = tok.value.lower()
if val == 'only':
continue # ignore leading ONLY
if val == 'not':
negated = True
continue
if media_type is None and tok.type == 'IDENT':
media_type = tok.value
continue
elif media_type is None:
media_type = 'all'
if tok.type == 'IDENT' and tok.value.lower() == 'and':
continue
if not tok.is_container:
raise MalformedExpression(tok, 'expected a media expression not a %s' % tok.type)
if tok.type != '(':
raise MalformedExpression(tok, 'media expressions must be in parentheses not %s' % tok.type)
content = remove_whitespace(tok.content)
if len(content) == 0:
raise MalformedExpression(tok, 'media expressions cannot be empty')
if content[0].type != 'IDENT':
raise MalformedExpression(content[0], 'expected a media feature not a %s' % tok.type)
media_feature, expr = content[0].value, None
if len(content) > 1:
if len(content) < 3:
raise MalformedExpression(content[1], 'malformed media feature definition')
if content[1].type != ':':
raise MalformedExpression(content[1], 'expected a :')
expr = content[2:]
if len(expr) == 1:
expr = expr[0]
elif len(expr) == 3 and (expr[0].type, expr[1].type, expr[1].value, expr[2].type) == (
'INTEGER', 'DELIM', '/', 'INTEGER'):
# This should really be moved into token_data, but
# since RATIO is not part of CSS 2.1 and does not
# occur anywhere else, we special case it here.
r = expr[0]
r.value = (expr[0].value, expr[2].value)
r.type = 'RATIO'
r._as_css = expr[0]._as_css + expr[1]._as_css + expr[2]._as_css
expr = r
else:
raise MalformedExpression(expr[0], 'malformed media feature definition')
expressions.append((media_feature, expr))
except MalformedExpression as err:
                errors.append(ParseError(err.tok, err.message))
media_type, negated, expressions = 'all', True, ()
queries.append(MediaQuery(media_type or 'all', expressions=tuple(expressions), negated=negated))
return queries
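# A minimal parsing sketch (output shape inferred from the classes above):
#
#     parser = CSSMedia3Parser()
#     sheet = parser.parse_stylesheet(
#         '@media not screen and (color) { a { color: red } }')
#     sheet.rules[0].media
#     # -> [<MediaQuery type=screen negated=True expressions=(('color', None),)>]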
|
nozuono/calibre-webserver
|
src/tinycss/media3.py
|
Python
|
gpl-3.0
| 4,645 | 0.003229 |
# extension imports
from _NetworKit import PageRankNibble, GCE
|
fmaschler/networkit
|
networkit/scd.py
|
Python
|
mit
| 62 | 0.016129 |
"""Unit tests for the copy module."""
import sys
import copy
import copy_reg
import unittest
from test import test_support
class TestCopy(unittest.TestCase):
# Attempt full line coverage of copy.py from top to bottom
def test_exceptions(self):
self.assert_(copy.Error is copy.error)
self.assert_(issubclass(copy.Error, Exception))
# The copy() method
def test_copy_basic(self):
x = 42
y = copy.copy(x)
self.assertEqual(x, y)
def test_copy_copy(self):
class C(object):
def __init__(self, foo):
self.foo = foo
def __copy__(self):
return C(self.foo)
x = C(42)
y = copy.copy(x)
self.assertEqual(y.__class__, x.__class__)
self.assertEqual(y.foo, x.foo)
def test_copy_registry(self):
class C(object):
def __new__(cls, foo):
obj = object.__new__(cls)
obj.foo = foo
return obj
def pickle_C(obj):
return (C, (obj.foo,))
x = C(42)
self.assertRaises(TypeError, copy.copy, x)
copy_reg.pickle(C, pickle_C, C)
y = copy.copy(x)
def test_copy_reduce_ex(self):
class C(object):
def __reduce_ex__(self, proto):
return ""
def __reduce__(self):
raise test_support.TestFailed, "shouldn't call this"
x = C()
y = copy.copy(x)
self.assert_(y is x)
def test_copy_reduce(self):
class C(object):
def __reduce__(self):
return ""
x = C()
y = copy.copy(x)
self.assert_(y is x)
def test_copy_cant(self):
class C(object):
def __getattribute__(self, name):
if name.startswith("__reduce"):
raise AttributeError, name
return object.__getattribute__(self, name)
x = C()
self.assertRaises(copy.Error, copy.copy, x)
# Type-specific _copy_xxx() methods
def test_copy_atomic(self):
class Classic:
pass
class NewStyle(object):
pass
def f():
pass
tests = [None, 42, 2L**100, 3.14, True, False, 1j,
"hello", u"hello\u1234", f.func_code,
NewStyle, xrange(10), Classic, max]
for x in tests:
self.assert_(copy.copy(x) is x, repr(x))
def test_copy_list(self):
x = [1, 2, 3]
self.assertEqual(copy.copy(x), x)
def test_copy_tuple(self):
x = (1, 2, 3)
self.assertEqual(copy.copy(x), x)
def test_copy_dict(self):
x = {"foo": 1, "bar": 2}
self.assertEqual(copy.copy(x), x)
def test_copy_inst_vanilla(self):
class C:
def __init__(self, foo):
self.foo = foo
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_copy(self):
class C:
def __init__(self, foo):
self.foo = foo
def __copy__(self):
return C(self.foo)
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getinitargs(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getinitargs__(self):
return (self.foo,)
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return {"foo": self.foo}
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __setstate__(self, state):
self.foo = state["foo"]
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
def test_copy_inst_getstate_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return self.foo
def __setstate__(self, state):
self.foo = state
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C(42)
self.assertEqual(copy.copy(x), x)
# The deepcopy() method
def test_deepcopy_basic(self):
x = 42
y = copy.deepcopy(x)
self.assertEqual(y, x)
def test_deepcopy_memo(self):
# Tests of reflexive objects are under type-specific sections below.
# This tests only repetitions of objects.
x = []
x = [x, x]
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y is not x)
self.assert_(y[0] is not x[0])
self.assert_(y[0] is y[1])
def test_deepcopy_issubclass(self):
# XXX Note: there's no way to test the TypeError coming out of
# issubclass() -- this can only happen when an extension
# module defines a "type" that doesn't formally inherit from
# type.
class Meta(type):
pass
class C:
__metaclass__ = Meta
self.assertEqual(copy.deepcopy(C), C)
def test_deepcopy_deepcopy(self):
class C(object):
def __init__(self, foo):
self.foo = foo
def __deepcopy__(self, memo=None):
return C(self.foo)
x = C(42)
y = copy.deepcopy(x)
self.assertEqual(y.__class__, x.__class__)
self.assertEqual(y.foo, x.foo)
def test_deepcopy_registry(self):
class C(object):
def __new__(cls, foo):
obj = object.__new__(cls)
obj.foo = foo
return obj
def pickle_C(obj):
return (C, (obj.foo,))
x = C(42)
self.assertRaises(TypeError, copy.deepcopy, x)
copy_reg.pickle(C, pickle_C, C)
y = copy.deepcopy(x)
def test_deepcopy_reduce_ex(self):
class C(object):
def __reduce_ex__(self, proto):
return ""
def __reduce__(self):
raise test_support.TestFailed, "shouldn't call this"
x = C()
y = copy.deepcopy(x)
self.assert_(y is x)
def test_deepcopy_reduce(self):
class C(object):
def __reduce__(self):
return ""
x = C()
y = copy.deepcopy(x)
self.assert_(y is x)
def test_deepcopy_cant(self):
class C(object):
def __getattribute__(self, name):
if name.startswith("__reduce"):
raise AttributeError, name
return object.__getattribute__(self, name)
x = C()
self.assertRaises(copy.Error, copy.deepcopy, x)
# Type-specific _deepcopy_xxx() methods
def test_deepcopy_atomic(self):
class Classic:
pass
class NewStyle(object):
pass
def f():
pass
tests = [None, 42, 2L**100, 3.14, True, False, 1j,
"hello", u"hello\u1234", f.func_code,
NewStyle, xrange(10), Classic, max]
for x in tests:
self.assert_(copy.deepcopy(x) is x, repr(x))
def test_deepcopy_list(self):
x = [[1, 2], 3]
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(x is not y)
self.assert_(x[0] is not y[0])
def test_deepcopy_reflexive_list(self):
x = []
x.append(x)
y = copy.deepcopy(x)
self.assertRaises(RuntimeError, cmp, y, x)
self.assert_(y is not x)
self.assert_(y[0] is y)
self.assertEqual(len(y), 1)
def test_deepcopy_tuple(self):
x = ([1, 2], 3)
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(x is not y)
self.assert_(x[0] is not y[0])
def test_deepcopy_reflexive_tuple(self):
x = ([],)
x[0].append(x)
y = copy.deepcopy(x)
self.assertRaises(RuntimeError, cmp, y, x)
self.assert_(y is not x)
self.assert_(y[0] is not x[0])
self.assert_(y[0][0] is y)
def test_deepcopy_dict(self):
x = {"foo": [1, 2], "bar": 3}
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(x is not y)
self.assert_(x["foo"] is not y["foo"])
def test_deepcopy_reflexive_dict(self):
x = {}
x['foo'] = x
y = copy.deepcopy(x)
self.assertRaises(RuntimeError, cmp, y, x)
self.assert_(y is not x)
self.assert_(y['foo'] is y)
self.assertEqual(len(y), 1)
def test_deepcopy_keepalive(self):
memo = {}
x = 42
y = copy.deepcopy(x, memo)
self.assert_(memo[id(x)] is x)
def test_deepcopy_inst_vanilla(self):
class C:
def __init__(self, foo):
self.foo = foo
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y.foo is not x.foo)
def test_deepcopy_inst_deepcopy(self):
class C:
def __init__(self, foo):
self.foo = foo
def __deepcopy__(self, memo):
return C(copy.deepcopy(self.foo, memo))
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y is not x)
self.assert_(y.foo is not x.foo)
def test_deepcopy_inst_getinitargs(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getinitargs__(self):
return (self.foo,)
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y is not x)
self.assert_(y.foo is not x.foo)
def test_deepcopy_inst_getstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return {"foo": self.foo}
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y is not x)
self.assert_(y.foo is not x.foo)
def test_deepcopy_inst_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __setstate__(self, state):
self.foo = state["foo"]
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y is not x)
self.assert_(y.foo is not x.foo)
def test_deepcopy_inst_getstate_setstate(self):
class C:
def __init__(self, foo):
self.foo = foo
def __getstate__(self):
return self.foo
def __setstate__(self, state):
self.foo = state
def __cmp__(self, other):
return cmp(self.foo, other.foo)
x = C([42])
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y is not x)
self.assert_(y.foo is not x.foo)
def test_deepcopy_reflexive_inst(self):
class C:
pass
x = C()
x.foo = x
y = copy.deepcopy(x)
self.assert_(y is not x)
self.assert_(y.foo is y)
# _reconstruct()
def test_reconstruct_string(self):
class C(object):
def __reduce__(self):
return ""
x = C()
y = copy.copy(x)
self.assert_(y is x)
y = copy.deepcopy(x)
self.assert_(y is x)
def test_reconstruct_nostate(self):
class C(object):
def __reduce__(self):
return (C, ())
x = C()
x.foo = 42
y = copy.copy(x)
self.assert_(y.__class__ is x.__class__)
y = copy.deepcopy(x)
self.assert_(y.__class__ is x.__class__)
def test_reconstruct_state(self):
class C(object):
def __reduce__(self):
return (C, (), self.__dict__)
def __cmp__(self, other):
return cmp(self.__dict__, other.__dict__)
x = C()
x.foo = [42]
y = copy.copy(x)
self.assertEqual(y, x)
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y.foo is not x.foo)
def test_reconstruct_state_setstate(self):
class C(object):
def __reduce__(self):
return (C, (), self.__dict__)
def __setstate__(self, state):
self.__dict__.update(state)
def __cmp__(self, other):
return cmp(self.__dict__, other.__dict__)
x = C()
x.foo = [42]
y = copy.copy(x)
self.assertEqual(y, x)
y = copy.deepcopy(x)
self.assertEqual(y, x)
self.assert_(y.foo is not x.foo)
def test_reconstruct_reflexive(self):
class C(object):
pass
x = C()
x.foo = x
y = copy.deepcopy(x)
self.assert_(y is not x)
self.assert_(y.foo is y)
# Additions for Python 2.3 and pickle protocol 2
def test_reduce_4tuple(self):
class C(list):
def __reduce__(self):
return (C, (), self.__dict__, iter(self))
def __cmp__(self, other):
return (cmp(list(self), list(other)) or
cmp(self.__dict__, other.__dict__))
x = C([[1, 2], 3])
y = copy.copy(x)
self.assertEqual(x, y)
self.assert_(x is not y)
self.assert_(x[0] is y[0])
y = copy.deepcopy(x)
self.assertEqual(x, y)
self.assert_(x is not y)
self.assert_(x[0] is not y[0])
def test_reduce_5tuple(self):
class C(dict):
def __reduce__(self):
return (C, (), self.__dict__, None, self.iteritems())
def __cmp__(self, other):
                return (cmp(dict(self), dict(other)) or
cmp(self.__dict__, other.__dict__))
x = C([("foo", [1, 2]), ("bar", 3)])
y = copy.copy(x)
self.assertEqual(x, y)
self.assert_(x is not y)
self.assert_(x["foo"] is y["foo"])
y = copy.deepcopy(x)
self.assertEqual(x, y)
self.assert_(x is not y)
self.assert_(x["foo"] is not y["foo"])
def test_copy_slots(self):
class C(object):
__slots__ = ["foo"]
x = C()
x.foo = [42]
y = copy.copy(x)
self.assert_(x.foo is y.foo)
def test_deepcopy_slots(self):
class C(object):
__slots__ = ["foo"]
x = C()
x.foo = [42]
y = copy.deepcopy(x)
self.assertEqual(x.foo, y.foo)
self.assert_(x.foo is not y.foo)
def test_copy_list_subclass(self):
class C(list):
pass
x = C([[1, 2], 3])
x.foo = [4, 5]
y = copy.copy(x)
self.assertEqual(list(x), list(y))
self.assertEqual(x.foo, y.foo)
self.assert_(x[0] is y[0])
self.assert_(x.foo is y.foo)
def test_deepcopy_list_subclass(self):
class C(list):
pass
x = C([[1, 2], 3])
x.foo = [4, 5]
y = copy.deepcopy(x)
self.assertEqual(list(x), list(y))
self.assertEqual(x.foo, y.foo)
self.assert_(x[0] is not y[0])
self.assert_(x.foo is not y.foo)
def test_copy_tuple_subclass(self):
class C(tuple):
pass
x = C([1, 2, 3])
self.assertEqual(tuple(x), (1, 2, 3))
y = copy.copy(x)
self.assertEqual(tuple(y), (1, 2, 3))
def test_deepcopy_tuple_subclass(self):
class C(tuple):
pass
x = C([[1, 2], 3])
self.assertEqual(tuple(x), ([1, 2], 3))
y = copy.deepcopy(x)
self.assertEqual(tuple(y), ([1, 2], 3))
self.assert_(x is not y)
self.assert_(x[0] is not y[0])
def test_getstate_exc(self):
class EvilState(object):
def __getstate__(self):
raise ValueError, "ain't got no stickin' state"
self.assertRaises(ValueError, copy.copy, EvilState())
def test_main():
test_support.run_unittest(TestCopy)
if __name__ == "__main__":
test_main()
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.4/Lib/test/test_copy.py
|
Python
|
mit
| 17,174 | 0.002678 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import argparse
#import imp
from validator import *
from settings import *
from utils import *
parser = argparse.ArgumentParser(description='Sync two Databases', epilog="Es: python main.py -run test --db-master=mysql://root:remotepasswd@192.168.0.212/dbname --db-slave=mysql://root:passwdx@localhost/dbname")
parser.add_argument('-run', dest='run', action='store', type=str, choices=['test','execute'], required=True,
help='Test Produce only a simulation. Execute does the job !')
parser.add_argument('--no-schemacheck', required=False, action='store_true', help='disable schema check. Use it only if you are sure that the DB\'s schemas are identical and you want to bypass the diff evaluation of the DB_maps files.')
#parser.add_argument('--verbose', required=False, action='store_true', help='view a lot of useful/useless output')
parser.add_argument('--db-master', dest='master', action='store',
required=True, help='es: mysql://user:password@hostname[:3306]/dbname where the data is taken from',
type=RegexValidator(DB_CONNECTOR_PATTERN))
parser.add_argument('--db-slave', dest='slave', action='store',
type=RegexValidator(DB_CONNECTOR_PATTERN),
required=True, help='es: mysql://user:password@hostname[:3306]/dbname where we need to store and sync the data taken from master')
parser.add_argument('--tables', required=True, action='store', help='tables names separated by a comma and no space, like this: --tables users,groups,tb_matchs')
parser.add_argument('--version', action='version', version='pySyncDB 0.1a')
if __name__ == '__main__':
# fetch arguments from sys.args with a little help from argsparse module :)
args = parser.parse_args()
    # check that the folder where the tables_maps will be stored exists
Verify_DBMAPS_Folder()
    # do backup
    # TODO: a procedure to do backups with CREATE statements and INSERT queries from sqlalchemy
    # Backup(db_name)
# producing tables_maps with sqlautocode helps a lot :)
SQLAutoCodeMap('master', args)
SQLAutoCodeMap('slave', args)
    # unless --no-schemacheck was given, start the schema comparison to verify
    # that the two files are identical
if not args.no_schemacheck: SchemaComparator()
    # use imp to import the tables_schemes. This makes everything simpler!
# deprecated: I abandoned it because of ugly warnings like this:
# RuntimeWarning: Parent module 'master_schema' not found while handling absolute import...
#master_schema = imp.load_source('master_schema.py', DB_MAP_FOLDER+'/master_schema.py')
#slave_schema = imp.load_source('slave_schema.py', DB_MAP_FOLDER+'/slave_schema.py')
    # now I simply use this :)
sys.path.append(DB_MAP_FOLDER)
from pydbsync import *
master = SessionManager('master', args.master)
slave = SessionManager('slave', args.slave)
for table in args.tables.split(','):
if args.run == 'test': args.run = None
g = pyTableSyncManager(master, slave, table, args.run)
g.InspectTable()
if g.ProposedUpdates:
g.CommitUpdates()
if g.ProposedInsertions:
g.CommitInsertions()
if g.ProposedDeletions:
g.CommitDeletions()
# purge it!
del(g)
|
peppelinux/pyDBsync
|
src/main.py
|
Python
|
bsd-3-clause
| 3,261 | 0.029745 |
#!/usr/bin/python
import logging, sys, re, getpass, argparse, pprint, csv, time
from pysphere import MORTypes, VIServer, VITask, VIProperty, VIMor, VIException
from pysphere.vi_virtual_machine import VIVirtualMachine
from pysphere.resources import VimService_services as VI
def sizeof_fmt(num):
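    # Divides by 1024 once per entry in the list (i.e. three times in total),
    # converting a byte count to GB, then formats it with 5 decimal places.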
for x in ['bytes','KB','MB']:
num /= 1024.0
return "%3.5f" % (num)
def get_vm_permissions(auth_manager, vm_mor, request):
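    # Returns the effective permissions on the VM as a list of
    # (principal, role_id) tuples, including inherited permissions.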
vm_mor_type = "VirtualMachine"
_this = request.new__this(auth_manager)
_this.set_attribute_type(auth_manager.get_attribute_type())
request.set_element__this(_this)
entity = request.new_entity(vm_mor)
entity.set_attribute_type(vm_mor_type)
request.set_element_entity(entity)
request.set_element_inherited(True)
response = server._proxy.RetrieveEntityPermissions(request)
permissions = response._returnval
perm_array = [(p.Principal, p.RoleId) for p in permissions]
return perm_array
def write_report(vms_info, csvfile, dirname, c):
for val in vms_info.values():
c.writerow([val['Folder'], val['vm'], val['numCPU'], val['MBmemory'], val['storageUsed'], val['storageCommitted'],val['ESXihost'], val['datastores'],
val['vmConfig'], val['networks'], val['netids'], val['vmOS'], val['vmTools'], val['vmPower'], val['vmDNS'], val['Note'],
val['cpuReservationMhz'], val['cpuLimitMhz'], val['memReservationMB'], val['memLimitMB'], val['HardDisks'],
val['CDdrive'], val['snapshots'], val['Permissions'] ])
def create_vm_dict():
vm = {'vmId': None, 'vm': None, 'numCPU': None, 'MBmemory': None, 'vmConfig': None, 'Note': None, 'vmOS': None, 'vmDNS': None,
'vmPower': None, 'vmTools': None, 'cpuReservationMhz': None, 'cpuLimitMhz': None, 'memReservationMB': None, 'memLimitMB': None,
'networks': None, 'datastores': None, 'netids': None, 'snapshots': None, 'CDdrive': None,
'ESXihost': None, 'HardDisks': None, 'storageUsed': None, 'storageCommitted': None, 'Folder': None, 'Permissions': None}
return vm
def create_csv_header():
csv_header = ["Folder", "vmName", "numCPU", "MBmemory", "GBstorage", "GBcommitted", "ESXhost", "datastores", "vmConfig", "NICs",
"NetIDs", "vmOS", "vmTools", "vmPower", "vmDNS", "Note",
"cpuReservationMhz", "cpuLimitMhz", "memReservationMB", "memLimitMB",
"HardDisks", "CDdrive", "Snapshots", "vmPermissions"]
return csv_header
def create_vm_props():
properties = ['name','config.hardware.device', 'config.hardware.numCPU',
'config.hardware.memoryMB', 'config.files.vmPathName',
'runtime.host', 'config.version', 'summary.runtime.powerState',
'config.annotation', 'config.guestFullName', 'guest.hostName',
'guest.toolsVersion', 'guest.disk', 'guest.net',
'resourceConfig.cpuAllocation.reservation',
'resourceConfig.cpuAllocation.limit',
'resourceConfig.memoryAllocation.reservation',
'resourceConfig.memoryAllocation.limit',
'datastore', 'snapshot', 'layoutEx.file', 'storage.perDatastoreUsage']
return properties
def create_me_props():
return ['name', 'parent']
def get_dvp_dict(datacenters, datacentername, server):
dvpgs = {}
# GET INITIAL PROPERTIES AND OBJECTS
dcmor = [k for k,v in datacenters if v==datacentername][0]
dcprops = VIProperty(server, dcmor)
# networkFolder managed object reference
nfmor = dcprops.networkFolder._obj
dvpg_mors = server._retrieve_properties_traversal(property_names=['name','key'], from_node=nfmor, obj_type='DistributedVirtualPortgroup')
# building dictionary with the DVS
for dvpg in dvpg_mors:
mor = dvpg.Obj
entity = {}
for p in dvpg.PropSet:
entity[p.Name]=p.Val
dvpgs[mor] = entity
return dvpgs
def get_path(entity, entities_info):
parent = entity.get('parent')
display_name = "%s" % (entity['name'])
if parent and parent in entities_info:
return get_path(entities_info[parent], entities_info) + " > " + display_name
return display_name
def get_paths_dict(server, properties2):
entities_info = {}
paths = {}
# getting managed entities
props2 = server._retrieve_properties_traversal(property_names=properties2, obj_type='ManagedEntity')
# building a dictionary with the Managed Entities info
for prop in props2:
mor = prop.Obj
entity = {'id':mor, 'name':None, 'parent':None,'type':mor.get_attribute_type()}
for p in prop.PropSet:
entity[p.Name] = p.Val
entities_info[mor] = entity
# building dictionary with VMs vs path
for entity in entities_info.itervalues():
if entity['type'] == "VirtualMachine":
paths[entity['id']] = {'id': entity['id'], 'path':get_path(entity, entities_info)}
return paths
def set_dir(directory):
if directory:
return directory
else:
logger.info('Using default directory /tmp')
return '/tmp'
def getDateSuffix():
return '_'+time.strftime("%Y-%m-%d")
def set_filename(filename):
if filename:
return filename + getDateSuffix()
else:
logger.info('Using default filename vsphere-inventory')
return 'vsphere-inventory' + getDateSuffix()
def get_args():
# Creating the argument parser
parser = argparse.ArgumentParser(description="Report full vShere inventory to a CSV file")
parser.add_argument('-s', '--server', nargs=1, required=True, help='The vCenter or ESXi server to connect to', dest='server', type=str)
parser.add_argument('-u', '--user', nargs=1, required=True, help='The username with which to connect to the server', dest='username', type=str)
parser.add_argument('-p', '--password', nargs=1, required=False, help='The password with which to connect to the host. If not specified, the user is prompted at runtime for a password', dest='password', type=str)
parser.add_argument('-c', '--dc', nargs=1, required=True, help='The datacenter name you wish to report', dest='dcname', type=str)
parser.add_argument('-D', '--dir', required=False, help='Write CSV to a specific directory. Default /tmp', dest='directory', type=str)
parser.add_argument('-f', '--filename', required=False, help='File name. Default vsphere-inventory.csv', dest='filename', type=str)
parser.add_argument('-v', '--verbose', required=False, help='Enable verbose output', dest='verbose', action='store_true')
parser.add_argument('-d', '--debug', required=False, help='Enable debug output', dest='debug', action='store_true')
parser.add_argument('-l', '--log-file', nargs=1, required=False, help='File to log to (default = stdout)', dest='logfile', type=str)
parser.add_argument('-V', '--version', action='version', version="%(prog)s (version 0.4)")
args = parser.parse_args()
return args
def get_vms_dict(server, properties, paths, hosts_dict, datastores_dict, dvpgs):
vms_info = {}
# getting VMs info
props = server._retrieve_properties_traversal(property_names=properties, obj_type='VirtualMachine')
#build a dictionary with the VMs info
for prop in props:
mor = prop.Obj
vm = create_vm_dict()
for p in prop.PropSet:
vm['vmId'] = mor
if p.Name == "name":
vm['vm'] = p.Val
elif p.Name == "config.hardware.numCPU":
vm['numCPU'] = p.Val
elif p.Name == "config.hardware.memoryMB":
vm['MBmemory'] = p.Val
elif p.Name == "config.files.vmPathName":
vm['vmConfig'] = p.Val
elif p.Name == "config.annotation":
annotation = str(p.Val)
annotation = annotation.replace('\n',' ')
vm['Note']= annotation
elif p.Name == "config.guestFullName":
vm['vmOS'] = p.Val
elif p.Name == "guest.hostName":
vm['vmDNS'] = p.Val
elif p.Name == "summary.runtime.powerState":
vm['vmPower'] = p.Val
elif p.Name == "guest.toolsVersion":
vm['vmTools'] = p.Val
elif p.Name == "resourceConfig.cpuAllocation.reservation":
vm['cpuReservationMhz'] = p.Val
elif p.Name == "resourceConfig.cpuAllocation.limit":
vm['cpuLimitMhz'] = p.Val
elif p.Name == "resourceConfig.memoryAllocation.reservation":
vm['memReservationMB'] = p.Val
elif p.Name == "resourceConfig.memoryAllocation.limit":
vm['memLimitMB'] = p.Val
elif p.Name == "guest.net":
netids = {}
for nic in getattr(p.Val, "GuestNicInfo", []):
netids[getattr(nic, 'MacAddress', None)] = getattr(nic, 'IpAddress', None)
vm['netids'] = netids
elif p.Name == "config.hardware.device":
cdroms = []
# macs = []
nets = {}
for data in p.Val.VirtualDevice:
if data.typecode.type[1] == "VirtualCdrom" and data.Connectable.Connected:
cdroms.append(data.DeviceInfo.Summary)
elif data.typecode.type[1] in ["VirtualE1000", "VirtualE1000e", "VirtualPCNet32", "VirtualVmxnet", "VirtualVmxnet3", "VirtualVmxnet2"]:
# NetIDs
# macs.append(getattr(data, "MacAddress", 'NA'))
# Getting DV switch vs NIcs
niclabel = data.DeviceInfo.Label
                        port = getattr(data.Backing, "Port", None)
                        if port:
                            dvpid = getattr(port, "PortgroupKey", "NA")
                            nets[niclabel] = [v['name'] for k, v in dvpgs.items() if k == dvpid]
                        else:
                            nets[niclabel] = 'NA'
vm['CDdrive'] = cdroms
vm['networks'] = nets
# already populated
# if vm['netids'] is None:
# vm['netids']= macs
# elif p.Name == "guest.disk":
# hddsum = 0
# for data in getattr(p.Val, "GuestDiskInfo", []):
# hddsum += int(getattr(data , "Capacity", 0))
# vm['diskCapacity'] = sizeof_fmt(hddsum)
elif p.Name == "datastore":
datastores = []
for data in getattr(p.Val, "_ManagedObjectReference"):
datastores.append([v for k, v in datastores_dict if k == data])
vm["datastores"] = datastores
elif p.Name == "storage.perDatastoreUsage":
usedStorage = 0
committed = 0
uncommitted = 0
unshared = 0
for data in getattr(p.Val, "VirtualMachineUsageOnDatastore", []):
committed += getattr(data , "Committed", 0)
uncommitted += getattr(data , "Uncommitted", 0)
unshared += getattr(data, "Unshared", 0)
vm['storageCommitted'] = sizeof_fmt(committed)
vm['storageUsed'] = sizeof_fmt(committed + uncommitted + unshared)
elif p.Name == "snapshot":
snapshots = []
for data in getattr(p.Val, "_rootSnapshotList"):
snapshot_str = str(getattr(data,"Id")) + "; " + str(getattr(data,"Name")) + "; " + str(getattr(data,"Description")) + "; " + str(getattr(data, "CreateTime"))
snapshots.append(snapshot_str)
vm["snapshots"] = snapshots
elif p.Name == "runtime.host":
vm["ESXihost"] = [v for k, v in hosts_dict if k == p.Val]
elif p.Name == "layoutEx.file":
files = []
for data in getattr(p.Val, "VirtualMachineFileLayoutExFileInfo"):
if getattr(data, 'Type') in ["diskDescriptor","diskExtent"]:
files.append(getattr(data, 'Name'))
vm['HardDisks'] = files
else:
vm[p.Name] = p.Val
vms_info[mor] = vm
# adding paths to vms
for vm_info in vms_info.values():
for path in paths.values():
if vm_info.get('vmId') == path.get('id'):
vm_info['Folder'] = path.get('path')
# Getting and Setting VM permission
request = VI.RetrieveEntityPermissionsRequestMsg()
auth_manager = server._do_service_content.AuthorizationManager
for vm_info in vms_info.values():
vm_info['Permissions'] = get_vm_permissions(auth_manager, vm_info.get('vmId'), request)
return vms_info
# Parsing values
args = get_args()
argsdict = vars(args)
servervctr = args.server[0]
username = args.username[0]
dcname = args.dcname[0]
verbose = args.verbose
debug = args.debug
log_file = None
password = None
directory = args.directory
filename = args.filename
if args.password:
password = args.password[0]
if args.logfile:
log_file = args.logfile[0]
# Logging settings
if debug:
log_level = logging.DEBUG
elif verbose:
log_level = logging.INFO
else:
log_level = logging.WARNING
# Initializing logger
if log_file:
logfile = log_file + getDateSuffix() + '.log'
logging.basicConfig(filename=logfile,format='%(asctime)s %(levelname)s %(message)s',level=log_level)
logger = logging.getLogger(__name__)
else:
logging.basicConfig(filename=log_file,format='%(asctime)s %(levelname)s %(message)s',level=log_level)
logger = logging.getLogger(__name__)
logger.debug('logger initialized')
# CSV configuration
csvfile = set_filename(filename)
dirname = set_dir(directory)
csv_header = create_csv_header()
c = None
try:
logger.debug('Setting up CSV file %s/%s.csv' % (dirname, csvfile))
c = csv.writer(open(dirname+"/"+csvfile+".csv", "wb"), quoting=csv.QUOTE_ALL)
c.writerow(csv_header)
logger.info('Successfully created CSV file %s/%s.csv' % (dirname, csvfile))
except IOError as inst:
    logger.error(inst)
    logger.error('Due to previous errors, program will exit')
    sys.exit()
# Asking Users password for server
if password is None:
logger.debug('No command line password received, requesting password from user')
password = getpass.getpass(prompt='Enter password for vCenter %s for user %s: ' % (servervctr,username))
# Connecting to server
logger.info('Connecting to server %s with username %s' % (servervctr,username))
server = VIServer()
try:
logger.debug('Trying to connect with provided credentials')
server.connect(servervctr,username,password)
logger.info('Connected to server %s' % servervctr)
logger.debug('Server type: %s' % server.get_server_type())
logger.debug('API version: %s' % server.get_api_version())
except VIException as ins:
logger.error(ins)
    logger.debug('Login error. Program will exit now.')
sys.exit()
if dcname is None:
    logger.error('No datacenter name. Program will exit now.')
sys.exit()
# Setting up properties
logger.debug('Getting properties to query')
properties = create_vm_props()
logger.debug('First set of properties: %s' %properties)
properties2 = create_me_props()
logger.debug('Second set of properties: %s' %properties2)
# Dictionaries and additional variables configuration
vms_info = {}
hosts_dict = None
datastores_dict = None
dvpgs = {}
paths = {}
props = None
# hosts, datastores, dvpgs, paths and vms
try:
hosts_dict = server.get_hosts().items()
logger.debug('Host dictionary generated with size %d' % (len(hosts_dict)))
datastores_dict = server.get_datastores().items()
logger.debug('Datastores dictionary generated with size %d' % (len(datastores_dict)))
datacenters = server.get_datacenters().items()
logger.debug('Datacenters dictionary generated with size %d' % (len(datacenters)))
dvpgs = get_dvp_dict(datacenters, dcname, server)
logger.debug('Distributed Virtual Portgroup dictionary generated with size %d' % (len(dvpgs)))
paths = get_paths_dict(server, properties2)
logger.debug('VM Paths dictionary generated with size %d' % (len(paths)))
logger.info('Pre-required dictionaries were successfully gotten: Hosts (%s), Datastores (%s), Datacenters(%s), DVPG(%s) and VM Paths(%s)' %(len(hosts_dict), len(datastores_dict), len(datacenters), len(dvpgs), len(paths)))
logger.info('Building main Virtual Machine properties dictionary. This might take a few minutes.')
vms_info = get_vms_dict(server, properties, paths, hosts_dict, datastores_dict, dvpgs)
logger.debug('VM main dictionary generated with size %d' %(len(vms_info)))
# Disconnecting from server
logger.info('Terminating server %s session' % servervctr)
server.disconnect()
except VIException as inst:
logger.error(inst)
logger.error('An unexpected error occurred. Program will exit')
sys.exit()
# CSV report
try:
    logger.debug('Writing report to %s/%s.csv' % (dirname, csvfile))
write_report(vms_info, csvfile, dirname, c)
logger.info('Successfully written CSV report %s/%s.csv' % (dirname, csvfile))
except IOError as inst:
logger.error(inst)
logger.error('An unexpected error occurred. Program will exit')
sys.exit()
|
jm66/vsphere_inventory_report
|
vsphere_inventory_report.py
|
Python
|
gpl-2.0
| 17,553 | 0.014357 |
from PdfProcessor import *
import argparse
from datetime import datetime
import ConfigParser
import ProcessLogger
import traceback
from urllib2 import HTTPError, URLError
parser = argparse.ArgumentParser(description='Processes the pdf and extracts the text')
parser.add_argument('-l','--language', help='Language of input pdf file for transcription (english, french, spanish).', required=False, default="english")
parser.add_argument('-i','--infile', help='File path of the input pdf file.', required=True)
parser.add_argument('-o','--outdir', help='File name of the output csv file.', required=True)
results = parser.parse_args()
allowed_languages = ["english", "french", "spanish", "portuguese", "arabic"]
pdfProcessor = ""
try:
logger = ProcessLogger.getLogger('run')
logger.info("Processing started at %s ", str(datetime.now()))
logger.info("input: %s", results.infile)
logger.info("outdir: %s", results.outdir)
if results.language.lower() not in allowed_languages:
raise Exception("language should be one of english, french, spanish, portuguese or arabic")
if results.language.lower() == "portuguese":
results.language = "portuguesestandard"
configParser = ConfigParser.RawConfigParser()
configParser.read(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'settings.config'))
pdfProcessor = PDFProcessor(results.infile, results.outdir, results.language.lower())
pdfProcessor.setConfigParser(configParser)
pdfProcessor.writeStats()
if pdfProcessor.isStructured():
pdfProcessor.extractTextFromStructuredDoc()
else:
pdfProcessor.extractTextFromScannedDoc()
# note: HTTPError is a subclass of URLError, so it must be caught first
except HTTPError as e:
    logger.error("HTTPError: [%s] %s", e.code, e.reason)
    logger.debug(traceback.format_exception(*sys.exc_info()))
except URLError as e:
    logger.error("URLError: %s", e.reason)
    logger.debug(traceback.format_exception(*sys.exc_info()))
except OSError as e:
    logger.error("OSError: %s [%s] in %s", e.strerror, e.errno, e.filename)
    logger.debug(traceback.format_exception(*sys.exc_info()))
except Exception as e:
    logger.error("Exception: %s ", e)
    logger.debug(traceback.format_exception(*sys.exc_info()))
finally:
    logger.info("Processing ended at %s ", str(datetime.now()))
|
anjesh/pdf-processor
|
run.py
|
Python
|
mit
| 2,284 | 0.007005 |
#!/usr/bin/python
import requests
import time, Cookie
# Instantiate a SimpleCookie object
cookie = Cookie.SimpleCookie()
# The SimpleCookie instance is a mapping
cookie['lastvisit'] = str(time.time())
s = requests.session()
s.cookies.clear()
# Output the HTTP message containing the cookie
print cookie
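# Note: "print cookie" above writes a "Set-Cookie: lastvisit=..." header line;
# in CGI output it must precede the blank line that ends the HTTP headers.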
print 'Content-Type: text/html\n'
print '<html><body>'
print 'Server time is', time.asctime(time.localtime())
print '</body></html>'
|
5610110083/Safety-in-residential-project
|
cgi-bin/any/setCookies.py
|
Python
|
apache-2.0
| 447 | 0.029083 |
"""
commswave
=========
Takes device communications up and down according to a timefunction.
Comms will be working whenever the timefunction returns non-zero.
Configurable parameters::
{
"timefunction" : A timefunction definition
"threshold" : (optional) Comms will only work when the timefunction is returning >= threshold. If missing then any non-zero value will make comms work.
"gate_properties" : (optional) ["list", "of", "properties"] If this is defined, then instead of taking whole comms up and down, only these specific properties are gated
}
Device properties created::
{
}
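Example (illustrative only: "mytimefunction" stands for whichever timefunction
class is available to the importer, and the gated property name is hypothetical)::
    {
        "timefunction" : { "mytimefunction" : {} },
        "threshold" : 0.5,
        "gate_properties" : ["temperature"]
    }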
"""
from .device import Device
from common import importer
import logging
class Commswave(Device):
def __init__(self, instance_name, time, engine, update_callback, context, params):
"""Take Comms up and down according to some time function"""
tf = params["commswave"]["timefunction"]
self.comms_timefunction = importer.get_class("timefunction", list(tf.keys())[0])(engine, self, tf[list(tf.keys())[0]])
self.comms_tf_threshold = params["commswave"].get("threshold", None)
self.comms_gate_properties = params["commswave"].get("gate_properties", None)
self.messages_sent = 0
self.messages_attempted = 0
super(Commswave,self).__init__(instance_name, time, engine, update_callback, context, params)
def timefunction_says_communicate(self):
thresh = 0.0
if self.comms_tf_threshold is not None:
thresh = self.comms_tf_threshold
return self.comms_timefunction.state() > thresh
def comms_ok(self):
if self.comms_gate_properties is not None: # If we're gating individual properties, then don't gate overall comms
return super(Commswave, self).comms_ok()
else:
self.messages_attempted += 1
is_ok = super(Commswave, self).comms_ok()
is_ok = is_ok and self.timefunction_says_communicate()
if is_ok:
self.messages_sent += 1
return is_ok
def transmit(self, the_id, ts, properties, force_comms):
if self.comms_gate_properties is not None: # We're gating properties
if not self.timefunction_says_communicate():
for p in self.comms_gate_properties:
properties.pop(p, None) # Remove the property, if it's there
super(Commswave, self).transmit(the_id, ts, properties, force_comms)
def external_event(self, event_name, arg):
super(Commswave, self).external_event(event_name, arg)
def close(self):
super(Commswave,self).close()
logging.info("Comms report for " + str(self.properties["$id"]) + " " +
str(self.messages_sent) + " sent ("+str(100 * self.messages_sent/self.messages_attempted) + "%) from " +
str(self.messages_attempted) + " total")
# Private methods
## (we don't actually need to tick, as we can instantaneously look up timefunction state whenever we need to)
## def tick_commswave(self, _):
## self.ok_commswave = self.comms_timefunction.state()
## self.engine.register_event_at(self.comms_timefunction.next_change(), self.tick_commswave, self, self)
|
DevicePilot/synth
|
synth/devices/commswave.py
|
Python
|
mit
| 3,252 | 0.006458 |
import json
from plugins import gateway_speaker
from plugins.magnet import MAGNET_STORE_KEY
DOOR_SENSOR_SID = '158d0001837ec2'
def run(store, conn, cursor):
"""Play sound on the Gateway when somebody opens the door"""
p = store.pubsub(ignore_subscribe_messages=True)
p.subscribe(MAGNET_STORE_KEY)
for message in p.listen():
if message.get('type') != 'message':
continue
data = json.loads(message.get('data').decode())
if data.get('sid') == DOOR_SENSOR_SID and data.get('status') == 'open':
gateway_speaker.play(3) # Standard alarm sound
|
aluminiumgeek/goodbye-mihome
|
apps/sound_when_door_is_open.py
|
Python
|
bsd-2-clause
| 608 | 0 |
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for provide recommendation based on analysis results"""
from decimal import Decimal
from typing import Union
from ml_eda.metadata import run_metadata_pb2
from ml_eda.reporting import template
# Thresholds
MISSING_THRESHOLD = 0.1
CARDINALITY_THRESHOLD = 100
CORRELATION_COEFFICIENT_THRESHOLD = 0.3
P_VALUE_THRESHOLD = 0.05
def check_missing(attribute_name: str,
analysis: run_metadata_pb2.Analysis
) -> Union[None, str]:
"""Check whether % of missing exceed threshold
Args:
attribute_name: (string),
analysis: (run_metadata_pb2.Analysis), analysis that contain the result
of number of missing values
Returns:
Union[None, string]
"""
metrics = analysis.smetrics
total = 0
missing = 0
for item in metrics:
if item.name == run_metadata_pb2.ScalarMetric.TOTAL_COUNT:
total = item.value
elif item.name == run_metadata_pb2.ScalarMetric.MISSING:
missing = item.value
if total == 0:
raise ValueError('The dataset is empty')
missing_rate = missing / total
if missing_rate > MISSING_THRESHOLD:
return template.HIGH_MISSING.format(
name=attribute_name,
value=missing_rate
)
return None
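# Illustrative usage of check_missing (assumes the standard generated-protobuf
# API for run_metadata_pb2; the attribute name 'age' and the metric values are
# hypothetical):
#
#   analysis = run_metadata_pb2.Analysis()
#   analysis.smetrics.add(name=run_metadata_pb2.ScalarMetric.TOTAL_COUNT,
#                         value=1000)
#   analysis.smetrics.add(name=run_metadata_pb2.ScalarMetric.MISSING,
#                         value=200)
#   check_missing('age', analysis)  # missing rate 0.2 > 0.1 -> recommendation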
def check_cardinality(attribute_name: str,
analysis: run_metadata_pb2.Analysis
) -> Union[None, str]:
"""Check whether the cardinality exceeds the predefined threshold
Args:
attribute_name: (string),
analysis: (run_metadata_pb2.Analysis), analysis that contain the result
of cardinality
Returns:
Union[None, string]
"""
metrics = analysis.smetrics
cardinality = 0
for item in metrics:
if item.name == run_metadata_pb2.ScalarMetric.CARDINALITY:
cardinality = item.value
if cardinality > CARDINALITY_THRESHOLD:
return template.HIGH_CARDINALITY.format(
name=attribute_name,
value=cardinality
)
return None
def check_pearson_correlation(analysis: run_metadata_pb2.Analysis
) -> Union[None, str]:
"""Check whether the correlation coefficients exceed the predefined threshold
Args:
analysis: (run_metadata_pb2.Analysis), analysis that contain the result
of pearson correlation
Returns:
Union[None, string]
"""
metrics = analysis.smetrics
name_list = [att.name for att in analysis.features]
coefficient = 0
for item in metrics:
if item.name == run_metadata_pb2.ScalarMetric.CORRELATION_COEFFICIENT:
coefficient = item.value
if abs(coefficient) > CORRELATION_COEFFICIENT_THRESHOLD:
return template.HIGH_CORRELATION.format(
name_one=name_list[0],
name_two=name_list[1],
metric='correlation coefficient',
value="{0:.2f}".format(coefficient)
)
return None
def check_p_value(analysis: run_metadata_pb2.Analysis
) -> Union[None, str]:
"""Check whether the p-value of statistical tests
exceed the predefined threshold
Args:
analysis: (run_metadata_pb2.Analysis), analysis that contain the result
of statistical test
Returns:
Union[None, string]
"""
metric = analysis.smetrics[0]
analysis_name = run_metadata_pb2.Analysis.Name.Name(analysis.name)
name_list = [att.name for att in analysis.features]
p_value = metric.value
if p_value < P_VALUE_THRESHOLD:
return template.LOW_P_VALUE.format(
name_one=name_list[0],
name_two=name_list[1],
metric='p-value',
value="{:.2E}".format(Decimal(str(p_value))),
test_name=analysis_name
)
return None
|
CloudVLab/professional-services
|
tools/ml-auto-eda/ml_eda/reporting/recommendation.py
|
Python
|
apache-2.0
| 4,289 | 0.007694 |
# -*- coding: utf-8 -*-
###############################################################################
# License, author and contributors information in: #
# __openerp__.py file at the root folder of this module. #
###############################################################################
from openerp import models, fields, api
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval
from logging import getLogger
_logger = getLogger(__name__)
class DevelopmentToolsConfigSettings(models.TransientModel):
""" Module config settings
Fields:
email_to (Char): Address will be used to send captured email messages
email_capture (Boolean): Check it to capture outgoing email messages
developing_modules_enabled (Boolean): Sets the filter as default filter
in Local modules views
developing_module_ids (Many2many): Select items you want to display by
default Local modules views
search_default_app (Boolean): Enable search_default_app filter in the
Local modules view
"""
_name = 'development_tools.config.settings'
_description = u'Development tools config settings'
_inherit = ['res.config.settings']
_rec_name = 'id'
_order = 'id ASC'
# ---------------------------- ENTITY FIELDS ------------------------------
email_to = fields.Char(
string='Email to',
required=False,
readonly=False,
index=False,
help='Address will be used to send captured email messages',
size=50,
translate=False,
default='development_tools@yopmail.com',
)
email_capture = fields.Boolean(
string='Capture emails',
required=False,
readonly=False,
index=False,
default=True,
help='Check it to capture outgoing email messages',
)
developing_modules_enabled = fields.Boolean(
string='Set as default filter',
required=False,
readonly=False,
index=False,
default=False, # filter_model_name_whithout_module_development_modules
help='Sets the filter as default filter in Local modules views'
)
developing_module_ids = fields.Many2many(
string='Modules shown',
required=False,
readonly=False,
index=False,
default=None,
        help='Select items you want to display by default in Local modules views',
comodel_name='ir.module.module',
domain=[],
context={},
limit=None,
manual=True,
compute=lambda self: self._compute_developing_module_ids(),
inverse=lambda self: self._inverse_developing_module_ids()
)
search_default_app = fields.Boolean(
string='Search default app filter',
required=False,
readonly=False,
index=False,
default=False,
help='Enable search_default_app filter in the Local modules view'
)
development_mode = fields.Boolean(
string='Development mode as default',
required=False,
readonly=False,
index=False,
default=True,
help='Set development mode by default'
)
# ----------------------- COMPUTED FIELD METHODS --------------------------
def _compute_developing_module_ids(self):
for record in self:
record.developing_module_ids = record.get_developing_module_ids()
def _inverse_developing_module_ids(self):
try:
ids = [module.id for module in self.developing_module_ids]
name = 'filter_model_name_whithout_module_development_modules'
filter_set = self.env.ref('{}.{}'.format(self._module, name))
filter_set.domain = unicode([('id', 'in', ids or [-1])])
except Exception as ex:
_logger.error('_inverse_developing_module_ids: %s' % ex)
# --------------------- RES.CONFIG.SETTINGS METHODS -----------------------
@api.model
def get_default_values(self, values):
return dict(
email_to=self.get_email_to(),
email_capture=self.get_email_capture(),
developing_modules_enabled=self.get_developing_modules_enabled(),
developing_module_ids=self.get_developing_module_ids(),
search_default_app=self.get_search_default_app(),
development_mode=self.get_debug_mode(),
)
@api.one
def set_default_values(self):
self._set_email_to()
self._set_email_capture()
self._set_developing_modules_enabled()
self._set_developing_module_ids()
self._set_search_default_app()
self._set_debug_mode()
# ------------------------- GETTERS AND SETTERS ---------------------------
def get_email_to(self):
param = self._get_parameter('email_to')
return param.value if param else self._defaults['email_to']
def _set_email_to(self):
param = self._get_parameter('email_to', force=True)
param.value = self.email_to
def get_email_capture(self):
param = self._get_parameter('email_capture')
if param:
value = self._safe_eval(param.value, bool)
else:
value = self._defaults['email_capture']
return value
def _set_email_capture(self):
param = self._get_parameter('email_capture', force=True)
param.value = unicode(self.email_capture)
def get_developing_modules_enabled(self):
value = False
try:
name = 'filter_model_name_whithout_module_development_modules'
filter_set = self.env.ref('{}.{}'.format(self._module, name))
value = filter_set.is_default
except Exception as ex:
msg = self._not_retrieved.format('developing_modules_enabled', ex)
_logger.error(msg)
return value
def _set_developing_modules_enabled(self):
try:
name = 'filter_model_name_whithout_module_development_modules'
filter_set = self.env.ref('{}.{}'.format(self._module, name))
filter_set.is_default = self.developing_modules_enabled
except Exception as ex:
            msg = self._not_set.format('developing_modules_enabled', ex)
_logger.error(msg)
def get_developing_module_ids(self):
value = None
try:
name = 'filter_model_name_whithout_module_development_modules'
filter_set = self.env.ref('{}.{}'.format(self._module, name))
domain = self._safe_eval(filter_set.domain, list)
value = filter(lambda x: x > 0, domain[0][2])
except Exception as ex:
msg = self._not_retrieved.format('developing_module_ids', ex)
_logger.error(msg)
return value
def _set_developing_module_ids(self):
try:
ids = [module.id for module in self.developing_module_ids]
name = 'filter_model_name_whithout_module_development_modules'
filter_set = self.env.ref('{}.{}'.format(self._module, name))
filter_set.domain = unicode([('id', 'in', ids or [-1])])
except Exception as ex:
            msg = self._not_set.format('developing_module_ids', ex)
_logger.error(msg)
def get_search_default_app(self):
value = None
try:
action_set = self.env.ref('base.open_module_tree')
context = self._safe_eval(action_set.context, dict)
if 'search_default_app' in context:
value = context['search_default_app'] in [1, True]
else:
value = False
except Exception as ex:
msg = self._not_retrieved.format('search_default_app', ex)
_logger.error(msg)
return value
def _set_search_default_app(self):
try:
action_set = self.env.ref('base.open_module_tree')
context = self._safe_eval(action_set.context, dict)
value = 1 if self.search_default_app else 0
context.update({'search_default_app': value})
action_set.context = unicode(context)
except Exception as ex:
msg = self._not_set.format('search_default_app', ex)
_logger.error(msg)
def get_debug_mode(self):
param = self._get_parameter('development_mode')
if param:
value = self._safe_eval(param.value, bool)
else:
value = self._defaults['development_mode']
return value
def _set_debug_mode(self):
param = self._get_parameter('development_mode', force=True)
param.value = unicode(self.development_mode)
# --------------------------- #PUBLIC METHODS -----------------------------
def get_value(self, field_name):
""" Calls the appropiate method to retrieve the value of the field
with the given name and returns its value
:param field_name (char): name of the field
:returns: returns retrieved value or None
"""
result = None
try:
method_name = 'get_{}'.format(field_name)
method = getattr(self, method_name)
result = method()
except Exception as ex:
msg = self._not_value.format(field_name, ex)
_logger.error(msg)
return result
# -------------------------- AUXILIARY METHODS ----------------------------
def _get_parameter(self, field_name, force=False, default=u''):
""" Gets the ir.config_parameter for the field
:param field_name (char): name of the field
:force (bool): create record if not exists
:default (basestring): default parameter value if it is creted new
:return (ir.config_parameter): recordset with a record or empty
:note: Parameters could be searched by their external ids but if
they are created anew, then they could not be found
            :note: limiting the search is not needed because the `key` column
                has a unique index constraint
:note: If there is not any matching record, the returned set will
be empty
"""
param_name = u'{}.{}'.format(self._module, field_name)
param_domain = [('key', '=', param_name)]
param_obj = self.env['ir.config_parameter']
param_set = param_obj.search(param_domain)
if not param_set and force:
param_set = param_obj.create(
{'key': param_name, 'value': default}
)
return param_set
def _safe_eval(self, value_str, types=None):
""" Try to convert an string in a value of one of the given types
:param value_str (basestring): string to be converted
:param types (type): type or iterable set of them
:return: value of one of the types if it could be converted or None
"""
value = None
try:
types = self._iterable(types)
value = safe_eval(value_str)
if not type(value) in types:
msg = self._check_type_msg.format(value_str, types)
_logger.error(msg)
value = None
except Exception as ex:
_logger.error(self._safe_eval_msg.format(value_str, ex))
return value
def _iterable(self, item):
""" Ensures the given item is iterable
:param: item to be tested
:return: item if it's iterable or the item within a list
"""
try:
iter(item)
item.__iter__()
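            # __iter__ is checked explicitly so that objects which are only
            # iterable via __getitem__ (e.g. str in Python 2) are wrapped too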
except:
return [item]
else:
return item or [None]
# ----------------------- LONG CHARACTER STRINGS --------------------------
_safe_eval_msg = _(
u'Value \'{}\' could not be evaluated\n'
u'System has said: {}'
)
_check_type_msg = _(
u'Wrong type value `{}`, one of the following was expected: `{}`'
)
_not_retrieved = _(
u'The value `{}` could not be retrieved\n'
u'System has said: {}'
)
_not_set = _(
u'The value `{}` could not be set\n'
u'System has said: {}'
)
_not_value = _(
u'Could not retrive value for field `{}`\n'
u'System has said: {}'
)
|
odoousers2014/odoo-development
|
modules/development_tools/wizard/development_tools_config_settings.py
|
Python
|
agpl-3.0
| 12,322 | 0.000081 |
from __future__ import print_function
from __future__ import unicode_literals
from inspect import getdoc
from operator import attrgetter
import logging
import re
import signal
import sys
from docker.errors import APIError
import dockerpty
from .. import __version__
from .. import legacy
from ..const import DEFAULT_TIMEOUT
from ..project import NoSuchService, ConfigurationError
from ..service import BuildError, NeedsBuildError
from ..config import parse_environment
from ..progress_stream import StreamOutputError
from .command import Command
from .docopt_command import NoSuchCommand
from .errors import UserError
from .formatter import Formatter
from .log_printer import LogPrinter
from .utils import yesno, get_version_info
log = logging.getLogger(__name__)
def main():
setup_logging()
try:
command = TopLevelCommand()
command.sys_dispatch()
except KeyboardInterrupt:
log.error("\nAborting.")
sys.exit(1)
except (UserError, NoSuchService, ConfigurationError, legacy.LegacyError) as e:
log.error(e.msg)
sys.exit(1)
except NoSuchCommand as e:
log.error("No such command: %s", e.command)
log.error("")
log.error("\n".join(parse_doc_section("commands:", getdoc(e.supercommand))))
sys.exit(1)
except APIError as e:
log.error(e.explanation)
sys.exit(1)
except BuildError as e:
log.error("Service '%s' failed to build: %s" % (e.service.name, e.reason))
sys.exit(1)
except StreamOutputError as e:
log.error(e)
sys.exit(1)
except NeedsBuildError as e:
log.error("Service '%s' needs to be built, but --no-build was passed." % e.service.name)
sys.exit(1)
def setup_logging():
console_handler = logging.StreamHandler(sys.stderr)
console_handler.setFormatter(logging.Formatter())
console_handler.setLevel(logging.INFO)
root_logger = logging.getLogger()
root_logger.addHandler(console_handler)
root_logger.setLevel(logging.DEBUG)
# Disable requests logging
logging.getLogger("requests").propagate = False
# stolen from docopt master
def parse_doc_section(name, source):
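    # e.g. parse_doc_section("commands:", getdoc(TopLevelCommand)) returns the
    # "Commands:" heading line together with the indented lines beneath it.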
pattern = re.compile('^([^\n]*' + name + '[^\n]*\n?(?:[ \t].*?(?:\n|$))*)',
re.IGNORECASE | re.MULTILINE)
return [s.strip() for s in pattern.findall(source)]
class TopLevelCommand(Command):
"""Define and run multi-container applications with Docker.
Usage:
docker-compose [options] [COMMAND] [ARGS...]
docker-compose -h|--help
Options:
-f, --file FILE Specify an alternate compose file (default: docker-compose.yml)
-p, --project-name NAME Specify an alternate project name (default: directory name)
--verbose Show more output
-v, --version Print version and exit
Commands:
build Build or rebuild services
help Get help on a command
kill Kill containers
logs View output from containers
port Print the public port for a port binding
ps List containers
pull Pulls service images
restart Restart services
rm Remove stopped containers
run Run a one-off command
scale Set number of containers for a service
start Start services
stop Stop services
up Create and start containers
migrate-to-labels Recreate containers to add labels
version Show the Docker-Compose version information
"""
def docopt_options(self):
options = super(TopLevelCommand, self).docopt_options()
options['version'] = get_version_info('compose')
return options
def build(self, project, options):
"""
Build or rebuild services.
Services are built once and then tagged as `project_service`,
e.g. `composetest_db`. If you change a service's `Dockerfile` or the
contents of its build directory, you can run `docker-compose build` to rebuild it.
Usage: build [options] [SERVICE...]
Options:
--no-cache Do not use cache when building the image.
"""
no_cache = bool(options.get('--no-cache', False))
project.build(service_names=options['SERVICE'], no_cache=no_cache)
def help(self, project, options):
"""
Get help on a command.
Usage: help COMMAND
"""
handler = self.get_handler(options['COMMAND'])
raise SystemExit(getdoc(handler))
def kill(self, project, options):
"""
Force stop service containers.
Usage: kill [options] [SERVICE...]
Options:
-s SIGNAL SIGNAL to send to the container.
Default signal is SIGKILL.
"""
signal = options.get('-s', 'SIGKILL')
project.kill(service_names=options['SERVICE'], signal=signal)
def logs(self, project, options):
"""
View output from containers.
Usage: logs [options] [SERVICE...]
Options:
--no-color Produce monochrome output.
"""
containers = project.containers(service_names=options['SERVICE'], stopped=True)
monochrome = options['--no-color']
print("Attaching to", list_containers(containers))
LogPrinter(containers, attach_params={'logs': True}, monochrome=monochrome).run()
def port(self, project, options):
"""
Print the public port for a port binding.
Usage: port [options] SERVICE PRIVATE_PORT
Options:
--protocol=proto tcp or udp [default: tcp]
--index=index index of the container if there are multiple
instances of a service [default: 1]
"""
index = int(options.get('--index'))
service = project.get_service(options['SERVICE'])
try:
container = service.get_container(number=index)
except ValueError as e:
raise UserError(str(e))
print(container.get_local_port(
options['PRIVATE_PORT'],
protocol=options.get('--protocol') or 'tcp') or '')
def ps(self, project, options):
"""
List containers.
Usage: ps [options] [SERVICE...]
Options:
-q Only display IDs
"""
containers = sorted(
project.containers(service_names=options['SERVICE'], stopped=True) +
project.containers(service_names=options['SERVICE'], one_off=True),
key=attrgetter('name'))
if options['-q']:
for container in containers:
print(container.id)
else:
headers = [
'Name',
'Command',
'State',
'Ports',
]
rows = []
for container in containers:
command = container.human_readable_command
if len(command) > 30:
command = '%s ...' % command[:26]
rows.append([
container.name,
command,
container.human_readable_state,
container.human_readable_ports,
])
print(Formatter().table(headers, rows))
def pull(self, project, options):
"""
Pulls images for services.
Usage: pull [options] [SERVICE...]
Options:
--allow-insecure-ssl Allow insecure connections to the docker
registry
"""
insecure_registry = options['--allow-insecure-ssl']
project.pull(
service_names=options['SERVICE'],
insecure_registry=insecure_registry
)
def rm(self, project, options):
"""
Remove stopped service containers.
Usage: rm [options] [SERVICE...]
Options:
-f, --force Don't ask to confirm removal
-v Remove volumes associated with containers
"""
all_containers = project.containers(service_names=options['SERVICE'], stopped=True)
stopped_containers = [c for c in all_containers if not c.is_running]
if len(stopped_containers) > 0:
print("Going to remove", list_containers(stopped_containers))
if options.get('--force') \
or yesno("Are you sure? [yN] ", default=False):
project.remove_stopped(
service_names=options['SERVICE'],
v=options.get('-v', False)
)
else:
print("No stopped containers")
def run(self, project, options):
"""
Run a one-off command on a service.
For example:
$ docker-compose run web python manage.py shell
By default, linked services will be started, unless they are already
running. If you do not want to start linked services, use
`docker-compose run --no-deps SERVICE COMMAND [ARGS...]`.
Usage: run [options] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...]
Options:
--allow-insecure-ssl Allow insecure connections to the docker
registry
-d Detached mode: Run container in the background, print
new container name.
--entrypoint CMD Override the entrypoint of the image.
-e KEY=VAL Set an environment variable (can be used multiple times)
-u, --user="" Run as specified username or uid
--no-deps Don't start linked services.
--rm Remove container after run. Ignored in detached mode.
--service-ports Run command with the service's ports enabled and mapped
to the host.
-T Disable pseudo-tty allocation. By default `docker-compose run`
allocates a TTY.
"""
service = project.get_service(options['SERVICE'])
insecure_registry = options['--allow-insecure-ssl']
if not options['--no-deps']:
deps = service.get_linked_names()
if len(deps) > 0:
project.up(
service_names=deps,
start_deps=True,
allow_recreate=False,
insecure_registry=insecure_registry,
)
tty = True
if options['-d'] or options['-T'] or not sys.stdin.isatty():
tty = False
if options['COMMAND']:
command = [options['COMMAND']] + options['ARGS']
else:
command = service.options.get('command')
container_options = {
'command': command,
'tty': tty,
'stdin_open': not options['-d'],
'detach': options['-d'],
}
if options['-e']:
container_options['environment'] = parse_environment(options['-e'])
if options['--entrypoint']:
container_options['entrypoint'] = options.get('--entrypoint')
if options['--rm']:
container_options['restart'] = None
if options['--user']:
container_options['user'] = options.get('--user')
if not options['--service-ports']:
container_options['ports'] = []
try:
container = service.create_container(
quiet=True,
one_off=True,
insecure_registry=insecure_registry,
**container_options
)
except APIError as e:
legacy.check_for_legacy_containers(
project.client,
project.name,
[service.name],
allow_one_off=False,
)
raise e
if options['-d']:
service.start_container(container)
print(container.name)
else:
dockerpty.start(project.client, container.id, interactive=not options['-T'])
exit_code = container.wait()
if options['--rm']:
project.client.remove_container(container.id)
sys.exit(exit_code)
def scale(self, project, options):
"""
Set number of containers to run for a service.
Numbers are specified in the form `service=num` as arguments.
For example:
$ docker-compose scale web=2 worker=3
Usage: scale [options] [SERVICE=NUM...]
Options:
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
for s in options['SERVICE=NUM']:
if '=' not in s:
raise UserError('Arguments to scale should be in the form service=num')
service_name, num = s.split('=', 1)
try:
num = int(num)
except ValueError:
raise UserError('Number of containers for service "%s" is not a '
'number' % service_name)
project.get_service(service_name).scale(num, timeout=timeout)
def start(self, project, options):
"""
Start existing containers.
Usage: start [SERVICE...]
"""
project.start(service_names=options['SERVICE'])
def stop(self, project, options):
"""
Stop running containers without removing them.
They can be started again with `docker-compose start`.
Usage: stop [options] [SERVICE...]
Options:
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
project.stop(service_names=options['SERVICE'], timeout=timeout)
def restart(self, project, options):
"""
Restart running containers.
Usage: restart [options] [SERVICE...]
Options:
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
project.restart(service_names=options['SERVICE'], timeout=timeout)
def up(self, project, options):
"""
Builds, (re)creates, starts, and attaches to containers for a service.
Unless they are already running, this command also starts any linked services.
The `docker-compose up` command aggregates the output of each container. When
the command exits, all containers are stopped. Running `docker-compose up -d`
starts the containers in the background and leaves them running.
If there are existing containers for a service, and the service's configuration
or image was changed after the container's creation, `docker-compose up` picks
up the changes by stopping and recreating the containers (preserving mounted
volumes). To prevent Compose from picking up changes, use the `--no-recreate`
flag.
If you want to force Compose to stop and recreate all containers, use the
`--force-recreate` flag.
Usage: up [options] [SERVICE...]
Options:
--allow-insecure-ssl Allow insecure connections to the docker
registry
-d Detached mode: Run containers in the background,
print new container names.
--no-color Produce monochrome output.
--no-deps Don't start linked services.
--force-recreate Recreate containers even if their configuration and
image haven't changed. Incompatible with --no-recreate.
--no-recreate If containers already exist, don't recreate them.
Incompatible with --force-recreate.
--no-build Don't build an image, even if it's missing
-t, --timeout TIMEOUT Use this timeout in seconds for container shutdown
when attached or when containers are already
running. (default: 10)
"""
insecure_registry = options['--allow-insecure-ssl']
detached = options['-d']
monochrome = options['--no-color']
start_deps = not options['--no-deps']
allow_recreate = not options['--no-recreate']
force_recreate = options['--force-recreate']
service_names = options['SERVICE']
timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
if force_recreate and not allow_recreate:
raise UserError("--force-recreate and --no-recreate cannot be combined.")
to_attach = project.up(
service_names=service_names,
start_deps=start_deps,
allow_recreate=allow_recreate,
force_recreate=force_recreate,
insecure_registry=insecure_registry,
do_build=not options['--no-build'],
timeout=timeout
)
if not detached:
print("Attaching to", list_containers(to_attach))
log_printer = LogPrinter(to_attach, attach_params={"logs": True}, monochrome=monochrome)
try:
log_printer.run()
finally:
def handler(signal, frame):
project.kill(service_names=service_names)
sys.exit(0)
signal.signal(signal.SIGINT, handler)
print("Gracefully stopping... (press Ctrl+C again to force)")
project.stop(service_names=service_names, timeout=timeout)
def migrate_to_labels(self, project, _options):
"""
Recreate containers to add labels
If you're coming from Compose 1.2 or earlier, you'll need to remove or
migrate your existing containers after upgrading Compose. This is
because, as of version 1.3, Compose uses Docker labels to keep track
of containers, and so they need to be recreated with labels added.
If Compose detects containers that were created without labels, it
will refuse to run so that you don't end up with two sets of them. If
you want to keep using your existing containers (for example, because
they have data volumes you want to preserve) you can migrate them with
the following command:
docker-compose migrate-to-labels
Alternatively, if you're not worried about keeping them, you can
remove them - Compose will just create new ones.
docker rm -f myapp_web_1 myapp_db_1 ...
Usage: migrate-to-labels
"""
legacy.migrate_project_to_labels(project)
def version(self, project, options):
"""
        Show version information
Usage: version [--short]
Options:
--short Shows only Compose's version number.
"""
if options['--short']:
print(__version__)
else:
print(get_version_info('full'))
def list_containers(containers):
return ", ".join(c.name for c in containers)
|
feelobot/compose
|
compose/cli/main.py
|
Python
|
apache-2.0
| 19,689 | 0.001727 |
# Copyright 2018 Silvio Gregorini (silviogregorini@openforce.it)
# Copyright (c) 2018 Openforce Srls Unipersonale (www.openforce.it)
# Copyright (c) 2019 Matteo Bilotta
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import fields, models
class ResConfigSettings(models.TransientModel):
_inherit = "res.config.settings"
sp_description = fields.Char(
related="company_id.sp_description",
string="Description for period end statements",
readonly=False,
)
|
OCA/l10n-italy
|
l10n_it_vat_statement_split_payment/models/account_config.py
|
Python
|
agpl-3.0
| 520 | 0 |
"""General-use classes to interact with the ApplicationAutoScaling service through CloudFormation.
See Also:
`AWS developer guide for ApplicationAutoScaling
<https://docs.aws.amazon.com/autoscaling/application/APIReference/Welcome.html>`_
"""
# noinspection PyUnresolvedReferences
from .._raw import applicationautoscaling as _raw
# noinspection PyUnresolvedReferences
from .._raw.applicationautoscaling import *
|
garyd203/flying-circus
|
src/flyingcircus/service/applicationautoscaling.py
|
Python
|
lgpl-3.0
| 424 | 0.002358 |
# Created By: Virgil Dupras
# Created On: 2009-11-27
# Copyright 2015 Hardcoded Software (http://www.hardcoded.net)
#
# This software is licensed under the "GPLv3" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.gnu.org/licenses/gpl-3.0.html
from hscommon.trans import trget
from core.document import FilterType
from ..filter_bar import FilterBar
tr = trget('ui')
class TransactionFilterBar(FilterBar):
BUTTONS = [
(tr("All"), None),
(tr("Income"), FilterType.Income),
(tr("Expenses"), FilterType.Expense),
(tr("Transfers"), FilterType.Transfer),
(tr("Unassigned"), FilterType.Unassigned),
(tr("Reconciled"), FilterType.Reconciled),
(tr("Not Reconciled"), FilterType.NotReconciled),
]
|
fokusov/moneyguru
|
qt/controller/transaction/filter_bar.py
|
Python
|
gpl-3.0
| 843 | 0.005931 |
# -*- coding: UTF-8
""" Entry decorators for python plugins
Functions
=========
chat_message -- Decorator for chat message plugin
chat_command -- Decorator for chat command plugin
chat_accost -- Decorator for chat accost plugin
"""
from dewyatochka.core.plugin.loader.internal import entry_point
from dewyatochka.core.plugin.exceptions import PluginRegistrationError
from .matcher import PLUGIN_TYPE_COMMAND, PLUGIN_TYPE_MESSAGE, PLUGIN_TYPE_ACCOST
__all__ = ['chat_command', 'chat_message', 'chat_accost']
# Commands already in use
_reserved_commands = set()
def chat_message(fn=None, *, services=None, regular=False, system=False, own=False) -> callable:
""" Decorator to mark function as message handler entry point
:param callable fn: Function if decorator is invoked directly
:param list services: Dependent services list
:param bool regular: Register this handler for regular messages
:param bool system: Register this handler for system messages
:param bool own: Register this handler for own messages
:return callable:
"""
return entry_point(PLUGIN_TYPE_MESSAGE, services=services, regular=True, system=False, own=False)(fn) \
if fn is not None else \
entry_point(PLUGIN_TYPE_MESSAGE, services=services, regular=regular, system=system, own=own)
def chat_command(command, *, services=None) -> callable:
""" Register handler for chat command
:param list services: Dependent services list
:param str command: Command name without prefix
:return callable:
"""
if command in _reserved_commands:
raise PluginRegistrationError('Chat command %s is already in use' % command)
_reserved_commands.add(command)
return entry_point(PLUGIN_TYPE_COMMAND, services=services, command=command)
def chat_accost(fn=None, *, services=None) -> callable:
""" Register handler for a chat personal accost
:param callable fn: Function if decorator is invoked directly
:param list services: Dependent services list
:return callable:
"""
entry_point_fn = entry_point(PLUGIN_TYPE_ACCOST, services=services)
return entry_point_fn(fn) if fn is not None else entry_point_fn
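# A minimal usage sketch for the decorators above. The handler signature
# (keyword arguments only) is an assumption for illustration; the real
# contract is defined by the message plugin environment elsewhere in
# dewyatochka.
@chat_message  # direct form: registered for regular chat messages only
def echo_handler(**kwargs):
    pass
@chat_command('ping', services=['chat'])  # the 'chat' service name is hypothetical
def ping_handler(**kwargs):
    pass  # registering 'ping' a second time would raise PluginRegistrationError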
|
kawashiro/dewyatochka2
|
src/dewyatochka/core/plugin/subsystem/message/py_entry.py
|
Python
|
gpl-3.0
| 2,198 | 0.002275 |
# -*- coding: utf-8 -*-
'''
Uncoded Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urlparse,sys,urllib
params = dict(urlparse.parse_qsl(sys.argv[2].replace('?','')))
action = params.get('action')
name = params.get('name')
title = params.get('title')
year = params.get('year')
imdb = params.get('imdb')
tvdb = params.get('tvdb')
tmdb = params.get('tmdb')
season = params.get('season')
episode = params.get('episode')
tvshowtitle = params.get('tvshowtitle')
premiered = params.get('premiered')
url = params.get('url')
image = params.get('image')
meta = params.get('meta')
select = params.get('select')
query = params.get('query')
source = params.get('source')
content = params.get('content')
windowedtrailer = params.get('windowedtrailer')
windowedtrailer = int(windowedtrailer) if windowedtrailer in ("0","1") else 0
if action == None:
from resources.lib.indexers import navigator
from resources.lib.modules import cache
cache.cache_version_check()
navigator.navigator().root()
elif action == 'movieNavigator':
from resources.lib.indexers import navigator
navigator.navigator().movies()
elif action == 'movieliteNavigator':
from resources.lib.indexers import navigator
navigator.navigator().movies(lite=True)
elif action == 'mymovieNavigator':
from resources.lib.indexers import navigator
navigator.navigator().mymovies()
elif action == 'mymovieliteNavigator':
from resources.lib.indexers import navigator
navigator.navigator().mymovies(lite=True)
elif action == 'tvNavigator':
from resources.lib.indexers import navigator
navigator.navigator().tvshows()
elif action == 'tvliteNavigator':
from resources.lib.indexers import navigator
navigator.navigator().tvshows(lite=True)
elif action == 'mytvNavigator':
from resources.lib.indexers import navigator
navigator.navigator().mytvshows()
elif action == 'mytvliteNavigator':
from resources.lib.indexers import navigator
navigator.navigator().mytvshows(lite=True)
elif action == 'downloadNavigator':
from resources.lib.indexers import navigator
navigator.navigator().downloads()
elif action == 'libraryNavigator':
from resources.lib.indexers import navigator
navigator.navigator().library()
elif action == 'toolNavigator':
from resources.lib.indexers import navigator
navigator.navigator().tools()
elif action == 'searchNavigator':
from resources.lib.indexers import navigator
navigator.navigator().search()
elif action == 'viewsNavigator':
from resources.lib.indexers import navigator
navigator.navigator().views()
elif action == 'clearCache':
from resources.lib.indexers import navigator
navigator.navigator().clearCache()
elif action == 'clearCacheSearch':
from resources.lib.indexers import navigator
navigator.navigator().clearCacheSearch()
elif action == 'infoCheck':
from resources.lib.indexers import navigator
navigator.navigator().infoCheck('')
elif action == 'movies':
from resources.lib.indexers import movies
movies.movies().get(url)
elif action == 'moviePage':
from resources.lib.indexers import movies
movies.movies().get(url)
elif action == 'movieWidget':
from resources.lib.indexers import movies
movies.movies().widget()
elif action == 'movieSearch':
from resources.lib.indexers import movies
movies.movies().search()
elif action == 'movieSearchnew':
from resources.lib.indexers import movies
movies.movies().search_new()
elif action == 'movieSearchterm':
from resources.lib.indexers import movies
movies.movies().search_term(name)
elif action == 'moviePerson':
from resources.lib.indexers import movies
movies.movies().person()
elif action == 'movieGenres':
from resources.lib.indexers import movies
movies.movies().genres()
elif action == 'movieLanguages':
from resources.lib.indexers import movies
movies.movies().languages()
elif action == 'movieCertificates':
from resources.lib.indexers import movies
movies.movies().certifications()
elif action == 'movieYears':
from resources.lib.indexers import movies
movies.movies().years()
elif action == 'moviePersons':
from resources.lib.indexers import movies
movies.movies().persons(url)
elif action == 'movieUserlists':
from resources.lib.indexers import movies
movies.movies().userlists()
elif action == 'channels':
from resources.lib.indexers import channels
channels.channels().get()
elif action == 'tvshows':
from resources.lib.indexers import tvshows
tvshows.tvshows().get(url)
elif action == 'tvshowPage':
from resources.lib.indexers import tvshows
tvshows.tvshows().get(url)
elif action == 'tvSearch':
from resources.lib.indexers import tvshows
tvshows.tvshows().search()
elif action == 'tvSearchnew':
from resources.lib.indexers import tvshows
tvshows.tvshows().search_new()
elif action == 'tvSearchterm':
from resources.lib.indexers import tvshows
tvshows.tvshows().search_term(name)
elif action == 'tvPerson':
from resources.lib.indexers import tvshows
tvshows.tvshows().person()
elif action == 'tvGenres':
from resources.lib.indexers import tvshows
tvshows.tvshows().genres()
elif action == 'tvNetworks':
from resources.lib.indexers import tvshows
tvshows.tvshows().networks()
elif action == 'tvLanguages':
from resources.lib.indexers import tvshows
tvshows.tvshows().languages()
elif action == 'tvCertificates':
from resources.lib.indexers import tvshows
tvshows.tvshows().certifications()
elif action == 'tvPersons':
from resources.lib.indexers import tvshows
tvshows.tvshows().persons(url)
elif action == 'tvUserlists':
from resources.lib.indexers import tvshows
tvshows.tvshows().userlists()
elif action == 'seasons':
from resources.lib.indexers import episodes
episodes.seasons().get(tvshowtitle, year, imdb, tvdb)
elif action == 'episodes':
from resources.lib.indexers import episodes
episodes.episodes().get(tvshowtitle, year, imdb, tvdb, season, episode)
elif action == 'calendar':
from resources.lib.indexers import episodes
episodes.episodes().calendar(url)
elif action == 'tvWidget':
from resources.lib.indexers import episodes
episodes.episodes().widget()
elif action == 'calendars':
from resources.lib.indexers import episodes
episodes.episodes().calendars()
elif action == 'episodeUserlists':
from resources.lib.indexers import episodes
episodes.episodes().userlists()
elif action == 'refresh':
from resources.lib.modules import control
control.refresh()
elif action == 'queueItem':
from resources.lib.modules import control
control.queueItem()
elif action == 'openSettings':
from resources.lib.modules import control
control.openSettings(query)
elif action == 'artwork':
from resources.lib.modules import control
control.artwork()
elif action == 'addView':
from resources.lib.modules import views
views.addView(content)
elif action == 'moviePlaycount':
from resources.lib.modules import playcount
playcount.movies(imdb, query)
elif action == 'episodePlaycount':
from resources.lib.modules import playcount
playcount.episodes(imdb, tvdb, season, episode, query)
elif action == 'tvPlaycount':
from resources.lib.modules import playcount
playcount.tvshows(name, imdb, tvdb, season, query)
elif action == 'trailer':
from resources.lib.modules import trailer
trailer.trailer().play(name, url, windowedtrailer)
elif action == 'traktManager':
from resources.lib.modules import trakt
trakt.manager(name, imdb, tvdb, content)
elif action == 'authTrakt':
from resources.lib.modules import trakt
trakt.authTrakt()
elif action == 'smuSettings':
try: import urlresolver
except: pass
urlresolver.display_settings()
elif action == 'download':
import json
from resources.lib.modules import sources
from resources.lib.modules import downloader
try: downloader.download(name, image, sources.sources().sourcesResolve(json.loads(source)[0], True))
except: pass
elif action == 'play':
from resources.lib.modules import sources
sources.sources().play(title, year, imdb, tvdb, season, episode, tvshowtitle, premiered, meta, select)
elif action == 'addItem':
from resources.lib.modules import sources
sources.sources().addItem(title)
elif action == 'playItem':
from resources.lib.modules import sources
sources.sources().playItem(title, source)
elif action == 'alterSources':
from resources.lib.modules import sources
sources.sources().alterSources(url, meta)
elif action == 'clearSources':
from resources.lib.modules import sources
sources.sources().clearSources()
elif action == 'random':
rtype = params.get('rtype')
if rtype == 'movie':
from resources.lib.indexers import movies
rlist = movies.movies().get(url, create_directory=False)
r = sys.argv[0]+"?action=play"
elif rtype == 'episode':
from resources.lib.indexers import episodes
rlist = episodes.episodes().get(tvshowtitle, year, imdb, tvdb, season, create_directory=False)
r = sys.argv[0]+"?action=play"
elif rtype == 'season':
from resources.lib.indexers import episodes
rlist = episodes.seasons().get(tvshowtitle, year, imdb, tvdb, create_directory=False)
r = sys.argv[0]+"?action=random&rtype=episode"
elif rtype == 'show':
from resources.lib.indexers import tvshows
rlist = tvshows.tvshows().get(url, create_directory=False)
r = sys.argv[0]+"?action=random&rtype=season"
from resources.lib.modules import control
from random import randint
import json
try:
rand = randint(1,len(rlist))-1
for p in ['title','year','imdb','tvdb','season','episode','tvshowtitle','premiered','select']:
if rtype == "show" and p == "tvshowtitle":
try: r += '&'+p+'='+urllib.quote_plus(rlist[rand]['title'])
except: pass
else:
try: r += '&'+p+'='+urllib.quote_plus(rlist[rand][p])
except: pass
try: r += '&meta='+urllib.quote_plus(json.dumps(rlist[rand]))
except: r += '&meta='+urllib.quote_plus("{}")
if rtype == "movie":
try: control.infoDialog(rlist[rand]['title'], control.lang(32536).encode('utf-8'), time=30000)
except: pass
elif rtype == "episode":
try: control.infoDialog(rlist[rand]['tvshowtitle']+" - Season "+rlist[rand]['season']+" - "+rlist[rand]['title'], control.lang(32536).encode('utf-8'), time=30000)
except: pass
control.execute('RunPlugin(%s)' % r)
except:
control.infoDialog(control.lang(32537).encode('utf-8'), time=8000)
elif action == 'movieToLibrary':
from resources.lib.modules import libtools
libtools.libmovies().add(name, title, year, imdb, tmdb)
elif action == 'moviesToLibrary':
from resources.lib.modules import libtools
libtools.libmovies().range(url)
elif action == 'tvshowToLibrary':
from resources.lib.modules import libtools
libtools.libtvshows().add(tvshowtitle, year, imdb, tvdb)
elif action == 'tvshowsToLibrary':
from resources.lib.modules import libtools
libtools.libtvshows().range(url)
elif action == 'updateLibrary':
from resources.lib.modules import libtools
libtools.libepisodes().update(query)
elif action == 'service':
from resources.lib.modules import libtools
libtools.libepisodes().service()
|
TheWardoctor/Wardoctors-repo
|
plugin.video.uncoded/uncoded.py
|
Python
|
apache-2.0
| 12,280 | 0.003746 |
# %FILEHEADER%
from ..filebased import FileBasedBackend
from .. import NONE, MissingOption
from xmlserialize import serialize_to_file, unserialize_file
from lxml.etree import XMLSyntaxError
class XMLBackend(dict, FileBasedBackend):
ROOT_ELEMENT = 'configuration'
initial_file_content = '<{0}></{0}>'.format(ROOT_ELEMENT)
def __init__(self, backref, extension='xml', filename=None):
dict.__init__(self)
FileBasedBackend.__init__(self, backref, extension, filename)
def read(self):
try:
return unserialize_file(self.file)
except XMLSyntaxError, err:
self.log('Could not parse XML configuration file: %s' % err,
level='error')
def save(self):
serialize_to_file(self, self.file, root_tag=self.ROOT_ELEMENT)
def get_option(self, item):
try:
return self.__getitem__(item)
except KeyError:
raise MissingOption(item)
set_option = dict.__setitem__
options = property(lambda self:self.keys())
tree = property(lambda self:self)
def reset_all(self):
self._create_file()
self.clear()
|
jonashaag/gpyconf
|
gpyconf/backends/_xml/__init__.py
|
Python
|
lgpl-2.1
| 1,161 | 0.002584 |
import re
import urllib
from xbmcswift2 import xbmc
from meta import plugin, LANG
from meta.gui import dialogs
from meta.utils.text import to_unicode
from meta.library.live import get_player_plugin_from_library
from meta.navigation.base import get_icon_path, get_background_path
from meta.play.players import get_needed_langs, ADDON_SELECTOR
from meta.play.channelers import get_needed_langs, ADDON_PICKER
from meta.play.base import active_players, active_channelers, action_cancel, action_play, on_play_video
from settings import SETTING_USE_SIMPLE_SELECTOR, SETTING_LIVE_DEFAULT_PLAYER_FROM_CONTEXT, SETTING_LIVE_DEFAULT_PLAYER_FROM_LIBRARY, SETTING_LIVE_DEFAULT_PLAYER, SETTING_LIVE_LIBRARY_FOLDER, SETTING_LIVE_DEFAULT_CHANNELER
from language import get_string as _
def play_channel(channel, program, language, mode):
# Get players to use
if mode == 'select':
play_plugin = ADDON_SELECTOR.id
elif mode == 'context':
play_plugin = plugin.get_setting(SETTING_LIVE_DEFAULT_PLAYER_FROM_CONTEXT, unicode)
elif mode == 'library':
play_plugin = plugin.get_setting(SETTING_LIVE_DEFAULT_PLAYER_FROM_LIBRARY, unicode)
elif mode == 'default':
play_plugin = plugin.get_setting(SETTING_LIVE_DEFAULT_PLAYER, unicode)
else:
play_plugin = mode
players = active_players("live")
players = [p for p in players if p.id == play_plugin] or players
if not players:
dialogs.notify(msg="{0} {1} {2}".format(_("No cache").replace(_("Cache").lower(),_("TV")), _("Player").lower(), _("Enabled").lower()), title=_("Error"), delay=5000, image=get_icon_path("live"))
action_cancel()
return
# Get parameters
params = {}
for lang in get_needed_langs(players):
params[lang] = get_channel_parameters(channel, program, language)
params[lang] = to_unicode(params[lang])
# Go for it
link = on_play_video(mode, players, params)
if link:
action_play({
'label': channel,
'path': link,
'is_playable': True,
'info_type': 'video',
})
def play_channel_from_guide(channel, program, language, mode):
# Get channelers to use
if mode == 'select':
play_plugin = ADDON_PICKER.id
elif mode == 'default':
play_plugin = plugin.get_setting(SETTING_LIVE_DEFAULT_CHANNELER, unicode)
else:
play_plugin = mode
channelers = active_channelers("live")
channelers = [p for p in channelers if p.id == play_plugin] or channelers
if not channelers:
dialogs.notify(msg="{0} {1} {2}".format(_("No cache").replace(_("Cache").lower(),_("TV")), _("Player").lower(), _("Enabled").lower()), title=_("Error"), delay=5000, image=get_icon_path("live"))
action_cancel()
return
# Get parameters
params = {}
for lang in get_needed_langs(channelers):
params[lang] = get_channel_parameters(channel, program, language)
params[lang] = to_unicode(params[lang])
# Go for it
link = on_play_video(mode, channelers, params)
if link:
action_play({
'label': channel,
'path': link,
'is_playable': True,
'info_type': 'video',
})
def get_channel_parameters(channel, program, language):
channel_regex = re.compile("(.+?)\s*(\d+|one|two|three|four|five|six|seven|eight|nine|ten)\s*.*?(\d*)$",
re.IGNORECASE|re.UNICODE)
parameters = {}
parameters['name'] = channel
parameters['urlname'] = urllib.quote(parameters['name'])
parameters['shortname'] = parameters['name'][1:-1]
parameters['basename'] = re.sub(channel_regex, r"\1",channel)
parameters['shortbasename'] = parameters['basename'][1:-1]
parameters['extension'] = re.sub(channel_regex, r"\2",channel)
parameters['delay'] = re.sub(channel_regex, r"\3", channel)
parameters['program'] = program
parameters['language'] = language
return parameters
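# Worked example of the parsing above: for a channel name like "Channel 4 +1"
# the regex yields the base name, the channel number and the timeshift digits.
# (The program/language arguments are arbitrary placeholders.)
_example = get_channel_parameters("Channel 4 +1", "News", "en")
assert _example['basename'] == "Channel"           # text before the number
assert _example['extension'] == "4"                # the channel number
assert _example['delay'] == "1"                    # digits of the trailing "+1"
assert _example['urlname'] == "Channel%204%20%2B1"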
|
TheWardoctor/Wardoctors-repo
|
plugin.video.metalliq/resources/lib/meta/play/live.py
|
Python
|
apache-2.0
| 3,977 | 0.005029 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.conf import settings
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db import models
from django.db.models import Q
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from enumfields import EnumIntegerField
from jsonfield.fields import JSONField
from shuup.core.fields import InternalIdentifierField
from shuup.notify.enums import Priority, RecipientType
class NotificationManager(models.Manager):
def for_user(self, user):
"""
:type user: django.contrib.auth.models.AbstractUser
"""
if not user or user.is_anonymous():
return self.none()
q = (Q(recipient_type=RecipientType.SPECIFIC_USER) & Q(recipient=user))
if getattr(user, 'is_superuser', False):
q |= Q(recipient_type=RecipientType.ADMINS)
return self.filter(q)
def unread_for_user(self, user):
return self.for_user(user).exclude(marked_read=True)
class Notification(models.Model):
"""
A model for persistent notifications to be shown in the admin, etc.
"""
recipient_type = EnumIntegerField(RecipientType, default=RecipientType.ADMINS, verbose_name=_('recipient type'))
recipient = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True, related_name="+", on_delete=models.SET_NULL,
verbose_name=_('recipient')
)
created_on = models.DateTimeField(auto_now_add=True, editable=False, verbose_name=_('created on'))
message = models.CharField(max_length=140, editable=False, default="", verbose_name=_('message'))
identifier = InternalIdentifierField(unique=False)
priority = EnumIntegerField(Priority, default=Priority.NORMAL, db_index=True, verbose_name=_('priority'))
_data = JSONField(blank=True, null=True, editable=False, db_column="data")
marked_read = models.BooleanField(db_index=True, editable=False, default=False, verbose_name=_('marked read'))
marked_read_by = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True, editable=False, related_name="+", on_delete=models.SET_NULL,
verbose_name=_('marked read by')
)
marked_read_on = models.DateTimeField(null=True, blank=True, verbose_name=_('marked read on'))
objects = NotificationManager()
def __init__(self, *args, **kwargs):
url = kwargs.pop("url", None)
super(Notification, self).__init__(*args, **kwargs)
if url:
self.url = url
def save(self, *args, **kwargs):
if self.recipient_type == RecipientType.SPECIFIC_USER and not self.recipient_id:
raise ValueError("With RecipientType.SPECIFIC_USER, recipient is required")
super(Notification, self).save(*args, **kwargs)
def mark_read(self, user):
if self.marked_read:
return False
self.marked_read = True
self.marked_read_by = user
self.marked_read_on = now()
self.save(update_fields=('marked_read', 'marked_read_by', 'marked_read_on'))
return True
@property
def is_read(self):
return self.marked_read
@property
def data(self):
if not self._data:
self._data = {}
return self._data
@property
def url(self):
url = self.data.get("_url")
if isinstance(url, dict):
return reverse(**url)
return url
@url.setter
def url(self, value):
if self.pk:
raise ValueError("URL can't be set on a saved notification")
self.data["_url"] = value
def set_reverse_url(self, **reverse_kwargs):
if self.pk:
raise ValueError("URL can't be set on a saved notification")
try:
reverse(**reverse_kwargs)
except NoReverseMatch: # pragma: no cover
raise ValueError("Invalid reverse URL parameters")
self.data["_url"] = reverse_kwargs
|
suutari/shoop
|
shuup/notify/models/notification.py
|
Python
|
agpl-3.0
| 4,222 | 0.002605 |
#
# Copyright 2016-2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
"""
This module provides event loop based infrastructure for scalable storage
health monitoring:
CheckService entry point for starting and stopping path checkers.
DirectioChecker checker using dd process for file or block based
volumes.
CheckResult result object provided to user callback on each check.
"""
from __future__ import absolute_import
import logging
import re
import subprocess
import threading
from vdsm import cmdutils
from vdsm import concurrent
from vdsm import constants
from vdsm.common.compat import CPopen
from vdsm.storage import asyncevent
from vdsm.storage import asyncutils
from vdsm.storage import exception
EXEC_ERROR = 127
_log = logging.getLogger("storage.check")
class CheckService(object):
"""
Provides path checking service.
This object is a simple thread safe entry point for starting and stopping
path checkers, keeping the internals decoupled from client code.
Usage:
# Start the service
service = CheckService()
service.start()
# Start checking path
service.start_checking(path, complete)
# Stop checking path, waiting up to 30 seconds
service.stop_checking(path, timeout=30)
# Stop the service
service.stop()
"""
def __init__(self):
self._lock = threading.Lock()
self._loop = asyncevent.EventLoop()
self._thread = concurrent.thread(self._loop.run_forever,
name="check/loop")
self._checkers = {}
def start(self):
"""
Start the service thread.
"""
_log.info("Starting check service")
self._thread.start()
def stop(self):
"""
Stop all checkers and the service thread.
Do not wait for running check processes since the application is
shutting down. To wait for all processes, stop all checkers and wait
        for them before stopping.
"""
if not self._thread.is_alive():
return
_log.info("Stopping check service")
with self._lock:
for checker in self._checkers.values():
self._loop.call_soon_threadsafe(checker.stop)
self._checkers.clear()
self._loop.call_soon_threadsafe(self._loop.stop)
self._thread.join()
self._loop.close()
def start_checking(self, path, complete, interval=10.0):
"""
        Start checking path every interval seconds. On check, invoke the
complete callback with a CheckResult instance.
Note that the complete callback is invoked in the check thread, and
must not block, as it will block all other checkers.
"""
_log.info("Start checking %r", path)
with self._lock:
if path in self._checkers:
raise RuntimeError("Already checking path %r" % path)
checker = DirectioChecker(self._loop, path, complete,
interval=interval)
self._checkers[path] = checker
self._loop.call_soon_threadsafe(checker.start)
def stop_checking(self, path, timeout=None):
"""
Stop checking path. If timeout is set, wait until the checker has
stopped, or the timeout has expired.
"""
_log.info("Stop checking %r", path)
with self._lock:
checker = self._checkers.pop(path)
self._loop.call_soon_threadsafe(checker.stop)
if timeout:
return checker.wait(timeout)
def is_checking(self, path):
return path in self._checkers
# Checker state
IDLE = "idle"
RUNNING = "running"
STOPPING = "stopping"
class DirectioChecker(object):
"""
Check path availability using direct I/O.
DirectioChecker is created with a complete callback. Each time a check
cycle is completed, the complete callback will be invoked with a
CheckResult instance.
CheckResult provides a delay() method returning the read delay in
seconds. If the check failed, the delay() method will raise the
appropriate exception that can be reported to engine.
Note that the complete callback must not block as it will block the entire
event loop thread.
The checker runs exactly every interval seconds. If a check did not
complete before the next check is scheduled, the next check will be delayed
to the next interval.
Checker is not thread safe. Use EventLoop.call_soon_threadsafe() to start
or stop a checker. The only thread safe method is wait().
Usage::
# Start the event loop thread
loop = asyncevent.EventLoop()
concurrent.thread(loop.run_forever).start()
# The complete callback
def complete(result):
try:
check_delay = result.delay()
except Exception as e:
check_error = e
check_time = time.time()
# Start a checker on the event loop thread
checker = DirectioChecker(loop, path, complete)
loop.call_soon_threadsafe(checker.start)
...
# Stop a checker from another thread
loop.call_soon_threadsafe(checker.stop)
# If needed, wait until a checker actually stopped.
checker.wait(30)
"""
log = logging.getLogger("storage.directiochecker")
def __init__(self, loop, path, complete, interval=10.0):
self._loop = loop
self._path = path
self._complete = complete
self._interval = interval
self._looper = asyncutils.LoopingCall(loop, self._check)
self._check_time = None
self._proc = None
self._reader = None
self._reaper = None
self._err = None
self._state = IDLE
self._stopped = threading.Event()
def start(self):
"""
Start the checker.
Raises RuntimeError if the checker is running.
"""
if self._state is not IDLE:
raise RuntimeError("Checker is %s", self._state)
self._state = RUNNING
_log.debug("Checker %r started", self._path)
self._stopped.clear()
self._looper.start(self._interval)
def stop(self):
"""
Stop the checker.
If the checker is waiting for the next check, the next check will be
cancelled. If the checker is in the middle of a check, it will stop
when the check completes.
If the checker is not running, the call is ignored silently.
"""
if self._state is not RUNNING:
return
_log.debug("Checker %r stopping", self._path)
self._state = STOPPING
self._looper.stop()
if self._proc is None:
self._stop_completed()
def wait(self, timeout=None):
"""
Wait until a checker has stopped.
Returns True if checker has stopped, False if timeout expired.
"""
return self._stopped.wait(timeout)
def is_running(self):
return self._state is not IDLE
def _stop_completed(self):
self._state = IDLE
_log.debug("Checker %r stopped", self._path)
self._stopped.set()
def _check(self):
"""
Called when starting the checker, and then every interval seconds until
the checker is stopped.
"""
assert self._state is RUNNING
if self._proc:
_log.warning("Checker %r is blocked for %.2f seconds",
self._path, self._loop.time() - self._check_time)
return
self._check_time = self._loop.time()
_log.debug("START check %r (delay=%.2f)",
self._path, self._check_time - self._looper.deadline)
try:
self._start_process()
except Exception as e:
self._err = "Error starting process: %s" % e
self._check_completed(EXEC_ERROR)
def _start_process(self):
"""
Starts a dd process performing direct I/O to path, reading the process
stderr. When stderr has closed, _read_completed will be called.
"""
cmd = [constants.EXT_DD, "if=%s" % self._path, "of=/dev/null",
"bs=4096", "count=1", "iflag=direct"]
cmd = cmdutils.wrap_command(cmd)
self._proc = CPopen(cmd, stdin=None, stdout=None,
stderr=subprocess.PIPE)
self._reader = self._loop.create_dispatcher(
asyncevent.BufferedReader, self._proc.stderr, self._read_completed)
def _read_completed(self, data):
"""
Called when dd process has closed stderr. At this point the process may
be still running.
"""
assert self._state is not IDLE
self._reader = None
self._err = data
rc = self._proc.poll()
# About 95% of runs, the process has terminated at this point. If not,
# start the reaper to wait for it.
if rc is None:
self._reaper = asyncevent.Reaper(self._loop, self._proc,
self._check_completed)
return
self._check_completed(rc)
def _check_completed(self, rc):
"""
Called when the dd process has exited with exit code rc.
"""
assert self._state is not IDLE
now = self._loop.time()
elapsed = now - self._check_time
_log.debug("FINISH check %r (rc=%s, elapsed=%.02f)",
self._path, rc, elapsed)
self._reaper = None
self._proc = None
if self._state is STOPPING:
self._stop_completed()
return
result = CheckResult(self._path, rc, self._err, self._check_time,
elapsed)
self._complete(result)
def __repr__(self):
info = [self.__class__.__name__,
self._path,
self._state]
if self._state is RUNNING:
info.append("next_check=%.2f" % self._looper.deadline)
return "<%s at 0x%x>" % (" ".join(info), id(self))
class CheckResult(object):
_PATTERN = re.compile(br".*, ([\de\-.]+) s,[^,]+")
def __init__(self, path, rc, err, time, elapsed):
self.path = path
self.rc = rc
self.err = err
self.time = time
self.elapsed = elapsed
def delay(self):
# TODO: Raising MiscFileReadException for all errors to keep the old
# behavior. Should probably use StorageDomainAccessError.
if self.rc != 0:
raise exception.MiscFileReadException(self.path, self.rc, self.err)
if not self.err:
raise exception.MiscFileReadException(self.path, "no stats")
stats = self.err.splitlines()[-1]
match = self._PATTERN.match(stats)
if not match:
raise exception.MiscFileReadException(self.path,
"no match: %r" % stats)
seconds = match.group(1)
try:
return float(seconds)
except ValueError as e:
raise exception.MiscFileReadException(self.path, e)
def __repr__(self):
return "<%s path=%s rc=%d err=%r time=%.2f elapsed=%.2f at 0x%x>" % (
self.__class__.__name__, self.path, self.rc, self.err, self.time,
self.elapsed, id(self))
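# Example of the stderr parsing performed by CheckResult.delay(): the last
# stats line of dd output carries the elapsed read time in seconds. The
# device path below is illustrative.
_err = (b"1+0 records in\n1+0 records out\n"
        b"4096 bytes (4.1 kB) copied, 0.000566308 s, 7.2 MB/s")
_result = CheckResult("/dev/vg0/lv0", 0, _err, 0.0, 0.01)
assert _result.delay() == 0.000566308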
|
EdDev/vdsm
|
lib/vdsm/storage/check.py
|
Python
|
gpl-2.0
| 12,176 | 0 |
# coding: utf-8
#
# Copyright © 2012-2015 Ejwa Software. All rights reserved.
#
# This file is part of gitinspector.
#
# gitinspector is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gitinspector is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
DEFAULT_EXTENSIONS = {"java":"java", "c":"c", "cc":"c", "cpp":"cpp", "h":"cpp", "hh":"cpp", "hpp":"cpp", "py":"python",
"glsl":"opengl", "rb":"ruby", "js":"javascript", "sql":"sql", "fltar":"ansible","pkb":"sql",
"pks":"sql","txt":"text", "drt":"drools", "drl":"drools", "bpmn":"processes", "kt":"kotlin"}
__extensions__ = DEFAULT_EXTENSIONS.keys()
__extensions_dict__ = DEFAULT_EXTENSIONS
__located_extensions__ = set()
def get():
return __extensions__
def get_dict():
return __extensions_dict__
def define(string):
global __extensions__
__extensions__ = string.split(",")
def add_located(string):
if len(string) == 0:
__located_extensions__.add("*")
else:
__located_extensions__.add(string)
def get_located():
return __located_extensions__
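# Usage sketch (illustrative only, mutates module state): restrict a run to
# a custom extension set and record which extensions were actually
# encountered while scanning.
define("py,c,cpp")
add_located("py")
add_located("")                 # extensionless files are tracked as "*"
assert get() == ["py", "c", "cpp"]
assert get_located() == {"py", "*"}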
|
eklochkov/gitinspector
|
gitinspector/extensions.py
|
Python
|
gpl-3.0
| 1,635 | 0.015912 |
# The contents of this file are subject to the Mozilla Public License
# Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# OS2Webscanner was developed by Magenta in collaboration with OS2 the
# Danish community of open source municipalities (http://www.os2web.dk/).
#
# The code is currently governed by OS2 the Danish community of open
# source municipalities ( http://www.os2web.dk/ )
"""HTML Processors."""
from .processor import Processor
from .text import TextProcessor
import logging
import os
import xmltodict
import json
from xml.parsers.expat import ExpatError
from .html import HTMLProcessor
class XmlProcessor(HTMLProcessor):
"""Processor for XMLdocuments.
When processing, converts document to json one line including all attributes
Immediately processes with TextProcessor after processing.
"""
item_type = "xml"
text_processor = TextProcessor()
def handle_spider_item(self, data, url_object):
"""Immediately process the spider item."""
return self.process(data, url_object)
def handle_queue_item(self, item):
"""Immediately process the queue item."""
result = self.process_file(item.file_path, item.url)
if os.path.exists(item.file_path):
os.remove(item.file_path)
return result
def process(self, data, url_object):
"""Process XML data.
Converts document to json before processing with TextProcessor.
        If the XML is not well-formed, fall back to treating it as HTML.
"""
logging.info("Process XML %s" % url_object.url)
try:
data = json.dumps(xmltodict.parse(data))
return self.text_processor.process(data, url_object)
except ExpatError:
return super(XmlProcessor,self).process(data,url_object)
Processor.register_processor(XmlProcessor.item_type, XmlProcessor)
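# Illustration of the XML -> JSON normalisation that process() performs
# before handing the data to TextProcessor (xmltodict maps attributes to
# "@"-prefixed keys):
#
# >>> import json, xmltodict
# >>> json.dumps(xmltodict.parse('<scan id="1"><url>http://x.dk</url></scan>'))
# '{"scan": {"@id": "1", "url": "http://x.dk"}}'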
|
os2webscanner/os2webscanner
|
scrapy-webscanner/scanners/processors/xml.py
|
Python
|
mpl-2.0
| 2,214 | 0.002258 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import find_packages, setup
with open("README.md") as f:
long_description = f.read()
version = None
with open(os.path.join(os.getcwd(), "jupyterlab_gitsync", "version.py")) as f:
for l in f:
if l.startswith("VERSION"):
version = l.rstrip().split(" = ")[1].replace("'", "")
if not version:
raise RuntimeError("Unable to determine version")
npm_package = "jupyterlab_gitsync-{}.tgz".format(version)
if not os.path.exists(os.path.join(os.getcwd(), npm_package)):
raise FileNotFoundError("Cannot find NPM package. Did you run `npm pack`?")
data_files = [
("share/jupyter/lab/extensions", (npm_package,)),
("etc/jupyter/jupyter_notebook_config.d",
("jupyter-config/jupyter_notebook_config.d/jupyterlab_gitsync.json",)),
]
setup(
name="jupyterlab_gitsync",
version=version,
description="JupyterLab Git Sync",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/GoogleCloudPlatform/jupyter-extensions",
data_files=data_files,
license="Apache License 2.0",
packages=find_packages(),
python_requires=">=3.6",
install_requires=[
"jupyterlab~=1.2.0",
"gcp_jupyterlab_shared>=1.0.11",
],
)
|
GoogleCloudPlatform/jupyter-extensions
|
jupyterlab_gitsync/setup.py
|
Python
|
apache-2.0
| 1,836 | 0.003268 |
import logging
from time import sleep
from tools.audiosink import AudioSink
demo = logging.getLogger('Demo')
logging.basicConfig(level=logging.DEBUG)
p = AudioSink()
with open("sample.wav", 'rb') as f:
a = f.read()
demo.info("add the first track")
p.add(a, "a")
sleep(2)
with open("sample.wav", 'rb') as f:
b = f.read()
demo.info("add a second track")
p.add(b,"b")
sleep(5)
demo.info("remove the first track")
p.remove("a")
sleep(5)
demo.info("lower the volume to 40%")
p.volume = 40
sleep(15)
demo.info("close the AudioSink")
p.close()
|
loult-elte-fwere/termiloult
|
tests/test_multiple_play.py
|
Python
|
mit
| 559 | 0.001789 |
import numpy
import os
import sys
# This script depends on a SJSON parsing package:
# https://pypi.python.org/pypi/SJSON/1.1.0
# https://shelter13.net/projects/SJSON/
# https://bitbucket.org/Anteru/sjson/src
import sjson
if __name__ == "__main__":
if sys.version_info < (3, 4):
print('Python 3.4 or higher needed to run this script')
sys.exit(1)
if len(sys.argv) != 2:
print('Usage: python gen_bit_rate_stats.py <path/to/input_file.sjson>')
sys.exit(1)
input_sjson_file = sys.argv[1]
if not input_sjson_file.endswith('.sjson'):
print('Expected SJSON input file, found: {}'.format(input_sjson_file))
sys.exit(1)
if not os.path.exists(input_sjson_file):
print('Input file not found: {}'.format(input_sjson_file))
sys.exit(1)
with open(input_sjson_file, 'r') as file:
input_sjson_data = sjson.loads(file.read())
input_data_type_def = {
'names': ('algorithm_names', '0', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '32'),
'formats': ('S128', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4')
}
columns_to_extract = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19)
output_csv_file_path = 'D:\\acl-dev\\tools\\graph_generation\\bit_rates.csv'
output_csv_data = []
output_csv_headers = ['Bit Rate']
output_csv_data.append(['0', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '32'])
for entry in input_sjson_data['inputs']:
print('Parsing {} ...'.format(entry['header']))
csv_data = numpy.loadtxt(entry['file'], delimiter=',', dtype=input_data_type_def, skiprows=1, usecols=columns_to_extract)
filter = entry.get('filter', None)
if filter != None:
best_variable_data_mask = csv_data['algorithm_names'] == bytes(entry['filter'], encoding = 'utf-8')
csv_data = csv_data[best_variable_data_mask]
# Strip algorithm name
output_csv_data.append(csv_data[0].tolist()[1:])
output_csv_headers.append(entry['header'])
output_csv_data = numpy.column_stack(output_csv_data)
with open(output_csv_file_path, 'wb') as f:
header = bytes('{}\n'.format(','.join(output_csv_headers)), 'utf-8')
f.write(header)
numpy.savetxt(f, output_csv_data, delimiter=',', fmt=('%s'))
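# The expected SJSON input is shaped roughly like this (shown as the dict
# sjson.loads() would produce; the file names and the filter value are
# hypothetical examples -- 'filter' must match an algorithm name column):
#
# {
#     'inputs': [
#         {'header': 'Uniform', 'file': 'stats_uniform.csv'},
#         {'header': 'Variable', 'file': 'stats_variable.csv',
#          'filter': 'some_algorithm_name'},
#     ]
# }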
|
nfrechette/acl
|
tools/graph_generation/gen_bit_rate_stats.py
|
Python
|
mit
| 2,309 | 0.020788 |
from common import *
DEBUG = True
MONGODB_SETTINGS = {
'HOST': '127.0.0.1',
'PORT': 27017,
'DB': 'myhoard_dev',
'USERNAME': 'myhoard',
'PASSWORD': 'myh0@rd',
}
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'root': {
'level': 'NOTSET',
'handlers': ['console', 'file'],
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'INFO',
'formatter': 'standard',
'stream': 'ext://sys.stdout',
},
'file': {
'class': 'logging.handlers.RotatingFileHandler',
'level': 'INFO',
'formatter': 'standard',
'filename': '/home/pat/logs/dev/myhoard.log',
'mode': 'a',
'maxBytes': 2 * 1024 * 1024, # 2MiB
'backupCount': 64,
},
},
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s',
},
},
}
|
blstream/myHoard_Python
|
myhoard/settings/dev.py
|
Python
|
apache-2.0
| 1,018 | 0.001965 |
class Solution(object):
def maxSumOfThreeSubarrays(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
window = []
c_sum = 0
for i, v in enumerate(nums):
c_sum += v
if i >= k: c_sum -= nums[i-k]
if i >= k-1: window.append(c_sum)
left = [0] * len(window)
best = 0
for i in range(len(window)):
if window[i] > window[best]:
best = i
left[i] = best
right = [0] * len(window)
best = len(window) - 1
for i in range(len(window)-1, -1, -1):
if window[i] > window[best]:
best = i
right[i] = best
ans = None
for b in range(k, len(window) - k):
a, c = left[b-k], right[b+k]
if ans is None or (window[a] + window[b] + window[c] >
window[ans[0]] + window[ans[1]] + window[ans[2]]):
ans = a, b, c
return ans
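# Quick check with the classic example for this problem: pick three
# non-overlapping windows of length k with the maximal total sum, preferring
# the lexicographically smallest index triple.
assert Solution().maxSumOfThreeSubarrays([1, 2, 1, 2, 6, 7, 5, 1], 2) == (0, 3, 5)
# windows nums[0:2], nums[3:5], nums[5:7] -> [1,2] + [2,6] + [7,5] = 23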
|
Mlieou/leetcode_python
|
leetcode/python/ex_689.py
|
Python
|
mit
| 1,077 | 0.007428 |
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.exterior_equipment import ExteriorFuelEquipment
log = logging.getLogger(__name__)
class TestExteriorFuelEquipment(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_exteriorfuelequipment(self):
pyidf.validation_level = ValidationLevel.error
obj = ExteriorFuelEquipment()
# alpha
var_name = "Name"
obj.name = var_name
# alpha
var_fuel_use_type = "Electricity"
obj.fuel_use_type = var_fuel_use_type
# object-list
var_schedule_name = "object-list|Schedule Name"
obj.schedule_name = var_schedule_name
# real
var_design_level = 0.0
obj.design_level = var_design_level
# alpha
var_enduse_subcategory = "End-Use Subcategory"
obj.enduse_subcategory = var_enduse_subcategory
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.exteriorfuelequipments[0].name, var_name)
self.assertEqual(idf2.exteriorfuelequipments[0].fuel_use_type, var_fuel_use_type)
self.assertEqual(idf2.exteriorfuelequipments[0].schedule_name, var_schedule_name)
self.assertAlmostEqual(idf2.exteriorfuelequipments[0].design_level, var_design_level)
self.assertEqual(idf2.exteriorfuelequipments[0].enduse_subcategory, var_enduse_subcategory)
|
rbuffat/pyidf
|
tests/test_exteriorfuelequipment.py
|
Python
|
apache-2.0
| 1,733 | 0.003462 |
"""
sphinxit.core.constants
~~~~~~~~~~~~~~~~~~~~~~~
Defines some Sphinx-specific constants.
:copyright: (c) 2013 by Roman Semirook.
:license: BSD, see LICENSE for more details.
"""
from collections import namedtuple
RESERVED_KEYWORDS = (
'AND',
'AS',
'ASC',
'AVG',
'BEGIN',
'BETWEEN',
'BY',
'CALL',
'COLLATION',
'COMMIT',
'COUNT',
'DELETE',
'DESC',
'DESCRIBE',
'DISTINCT',
'FALSE',
'FROM',
'GLOBAL',
'GROUP',
'IN',
'INSERT',
'INTO',
'LIMIT',
'MATCH',
'MAX',
'META',
'MIN',
'NOT',
'NULL',
'OPTION',
'OR',
'ORDER',
'REPLACE',
'ROLLBACK',
'SELECT',
'SET',
'SHOW',
'START',
'STATUS',
'SUM',
'TABLES',
'TRANSACTION',
'TRUE',
'UPDATE',
'VALUES',
'VARIABLES',
'WARNINGS',
'WEIGHT',
'WHERE',
'WITHIN'
)
ESCAPED_CHARS = namedtuple('EscapedChars', ['single_escape', 'double_escape'])(
single_escape=("'", '+', '[', ']', '=', '*'),
double_escape=('@', '!', '^', '(', ')', '~', '-', '|', '/', '<<', '$', '"')
)
NODES_ORDER = namedtuple('NodesOrder', ['select', 'update'])(
select=(
'SelectFrom',
'Where',
'GroupBy',
'OrderBy',
'WithinGroupOrderBy',
'Limit',
'Options'
),
update=(
'UpdateSet',
'Where',
'Options'
)
)
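# A sketch of how these tables might be consumed (assumed helper, not part
# of sphinxit's public API): escape special characters in a raw MATCH() query.
def _escape_match_query(query):
    for char in ESCAPED_CHARS.single_escape:
        query = query.replace(char, '\\' + char)
    for char in ESCAPED_CHARS.double_escape:
        query = query.replace(char, '\\\\' + char)
    return query
assert _escape_match_query("hello @world!") == "hello \\\\@world\\\\!"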
|
abhijo89/sphinxit
|
sphinxit/core/constants.py
|
Python
|
bsd-3-clause
| 1,436 | 0 |