repo_name (string, lengths 5–92) | path (string, lengths 4–232) | copies (string, 19 classes) | size (string, lengths 4–7) | content (string, lengths 721–1.04M) | license (string, 15 classes) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
bnbowman/HlaTools | src/pbhla/io/utils.py | 1 | 4688 |
#! /usr/bin/env python
from pbhla.filenames import get_file_type
__author__ = 'bbowman@pacificbiosciences.com'
import logging
from tempfile import NamedTemporaryFile
from pbcore.io.FastaIO import FastaRecord, FastaWriter
from pbcore.io.FastqIO import FastqRecord, FastqWriter
from pbhla.io.AmpAnalysisIO import AmpliconAnalysisRecord, AmpliconAnalysisWriter
from pbhla.utils import check_output_file
log = logging.getLogger()
def slice_2d( args ):
"""
Convert a __getitem__ input into a pair of slice objects for ease-of-use
"""
if isinstance( args, int ):
return _to_slice(args), slice(None)
elif isinstance( args, slice ):
return args, slice(None)
elif isinstance( args, tuple ):
if len(args) > 2:
raise ValueError("Cannot create 2D slice from %s arguments" % len(args))
else:
return (_to_slice(item) for item in args)
else:
raise TypeError("slice_2d accepts Int, Slice, or Tuple arguments")
def _to_slice( item ):
"""
Convert an item from Int to Slice if needed
"""
if isinstance( item, int ):
return slice( item, item+1 )
if isinstance( item, slice ):
return item
else:
raise TypeError("Input must be Int or Slice")
def parse_locus_dict( filename ):
"""
Read a dictionary of values from a file with locus-specific data
"""
data = {}
with open( filename, 'r' ) as handle:
for line in handle:
datum, locus = line.strip().split()
if locus in data:
msg = 'Duplicate locus fofn "%s"!' % locus
log.error( msg )
raise KeyError( msg )
else:
data[locus] = datum
return data
def get_output_file( input_file, modifier ):
"""
Get a modified output file name based on some input file
"""
basename = '.'.join( input_file.split('.')[:-1] )
file_type = get_file_type( input_file )
return '%s.%s.%s' % (basename, modifier, file_type)
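# Illustrative example (not from the original file): the modifier is inserted
# before the file-type suffix. The filename is made up and this assumes
# get_file_type('reads.fasta') returns 'fasta'.
def _get_output_file_example():
    return get_output_file('reads.fasta', 'trimmed')   # -> 'reads.trimmed.fasta'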
def get_temp_fasta_record( record ):
"""
If a record isn't in Fasta format, try to create a FastaRecord from it
"""
if isinstance( record, FastaRecord ):
return record
try:
return FastaRecord( record.name.strip(), record.sequence.strip() )
except:
msg = 'Unrecognized sequence record type'
log.error( msg )
raise TypeError( msg )
def get_temp_fasta( record ):
"""
Create a temporary Fasta file for Blasr/HMMsearch/etc
"""
temp_record = get_temp_fasta_record( record )
temp_fasta = NamedTemporaryFile( suffix='.fasta' )
with FastaWriter( temp_fasta.name ) as handle:
handle.writeRecord( temp_record )
return temp_fasta
def write_records( records, filename ):
if all([isinstance(r, FastaRecord) for r in records]):
write_fasta_records( records, filename )
elif all([isinstance(r, FastqRecord) for r in records]):
write_fastq_records( records, filename )
elif all([isinstance(r, AmpliconAnalysisRecord) for r in records]):
write_amp_analysis_records( records, filename )
else:
msg = 'Invalid sequence record type'
log.error( msg )
raise TypeError( msg )
def write_fasta_records( records, filename ):
log.info("Writing {0} FastaRecords to {1}".format(len(records), filename))
with FastaWriter( filename ) as handle:
for record in records:
handle.writeRecord( record )
check_output_file( filename )
def write_fastq_records( records, filename ):
log.info("Writing {0} FastqRecords to {1}".format(len(records), filename))
with FastqWriter( filename ) as handle:
for record in records:
handle.writeRecord( record )
check_output_file( filename )
def write_amp_analysis_records( records, filename ):
log.info("Writing {0} AmpAnalysisRecords to {1}".format(len(records), filename))
with AmpliconAnalysisWriter( filename ) as handle:
for record in records:
handle.write_fasta( record )
check_output_file( filename )
def get_unique_records( records ):
"""Remove redundant sequences, primarily to avoid confusing Quiver"""
sorted_records = sorted(records, key=lambda r: len(r.sequence), reverse=True)
unique_sequences = []
unique_records = []
for record in sorted_records:
is_unique = True
for sequence in unique_sequences:
if record.sequence in sequence:
is_unique = False
break
if is_unique:
unique_records.append( record )
unique_sequences.append( record.sequence )
return unique_records
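# Illustrative sketch (not part of the original module): because records are
# sorted longest-first, any sequence contained in an already-kept sequence is
# dropped as redundant. The records below are made-up examples.
def _get_unique_records_example():
    records = [FastaRecord('short', 'ACGT'),
               FastaRecord('long', 'TTACGTTT')]
    return get_unique_records(records)   # keeps only the 'long' record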
| bsd-3-clause | 7,961,908,533,006,520,000 | 32.726619 | 84 | 0.636092 | false |
jashort/clexp | tests/test_expenseList.py | 1 | 2297 |
from unittest import TestCase
import datetime
from ExpenseList import ExpenseList
class TestExpenseList(TestCase):
def setUp(self):
self.el = ExpenseList()
# passing amount as a float is valid
self.el.add_expense([2.11, "Food", "Candy Bar", "12/01/2013"])
# so is a string
self.el.add_expense(["53.32", "Living Expenses", "Electric bill", "12/02/2013"])
# so is a string with a $ in front
self.el.add_expense(["$11.74", "Fun", "Party Hats", "11/30/2013"])
self.el.add_expense([33.24, "Living Expenses", "Work hats", "11/29/2013"])
def test_add_expense(self):
e = ExpenseList()
e.add_expense([1.75, "Food", "Candy Bar", "12/01/2013"])
item = e.expenses[2013][12][0]
self.assertEqual(item.amount, 1.75)
self.assertEqual(item.category, "Food")
self.assertEqual(item.date, datetime.datetime(2013, 12, 1).date())
self.assertEqual(len(e.expenses), 1)
def test_get_expenses(self):
results = list(self.el.get_expenses(2013, 12))
self.assertEqual(len(results), 2)
results = list(self.el.get_expenses(2013))
self.assertEqual(len(results), 4)
results = list(self.el.get_expenses(2013, 11, "Fun"))
self.assertEqual(len(results), 1)
def test_get_total(self):
self.assertEqual(self.el.get_total(), 100.41)
self.assertEqual(self.el.get_total(2013), 100.41)
self.assertEqual(self.el.get_total(2013, 11), 44.98)
self.assertEqual(self.el.get_total(2013, 12, "Food"), 2.11)
def test_get_average_per_day(self):
self.assertEqual(self.el.get_average_per_day(2013, 11), 1.5)
def test_get_years(self):
self.assertEqual(self.el.get_years(), [2013])
def test_get_months(self):
self.assertEqual(self.el.get_months(2013), [11, 12])
def test_get_month_totals(self):
totals = self.el.get_month_totals()
self.assertEqual(totals[0][0], datetime.datetime(2013, 11, 1))
self.assertEqual(totals[0][1], 44.98)
self.assertEqual(totals[1][0], datetime.datetime(2013, 12, 1))
self.assertEqual(totals[1][1], 55.43)
def test_categories(self):
categories = self.el.categories
self.assertEqual(len(categories), 3)
| mit | 7,159,833,294,169,194,000 | 37.949153 | 88 | 0.62168 | false |
apache/airflow | airflow/timetables/interval.py | 2 | 3585 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
from typing import Any, Optional
from pendulum import DateTime
from airflow.timetables.base import DagRunInfo, TimeRestriction, Timetable
from airflow.timetables.schedules import CronSchedule, Delta, DeltaSchedule, Schedule
class _DataIntervalTimetable(Timetable):
"""Basis for timetable implementations that schedule data intervals.
Timetable classes of this kind create periodic data intervals from an
underlying schedule representation (e.g. a cron expression, or a timedelta
instance), and schedule a DagRun at the end of each interval.
"""
_schedule: Schedule
def __eq__(self, other: Any) -> bool:
"""Delegate to the schedule."""
if not isinstance(other, _DataIntervalTimetable):
return NotImplemented
return self._schedule == other._schedule
def validate(self) -> None:
self._schedule.validate()
def next_dagrun_info(
self,
last_automated_dagrun: Optional[DateTime],
restriction: TimeRestriction,
) -> Optional[DagRunInfo]:
earliest = restriction.earliest
if not restriction.catchup:
earliest = self._schedule.skip_to_latest(earliest)
if last_automated_dagrun is None:
# First run; schedule the run at the first available time matching
# the schedule, and retrospectively create a data interval for it.
if earliest is None:
return None
start = self._schedule.align(earliest)
else:
# There's a previous run. Create a data interval starting from the end
# of the previous interval.
start = self._schedule.get_next(last_automated_dagrun)
if restriction.latest is not None and start > restriction.latest:
return None
end = self._schedule.get_next(start)
return DagRunInfo.interval(start=start, end=end)
class CronDataIntervalTimetable(_DataIntervalTimetable):
"""Timetable that schedules data intervals with a cron expression.
This corresponds to ``schedule_interval=<cron>``, where ``<cron>`` is either
a five/six-segment representation, or one of ``cron_presets``.
Don't pass ``@once`` in here; use ``OnceTimetable`` instead.
"""
def __init__(self, cron: str, timezone: datetime.tzinfo) -> None:
self._schedule = CronSchedule(cron, timezone)
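# Hedged usage sketch (not part of the original module): constructing a
# cron-based timetable directly. The cron expression and timezone are
# illustrative assumptions only.
def _cron_timetable_example():
    from pendulum import timezone
    return CronDataIntervalTimetable("0 0 * * *", timezone("UTC"))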
class DeltaDataIntervalTimetable(_DataIntervalTimetable):
"""Timetable that schedules data intervals with a time delta.
This corresponds to ``schedule_interval=<delta>``, where ``<delta>`` is
either a ``datetime.timedelta`` or ``dateutil.relativedelta.relativedelta``
instance.
"""
def __init__(self, delta: Delta) -> None:
self._schedule = DeltaSchedule(delta)
| apache-2.0 | -9,126,267,543,298,766,000 | 37.967391 | 85 | 0.697071 | false |
andreasjansson/head-in-the-clouds | headintheclouds/firewall.py | 1 | 3928 |
from fabric.api import * # pylint: disable=W0614,W0401
CHAIN = 'HEAD_IN_THE_CLOUDS'
def set_rules(open_list, from_chains=('INPUT',)):
rules = make_rules(open_list, from_chains)
rules = ['iptables ' + r for r in rules]
cmd = ' && '.join(rules)
sudo(cmd)
def make_rules(open_list, from_chains=('INPUT',)):
c = [] # list of commands we will join with &&
if has_chain():
c.append(flush_chain)
else:
c.append(make_chain)
for from_chain in from_chains:
if not has_jump(from_chain):
c.append(jump_to_chain(from_chain))
c.append(drop_null_packets)
c.append(drop_syn_flood)
c.append(drop_xmas_packets)
c.append(accept_loopback)
c.append(accept_ping)
c.append(accept_docker)
# allow dns ports
c += accept(53, None, 'tcp', None)
c += accept(53, None, 'udp', None)
# allow ssh
c += accept(None, 22, 'tcp', None)
for source_port, destination_port, protocol, addresses in open_list:
c += accept(source_port, destination_port, protocol, addresses)
c.append(accept_established)
c.append(drop_all)
return c
def get_rules():
with settings(hide('everything'), warn_only=True):
rules = sudo('iptables -S %s' % CHAIN)
rules = rules.splitlines()
rules = [r for r in rules if r != make_chain]
return rules
def rules_are_active(open_list, from_chains=('INPUT',)):
new_rules = make_rules(open_list, from_chains)
new_rules = [r for r in new_rules if r != flush_chain]
existing_rules = get_rules()
# it's a bit silly but we don't actually care about order
return set(new_rules) == set(existing_rules)
def has_chain():
with settings(hide('everything'), warn_only=True):
return not sudo('iptables -L %s' % CHAIN).failed
def accept(source_port, destination_port, protocol, raw_addresses):
'''
accepts comma separated addresses or list of addresses
'''
protocol = protocol or 'tcp'
if not isinstance(raw_addresses, list):
raw_addresses = [raw_addresses]
addresses = []
for a in raw_addresses:
if a is None:
addresses.append(None)
else:
addresses += a.split(',')
rules = []
for address in addresses:
parts = ['-A', CHAIN]
if address:
address, _, mask = address.partition('/')
mask = mask or '32'
parts.append('-s %s/%s' % (address, mask))
if source_port:
parts.append('-p %s -m %s --sport %s' % (protocol, protocol, source_port))
if destination_port:
parts.append('-p %s -m %s --dport %s' % (protocol, protocol, destination_port))
parts += ['-j', 'RETURN']
rules.append(' '.join(parts))
return rules
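# Illustrative sketch (not part of the original module) of the rule strings
# accept() builds; the address and port values are made-up examples.
def _accept_example():
    # returns ['-A HEAD_IN_THE_CLOUDS -s 10.0.0.0/8 -p tcp -m tcp --dport 80 -j RETURN']
    return accept(None, 80, 'tcp', '10.0.0.0/8')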
def jump_to_chain(from_chain='INPUT'):
return '-A %s -j %s' % (from_chain, CHAIN)
def delete_jump(from_chain='INPUT'):
return '-D %s -j %s' % (from_chain, CHAIN)
def has_jump(from_chain):
with settings(hide('everything'), warn_only=True):
return not sudo('iptables -C %s -j %s' % (from_chain, CHAIN)).failed
flush_chain = '-F %s' % CHAIN
make_chain = '-N %s' % CHAIN
drop_null_packets = '-A %s -p tcp -m tcp --tcp-flags FIN,SYN,RST,PSH,ACK,URG NONE -j DROP' % CHAIN
drop_syn_flood = '-A %s -p tcp -m tcp ! --tcp-flags FIN,SYN,RST,ACK SYN -m state --state NEW -j DROP' % CHAIN
drop_xmas_packets = '-A %s -p tcp -m tcp --tcp-flags FIN,SYN,RST,PSH,ACK,URG FIN,SYN,RST,PSH,ACK,URG -j DROP' % CHAIN
accept_loopback = '-A %s -i lo -j RETURN' % CHAIN
accept_established = '-A %s -m state --state RELATED,ESTABLISHED -j RETURN' % CHAIN
accept_ping = '-A %s -p icmp -m icmp --icmp-type 8 -j RETURN' % CHAIN
accept_docker = '-A %s -i docker0 -j RETURN' % CHAIN
drop_all = '-A %s -j DROP' % CHAIN
delete_chain = '-X %s' % CHAIN
class FirewallException(Exception):
pass
| gpl-3.0 | 6,920,921,952,457,535,000 | 29.6875 | 118 | 0.598778 | false |
lukasmartinelli/opencodereview | opencodereview/apps/reviews/views.py | 1 | 2271 |
from django.shortcuts import render_to_response, redirect, render
from django.contrib.auth import logout as auth_logout
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.conf import settings
from github3 import login
from .forms import ReviewRequestForm
from .models import ReviewRequest
def _authenticate_github_api(request):
user = request.user
if user and user.is_authenticated():
auth = user.social_auth.get(provider='github')
return login(token=auth.access_token)
return login(token=settings.SOCIAL_AUTH_GITHUB_SECRET)
def home(request):
review_requests = ReviewRequest.objects.all()
reviewers = User.objects.all()
return render(request, 'home.html', {
'requests': review_requests,
'reviewers': reviewers
})
def logout(request):
auth_logout(request)
return redirect('/')
def browse(request):
review_requests = ReviewRequest.objects.all()
return render(request, 'browse.html', {
'requests': review_requests
})
def update_review_request_from_github(gh, request_review):
repo = gh.repository(request_review.repo_owner, request_review.repo_name)
submitter = gh.user(request_review.submitter.username)
request_review.repo_avatar_url = submitter.avatar_url
request_review.repo_description = repo.description
request_review.repo_stars = repo.stargazers
return request_review
def new(request):
if request.method == 'POST':
form = ReviewRequestForm(request.POST)
if form.is_valid():
review_request = form.save(commit=False)
review_request.submitter = request.user
repo_owner, repo_name = form.cleaned_data['github_repo'].split('/')
review_request.repo_owner = repo_owner
review_request.repo_name = repo_name
gh = _authenticate_github_api(request)
update_review_request_from_github(gh, review_request)
review_request.save()
return redirect('/')
return render(request, 'new.html', {
'form': form
})
else:
form = ReviewRequestForm()
return render(request, 'new.html', {
'form': form
})
| mit | -2,511,120,545,363,810,000 | 28.493506 | 79 | 0.667547 | false |
JohnOmernik/pimeup | animatronics/oldPWM/JO_Servo.py | 1 | 1554 |
#!/usr/bin/python
from Adafruit_PWM_Servo_Driver import PWM
import time
import sys
# ===========================================================================
# Example Code
# ===========================================================================
# Initialise the PWM device using the default address
pwm = PWM(0x40)
# Note if you'd like more debug output you can instead run:
#pwm = PWM(0x40, debug=True)
servoMin = 150 # Min pulse length out of 4096
servoMax = 600 # Max pulse length out of 4096
def setServoPulse(channel, pulse):
pulseLength = 1000000 # 1,000,000 us per second
pulseLength /= 60 # 60 Hz
print "%d us per period" % pulseLength
pulseLength /= 4096 # 12 bits of resolution
print "%d us per bit" % pulseLength
pulse *= 1000
pulse /= pulseLength
pwm.setPWM(channel, 0, pulse)
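# Worked example (added for clarity; values follow from the 60 Hz setup below):
#   1,000,000 us / 60      -> 16,666 us per period (integer division)
#   16,666 us / 4096       -> 4 us per bit
#   a 1.5 ms pulse becomes 1.5 * 1000 / 4 = 375 counts passed to setPWM()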
pwm.setPWMFreq(60) # Set frequency to 60 Hz
#little low = 150
#
while True:
u = raw_input("Set pulse (e to exit): ")
if str(u) == "e":
sys.exit(0)
try:
u = int(u)
pwm.setPWM(3, 0, u)
except:
print("Not an int: %s - try again" % u)
f = """
while (True):
# Change speed of continuous servo on channel O
pwm.setPWM(0, 0, servoMin)
time.sleep(0.5)
pwm.setPWM(1, 0, servoMin)
time.sleep(0.5)
pwm.setPWM(2, 0, servoMin)
time.sleep(0.5)
pwm.setPWM(0, 0, servoMax)
time.sleep(0.5)
pwm.setPWM(1, 0, servoMax)
time.sleep(0.5)
pwm.setPWM(2, 0, servoMax)
time.sleep(0.5)
"""
| apache-2.0 | -5,419,668,898,720,730,000 | 22.19403 | 77 | 0.545045 | false |
leejw51/BumblebeeNet | Test/AddLayer.py | 1 | 1320 |
import numpy as np
import matplotlib.pylab as plt
from MulLayer import MulLayer
class AddLayer:
def __init__ (self):
pass
def forward(self, x, y):
out = x + y
return out
def backward(self, dout):
dx = dout * 1
dy = dout * 1
return dx,dy
def test_add_layer():
apple = 100
apple_num = 2
orange = 150
orange_num = 3
tax = 1.1
mul_apple_layer = MulLayer()
mul_orange_layer = MulLayer()
add_apple_orange_layer = AddLayer()
mul_tax_layer = MulLayer()
apple_price = mul_apple_layer.forward( apple, apple_num)
orange_price = mul_orange_layer.forward( orange, orange_num)
all_price = add_apple_orange_layer.forward( apple_price, orange_price)
price = mul_tax_layer.forward( all_price, tax)
dprice = 1
dall_price, dtax = mul_tax_layer.backward( dprice)
dapple_price, dorange_price = add_apple_orange_layer.backward(dall_price)
dorange, dorange_num = mul_orange_layer.backward(dorange_price)
dapple, dapple_num = mul_apple_layer.backward( dapple_price)
print("price=", price)
print(dapple_num, dapple, dorange, dorange_num, dtax)
| mit | -5,489,069,071,422,700,000 | 27.085106 | 81 | 0.568182 | false |
stephenliu1989/HK_DataMiner | hkdataminer/cluster/faiss_dbscan_.py | 1 | 14197 |
# -*- coding: utf-8 -*-
"""
DBSCAN Acclerated by Facebook AI Faiss
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# Lars Buitinck
#
# License: BSD 3 clause
import numpy as np
import time
from scipy import sparse
from numba import autojit
import numba
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.utils import check_array, check_consistent_length
#from sklearn.neighbors import NearestNeighbors
from sklearn.cluster._dbscan_inner import dbscan_inner
import faiss
@autojit
def get_neighborhoods(D, I, eps):
neighborhoods = []
for i in range(len(D)):
distances = D[i]
#print(distances)
distances = np.delete(distances, 0)
indices = I[i]
indices = np.delete(indices, 0)
#print(indices)
index = indices[distances <= eps]
neighborhoods.append(index)
#neighborhoods = np.asarray(neighborhoods)
#np.savetxt('faiss_neighborhoods', np.asarray(neighborhoods), fmt='%s')
return np.asarray(neighborhoods)
def cpu_radius_neighbors(X, eps, min_samples, nlist, nprobe, return_distance=False, IVFFlat=True):
dimension = X.shape[1]
if IVFFlat is True:
quantizer = faiss.IndexFlatL2(dimension)
index_cpu = faiss.IndexIVFFlat(quantizer, dimension, nlist, faiss.METRIC_L2)
# here we specify METRIC_L2, by default it performs inner-product search
assert not index_cpu.is_trained
index_cpu.train(X)
assert index_cpu.is_trained
# here we specify METRIC_L2, by default it performs inner-product search
else:
index_cpu = faiss.IndexFlatL2(dimension)
index_cpu.add(X)
n_samples = 1000
k = min_samples
samples = np.random.choice(len(X), n_samples)
# print(samples)
D, I = index_cpu.search(X[samples], k) # sanity check
while np.min(np.amax(D, axis=1)) < eps:
k = k * 2
# D, I = index_gpu.search(X[samples], k)
#print(np.min(np.amax(D, axis=1)), eps, k)
D, I = index_cpu.search(X[samples], k)
if k > 1024:
k = 1000
#print(np.max(D[:, k - 1]), k, eps)
index_cpu.nprobe = nprobe
D, I = index_cpu.search(X, k) # actual search
return get_neighborhoods(D, I, eps)
def gpu_radius_neighbors(X, eps, min_samples, nlist, nprobe, return_distance=False, IVFFlat=True):
dimension = X.shape[1]
if IVFFlat is True:
quantizer = faiss.IndexFlatL2(dimension)
index_cpu = faiss.IndexIVFFlat(quantizer, dimension, nlist, faiss.METRIC_L2)
# here we specify METRIC_L2, by default it performs inner-product search
res = faiss.StandardGpuResources() # use a single GPU
flat_config = faiss.GpuIndexFlatConfig()
flat_config.device = 0
# make it an IVF GPU index
index_gpu = faiss.index_cpu_to_gpu(res, 0, index_cpu)
assert not index_gpu.is_trained
index_gpu.train(X)
assert index_gpu.is_trained
# here we specify METRIC_L2, by default it performs inner-product search
else:
index_cpu = faiss.IndexFlatL2(dimension)
res = faiss.StandardGpuResources()
flat_config = faiss.GpuIndexFlatConfig()
flat_config.device = 0
index_gpu = faiss.index_cpu_to_gpu(res, 0, index_cpu)
index_gpu.add(X)
n_samples = 1000
k = min_samples
samples = np.random.choice(len(X), n_samples)
# print(samples)
D, I = index_gpu.search(X[samples], k) # sanity check
while np.max(D[:, k - 1]) < eps:
k = k * 2
D, I = index_gpu.search(X[samples], k)
#print(np.max(D[:, k - 1]), k, eps)
index_gpu.nprobe = nprobe
D, I = index_gpu.search(X, k) # actual search
return get_neighborhoods(D, I, eps)
def faiss_dbscan(X, eps=0.5, min_samples=5, nlist=100, nprobe=5, metric='l2', metric_params=None,
algorithm='auto', leaf_size=30, p=2, sample_weight=None, n_jobs=1, GPU=False, IVFFlat=True):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
metric_params : dict, optional
Additional keyword arguments for the metric function.
.. versionadded:: 0.19
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
core_samples : array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
check_consistent_length(X, sample_weight)
# Calculate neighborhood for all samples. This leaves the original point
# in, which needs to be considered later (i.e. point i is in the
# neighborhood of point i, which is useless information here).
if GPU is True:
neighborhoods = gpu_radius_neighbors(X, eps, min_samples, nlist, nprobe, return_distance=False, IVFFlat=IVFFlat)
else:
neighborhoods = cpu_radius_neighbors(X, eps, min_samples, nlist, nprobe, return_distance=False, IVFFlat=IVFFlat)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = -np.ones(X.shape[0], dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
return np.where(core_samples)[0], labels
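# Hedged usage sketch (not part of the original module); the data array and
# parameter values below are illustrative assumptions only.
def _faiss_dbscan_example():
    X = np.random.rand(1000, 16).astype(np.float32)   # Faiss expects float32
    core_indices, labels = faiss_dbscan(X, eps=0.3, min_samples=10,
                                        GPU=False, IVFFlat=False)
    return core_indices, labels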
class Faiss_DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.calculate_distance for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
.. versionadded:: 0.17
metric *precomputed* to accept precomputed sparse matrix.
metric_params : dict, optional
Additional keyword arguments for the metric function.
.. versionadded:: 0.19
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
def __init__(self, eps=0.5, min_samples=5, nlist=100, nprobe=5, metric='l2', n_jobs=1, GPU=False, IVFFlat=True):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.n_jobs = n_jobs
self.GPU = GPU
self.IVFFlat = IVFFlat
self.nlist = nlist
self.nprobe = nprobe
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
"""
#if metric is not "rmsd":
# X = check_array(X, accept_sparse='csr')
#t0 = time.time()
clust = faiss_dbscan(X, eps=self.eps, min_samples=self.min_samples, nlist=self.nlist, nprobe=self.nprobe, sample_weight=sample_weight, GPU=self.GPU, IVFFlat=self.IVFFlat)
#t1 = time.time()
#print("Faiss DBSCAN clustering Time Cost:", t1 - t0)
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
| apache-2.0 | -5,220,703,586,160,923,000 | 39.21813 | 178 | 0.657111 | false |
davidfstr/iTunes-Connect-Autodownload | autodownload.py | 1 | 2639 |
import os
import re
import datetime
import subprocess
vendorid = 85838187 # David Foster
# Find all reports in the current directory
reports = [] # list of (vendorid, YYYYMMDD), both strings
for filename in os.listdir('.'):
# NOTE: Download filename format changed on 2011-11-03
m = re.match(r'S_D_([0-9]+)_([0-9]{8})(_[^.]*)?\.txt(\.gz)?', filename)
if m is None:
continue
reports.append((m.group(1), m.group(2)))
if len(reports) == 0:
exit('No reports found in the current directory.')
# Find all report dates for the vendor of interest
dates = [x[1] for x in reports if x[0] == str(vendorid)] # list of YYYYMMDD
if len(dates) == 0:
exit('No reports in the current directory match the vendor ID ' + str(vendorid) + '.')
# Determine reports available for download
downloadableDates = [] # list of YYYYMMDD
now = datetime.datetime.now()
for i in xrange(30):
downloadableDates.append((now - datetime.timedelta(days = i+1)).strftime('%Y%m%d'))
# Determine reports available for download that haven't already been downloaded
missingDates = list(set(downloadableDates) - set(dates)) # list of YYYYMMDD
missingDates.sort()
if len(missingDates) == 0:
print 'All reports have been downloaded already.'
exit(0)
# Download all missing reports, recording any errors
downloadErrors = [] # list of (YYYYMMDD, stdoutdata, stderrdata)
for curDate in missingDates:
downloader = subprocess.Popen(['java', 'Autoingestion', 'autoingestion.properties', str(vendorid), 'Sales', 'Daily', 'Summary', curDate], stdout=subprocess.PIPE)
out, err = downloader.communicate()
if 'File Downloaded Successfully' in out:
continue
# NOTE: Status message changed format on 2014-06-20
if ('There are no reports available to download for this selection.' in out or
'There is no report available to download, for the selected period' in out):
# No downloads occurred on this day.
# Generate placeholder result file to avoid refetching.
with open('S_D_%s_%s.txt' % (vendorid, curDate), 'wb'):
pass
continue
downloadErrors.append((curDate, out, err))
# Print summary
if len(downloadErrors) == 0:
print "Downloaded %s report(s)." % (len(missingDates))
else:
for (date, out, err) in downloadErrors:
print date + ':'
print out
print
print "Error downloading %s report(s). Remaining %s reports downloaded." % (len(downloadErrors), len(missingDates) - len(downloadErrors))
exit(1)
| mit | 4,098,500,137,916,123,000 | 40.888889 | 165 | 0.648731 | false |
workforce-data-initiative/tpot-warehouse | migrations/transactional/versions/42026ba5bc27_createdb.py | 1 | 3890 |
"""createdb
Revision ID: 42026ba5bc27
Revises:
Create Date: 2017-06-22 20:07:58.548427
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '42026ba5bc27'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.create_table('entity_type',
sa.Column('type_id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=140), nullable=False),
sa.Column('description', sa.String(length=250), nullable=True),
sa.PrimaryKeyConstraint('type_id')
)
op.create_table('exit_type',
sa.Column('type_id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=140), nullable=False),
sa.Column('description', sa.String(length=250), nullable=True),
sa.PrimaryKeyConstraint('type_id')
)
op.create_table('outcome',
sa.Column('potential_outcome_id', sa.Integer(), nullable=False),
sa.Column('description', sa.String(length=250), nullable=False),
sa.PrimaryKeyConstraint('potential_outcome_id')
)
op.create_table('participant',
sa.Column('participant_id', sa.Integer(), nullable=False),
sa.Column('wioa_participant', sa.Boolean(), nullable=False),
sa.Column('wioa_lta_participant', sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint('participant_id')
)
op.create_table('program',
sa.Column('program_cip', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=140), nullable=False),
sa.Column('potential_outcome_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['potential_outcome_id'], ['outcome.potential_outcome_id'], ),
sa.PrimaryKeyConstraint('program_cip')
)
op.create_table('provider',
sa.Column('provider_id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=140), nullable=False),
sa.Column('type_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['type_id'], ['entity_type.type_id'], ),
sa.PrimaryKeyConstraint('provider_id')
)
op.create_table('wage',
sa.Column('wage_start_date', sa.Date(), nullable=False),
sa.Column('wage_end_date', sa.Date(), nullable=False),
sa.Column('participant_id', sa.Integer(), nullable=False),
sa.Column('wage_amt', sa.Float(), nullable=False),
sa.ForeignKeyConstraint(['participant_id'], ['participant.participant_id'], ),
sa.PrimaryKeyConstraint('wage_start_date', 'wage_end_date', 'participant_id')
)
op.create_table('participant_program',
sa.Column('participant_id', sa.Integer(), nullable=False),
sa.Column('program_cip', sa.Integer(), nullable=False),
sa.Column('entry_date', sa.Date(), nullable=False),
sa.Column('exit_date', sa.Date(), nullable=True),
sa.Column('enrolled', sa.Boolean(), nullable=False),
sa.Column('exit_type_id', sa.Integer(), nullable=True),
sa.Column('obtained_credential', sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(['exit_type_id'], ['exit_type.type_id'], ),
sa.ForeignKeyConstraint(['participant_id'], ['participant.participant_id'], ),
sa.ForeignKeyConstraint(['program_cip'], ['program.program_cip'], ),
sa.PrimaryKeyConstraint('participant_id', 'program_cip')
)
op.create_table('program_provider',
sa.Column('program_cip', sa.Integer(), nullable=False),
sa.Column('provider_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['program_cip'], ['program.program_cip'], ),
sa.ForeignKeyConstraint(['provider_id'], ['provider.provider_id'], ),
sa.PrimaryKeyConstraint('program_cip', 'provider_id')
)
def downgrade():
op.drop_table('program_provider')
op.drop_table('participant_program')
op.drop_table('wage')
op.drop_table('provider')
op.drop_table('program')
op.drop_table('participant')
op.drop_table('outcome')
op.drop_table('exit_type')
op.drop_table('entity_type')
| apache-2.0 | -7,156,879,574,634,049,000 | 39.520833 | 90 | 0.674807 | false |
jmflinuxtx/kerneltest-harness | kerneltest/app.py | 1 | 2477 |
# Licensed under the terms of the GNU GPL License version 2
"""
This module handles the creation and configuration of the Flask application.
"""
import collections
import datetime
from flask_restful import Api
from sqlalchemy.orm.exc import NoResultFound
import flask
from . import default_config, db, __version__, ui_view, authentication, api
User = collections.namedtuple("User", ["groups", "cla", "username"])
def create(config=None):
"""
Create an instance of the Flask application
Args:
config (dict): A dictionary with configuration options to use with the
application instead of loading the default configuration. Useful for
testing purposes only.
Returns:
flask.Flask: The configured Flask application.
"""
app = flask.Flask(__name__)
if config:
app.config.update(config)
else:
app.config.update(default_config.config.load_config())
db.initialize(app.config)
authentication.oidc.init_app(app)
app.api = Api(app)
app.api.add_resource(api.Results, "/api/v1/results/")
app.register_blueprint(ui_view.blueprint, url_prefix="/")
app.before_request(pre_request_user)
app.teardown_request(post_request_db)
app.context_processor(include_template_variables)
app.register_error_handler(NoResultFound, handle_no_result)
return app
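# Hedged usage sketch (not part of the original module): create() can take an
# explicit config dict, e.g. in tests. The contents of the dict are left as a
# placeholder because the required settings are deployment-specific.
def _create_app_example(test_config):
    return create(config=test_config)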
def handle_no_result(exception):
"""Turn SQLAlchemy NotFound into HTTP 404"""
return "Not found", 404
def include_template_variables():
"""
A Flask context processor that makes a set of variables available in every
Jinja2 template.
"""
releases = db.Release.query.maintained()
rawhide = db.Release.query.rawhide()
admin = False
if flask.g.user:
admin = ui_view.is_admin()
return dict(
date=datetime.datetime.utcnow().strftime("%a %b %d %Y %H:%M"),
releases=releases,
rawhide=rawhide,
version=__version__,
is_admin=admin,
)
def post_request_db(*args, **kwargs):
"""Remove the database session after the request is done."""
db.Session.remove()
def pre_request_user():
"""Set up the user as a flask global object."""
if ui_view.oidc.user_loggedin:
flask.g.user = User(
ui_view.oidc.user_getfield("groups"),
"FPCA" in ui_view.oidc.user_getfield("agreements"),
ui_view.oidc.user_getfield("nickname"),
)
else:
flask.g.user = None
| gpl-2.0 | -2,710,535,832,293,777,000 | 26.522222 | 80 | 0.659265 | false |
dparlevliet/elastic-firewall | api/digital_ocean.py | 1 | 1323 |
"""
@fileoverview Digital Ocean API
@author David Parlevliet
@version 20130315
@preserve Copyright 2013 David Parlevliet.
Digital Ocean API
=================
Class to get the server details via the Digital Ocean API.
"""
import requests
import json
import re
class Api():
group_name = "Digital Ocean"
client_key = None
api_key = None
servers = {}
def __init__(self, **kwargs):
for key in kwargs:
setattr(self, key, kwargs[key])
def grab_servers(self):
DROPLETS_URL = 'https%s/droplets/?client_id=%s&api_key=%s' % \
('://api.digitalocean.com',
self.client_key,
self.api_key)
try:
droplets = requests.get(DROPLETS_URL)
except:
raise Exception("Fatal error: Unable to connect to API")
try:
data = json.loads(droplets.text)
except:
raise Exception("Fatal error: No droplets found")
for droplet in data['droplets']:
name = droplet['name']
if name not in self.servers:
self.servers[name] = []
self.servers[name].append(droplet['ip_address'])
def get_servers(self, name):
servers = []
for c_hostname in self.servers:
servers = servers + (self.servers[c_hostname] if re.match(name, c_hostname) else [])
return servers
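# Hedged usage sketch (not part of the original module); the keys and the
# hostname pattern below are made-up placeholders.
def _digital_ocean_example():
    api = Api(client_key='CLIENT_KEY', api_key='API_KEY')
    api.grab_servers()                  # populate api.servers from the API
    return api.get_servers('^web.*')    # IPs of droplets whose name matches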
| mit | -8,083,045,312,272,190,000 | 24.442308 | 90 | 0.605442 | false |
joergsimon/gesture-analysis | analysis/feature_selection.py | 1 | 6744 |
from analysis.preparation import labelMatrixToArray
from analysis.preparation import normalizeZeroClassArray
from visualise.trace_features import trace_feature_origin
from visualise.confusion_matrix import plot_confusion_matrix
import numpy as np
import sklearn
import sklearn.linear_model
import sklearn.preprocessing as pp
import sklearn.svm as svm
import sklearn.feature_selection as fs
from analysis.classification import fit_classifier
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
# Interesting References:
# RFECV:
# Guyon, I., Weston, J., Barnhill, S., & Vapnik, V. (2002). Gene selection for
# cancer classification using support vector machines. Mach. Learn.. 46(1-3). 389-422.
def feature_selection(train_data, train_labels, const):
train_labels_arr, exclude = labelMatrixToArray(train_labels, const.label_threshold)
train_data_clean = train_data.drop(exclude)
train_labels_arr, train_data_clean, _ = normalizeZeroClassArray(train_labels_arr, train_data_clean)
print "num features before selection: {}".format(train_data_clean.columns.size)
feature_index = variance_threshold(train_data_clean)
clf, clf_name, needs_scaling = fit_classifier(train_data_clean.values[:,feature_index], np.array(train_labels_arr))
prediction = clf.predict(get_values(train_data_clean, feature_index, needs_scaling))
print("report for {} after variance threshold".format(clf_name))
print(classification_report(train_labels_arr,prediction))
cnf_matrix = confusion_matrix(train_labels_arr, prediction)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['0.0','1.0','2.0','3.0','4.0','5.0','6.0','7.0'],
title="Confusion Matrix for {} after variance threshold".format(clf_name))
trace_feature_origin(feature_index,const)
feature_index = rfe(train_data_clean,train_labels_arr)
clf, clf_name, needs_scaling = fit_classifier(train_data_clean.values[:, feature_index], np.array(train_labels_arr))
prediction = clf.predict(get_values(train_data_clean, feature_index, needs_scaling))
print("report for {} after RFE".format(clf_name))
print(classification_report(train_labels_arr, prediction))
cnf_matrix = confusion_matrix(train_labels_arr, prediction)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['0.0','1.0','2.0','3.0','4.0','5.0','6.0','7.0'],
title="Confusion Matrix for {} after variance threshold".format(clf_name))
trace_feature_origin(feature_index, const)
feature_index = k_best_chi2(train_data_clean, train_labels_arr, 700)
clf, clf_name, needs_scaling = fit_classifier(train_data_clean.values[:, feature_index], np.array(train_labels_arr))
prediction = clf.predict(get_values(train_data_clean, feature_index, needs_scaling))
print("report for {} after Chi2".format(clf_name))
print(classification_report(train_labels_arr, prediction))
cnf_matrix = confusion_matrix(train_labels_arr, prediction)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['0.0','1.0','2.0','3.0','4.0','5.0','6.0','7.0'],
title="Confusion Matrix for {} after variance threshold".format(clf_name))
trace_feature_origin(feature_index, const)
feature_index = rfe_cv_f1(train_data_clean, train_labels_arr)
clf, clf_name, needs_scaling = fit_classifier(train_data_clean.values[:, feature_index], np.array(train_labels_arr))
prediction = clf.predict(get_values(train_data_clean, feature_index, needs_scaling))
print("report for {} after RFECV".format(clf_name))
print(classification_report(train_labels_arr, prediction))
cnf_matrix = confusion_matrix(train_labels_arr, prediction)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['0.0','1.0','2.0','3.0','4.0','5.0','6.0','7.0'],
title="Confusion Matrix for {} after variance threshold".format(clf_name))
trace_feature_origin(feature_index, const)
plt.show()
def get_values(data, feature_index, needs_scaling):
if needs_scaling:
values = data.values[:, feature_index]
minmax = pp.MinMaxScaler()
values = minmax.fit_transform(values)
return values
else:
return data.values[:, feature_index]
def variance_threshold(train_data):
# feature selection using VarianceThreshold filter
sel = fs.VarianceThreshold(threshold=(.8 * (1 - .8)))
fit = sel.fit(train_data.values)
col_index = fit.get_support(indices=True)
print "num features selected by VarianceThreshold: {}".format(len(col_index))
return col_index
def rfe(train_data, train_labels):
# important todo!
# todo: I think also for feature selection we should take care that the 0 class is balanced!
# todo: if you use it that way, scale the features
print "Recursively eliminate features: "
svc = sklearn.linear_model.Lasso(alpha = 0.1) #svm.SVR(kernel="linear")
print "scale data"
values = train_data.values
minmax = pp.MinMaxScaler()
values = minmax.fit_transform(values) # pp.scale(values)
print "test fit."
svc.fit(values, np.array(train_labels))
print "run rfecv.."
rfecv = fs.RFE(estimator=svc, step=0.1, verbose=2)
rfecv.fit(values, np.array(train_labels))
print "get support..."
col_index = rfecv.get_support(indices=True)
print "num features selected by RFE(CV)/Lasso: {}".format(len(col_index))
return col_index
def rfe_cv_f1(train_data, train_labels):
# important todo!
# todo: I think also for feature selection we should take care that the 0 class is balanced!
# todo: if you use it that way, scale the features
print "Recursively eliminate features: "
svc = svm.SVC(kernel="linear") #sklearn.linear_model.Lasso(alpha = 0.1)
print "scale data"
values = train_data.values
minmax = pp.MinMaxScaler()
values = minmax.fit_transform(values)#pp.scale(values)
print "test fit."
svc.fit(values, np.array(train_labels).astype(int))
print "run rfecv.."
rfecv = fs.RFECV(estimator=svc, step=0.05, verbose=2)
rfecv.fit(values, np.array(train_labels).astype(int))
print "get support..."
col_index = rfecv.get_support(indices=True)
print "num features selected by RFECV/SVR: {}".format(len(col_index))
return col_index
def k_best_chi2(train_data, train_labels, k):
values = train_data.values
if values.min() < 0:
values = values + abs(values.min())
kb = fs.SelectKBest(fs.chi2, k=k)
kb.fit(values, np.array(train_labels))
col_index = kb.get_support(indices=True)
print "num features selected by K-Best using chi2: {}".format(len(col_index))
return col_index
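# Hedged usage sketch (not part of the original module): chi2 requires
# non-negative features, which is why k_best_chi2 shifts the matrix by
# abs(min) above. The arguments here are placeholders for the pandas frame
# and label array used elsewhere in this file.
def _k_best_chi2_example(train_data, train_labels):
    selected_columns = k_best_chi2(train_data, train_labels, k=100)
    return train_data.values[:, selected_columns]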
| apache-2.0 | 9,204,650,859,059,847,000 | 47.178571 | 120 | 0.696323 | false |
Stanford-Online/notifier | notifier/tests/test_user.py | 1 | 10684 |
"""
"""
from django.test import TestCase
from django.test.utils import override_settings
from mock import patch
from notifier.user import get_digest_subscribers, DIGEST_NOTIFICATION_PREFERENCE_KEY
from notifier.user import get_moderators
from .utils import make_mock_json_response
TEST_API_KEY = 'ZXY123!@#$%'
# some shorthand to quickly generate fixture results
mkresult = lambda n: {
"id": n,
"email": "email%d" % n,
"name": "name%d" % n,
"preferences": {
DIGEST_NOTIFICATION_PREFERENCE_KEY: "pref%d" % n,
},
"course_info": {},
}
mkexpected = lambda d: dict([(key, val) for (key, val) in d.items() if key != "url"])
def make_response(count=0):
"""Return fixture json response of users."""
return {
"count": count,
"next": None,
"previous": None,
"results": [mkresult(i) for i in xrange(count)],
}
@override_settings(US_API_KEY=TEST_API_KEY)
class RoleTestCase(TestCase):
"""
Test forum roles for moderators
"""
def setUp(self):
"""
Setup common test state
"""
self.course_id = "org/course/run"
self.expected_api_url = "test_server_url/user_api/v1/forum_roles/Moderator/users/"
self.expected_headers = {'X-EDX-API-Key': TEST_API_KEY}
self.expected_params = {
"page_size": 3,
"page": 1,
"course_id": self.course_id,
}
@override_settings(US_URL_BASE="test_server_url", US_RESULT_PAGE_SIZE=3)
def test_get_moderators_empty(self):
"""
Test that an empty moderator list can be retrieved
"""
expected_empty = make_response()
mock_response = make_mock_json_response(json=expected_empty)
with patch('requests.get', return_value=mock_response) as patched:
result = list(get_moderators(self.course_id))
patched.assert_called_once_with(
self.expected_api_url,
params=self.expected_params,
headers=self.expected_headers,
)
self.assertEqual(0, len(result))
@override_settings(US_URL_BASE="test_server_url", US_RESULT_PAGE_SIZE=3)
def test_get_moderators_single_page(self):
"""
Test that a moderator list can be retrieved
"""
expected = make_response(3)
mock_response = make_mock_json_response(json=expected)
with patch('requests.get', return_value=mock_response) as patched:
result = get_moderators(self.course_id)
result = list(result)
patched.assert_called_once_with(
self.expected_api_url,
params=self.expected_params,
headers=self.expected_headers
)
self.assertEqual(result, expected['results'])
self.assertEqual(expected['count'], len(result))
@override_settings(
US_URL_BASE="test_server_url",
US_RESULT_PAGE_SIZE=3,
US_HTTP_AUTH_USER='someuser',
US_HTTP_AUTH_PASS='somepass',
)
def test_get_moderators_basic_auth(self):
"""
Test that basic auth works
"""
expected = make_response(3)
mock_response = make_mock_json_response(json=expected)
with patch('requests.get', return_value=mock_response) as patched:
result = get_moderators(self.course_id)
result = list(result)
patched.assert_called_once_with(
self.expected_api_url,
params=self.expected_params,
headers=self.expected_headers,
auth=('someuser', 'somepass'),
)
self.assertEqual(result, expected['results'])
@override_settings(US_URL_BASE="test_server_url", US_RESULT_PAGE_SIZE=3)
def test_get_moderators_multi_page(self):
"""
Test that a moderator list can be paged
"""
expected_pages = [
{
"count": 5,
"next": "not none",
"previous": None,
"results": [
mkresult(i) for i in xrange(1, 4)
],
},
{
"count": 5,
"next": None,
"previous": "not none",
"results": [
mkresult(i) for i in xrange(4, 6)
],
},
]
mock_response = make_mock_json_response(json=expected_pages[0])
with patch('requests.get', return_value=mock_response) as patched:
result = []
users = get_moderators(self.course_id)
result.append(users.next())
patched.assert_called_once_with(
self.expected_api_url,
params=self.expected_params,
headers=self.expected_headers)
result.append(users.next())
result.append(users.next()) # result 3, end of page
self.assertEqual(
[
mkexpected(mkresult(i)) for i in xrange(1, 4)
],
result
)
# still should only have called requests.get() once
self.assertEqual(1, patched.call_count)
patched.reset_mock() # reset call count
self.expected_params['page'] = 2
mock_response.json.return_value = expected_pages[1]
self.assertEqual(mkexpected(mkresult(4)), users.next())
patched.assert_called_once_with(
self.expected_api_url,
params=self.expected_params,
headers=self.expected_headers)
self.assertEqual(mkexpected(mkresult(5)), users.next())
self.assertEqual(1, patched.call_count)
self.assertRaises(StopIteration, users.next)
@override_settings(US_API_KEY=TEST_API_KEY)
class UserTestCase(TestCase):
"""
"""
def setUp(self):
self.expected_api_url = "test_server_url/notifier_api/v1/users/"
self.expected_params = {"page_size":3, "page":1}
self.expected_headers = {'X-EDX-API-Key': TEST_API_KEY}
@override_settings(US_URL_BASE="test_server_url", US_RESULT_PAGE_SIZE=3)
def test_get_digest_subscribers_empty(self):
"""
"""
# empty result
mock_response = make_mock_json_response(json={
"count": 0,
"next": None,
"previous": None,
"results": []
})
with patch('requests.get', return_value=mock_response) as p:
res = list(get_digest_subscribers())
p.assert_called_once_with(
self.expected_api_url,
params=self.expected_params,
headers=self.expected_headers)
self.assertEqual(0, len(res))
@override_settings(US_URL_BASE="test_server_url", US_RESULT_PAGE_SIZE=3)
def test_get_digest_subscribers_single_page(self):
"""
"""
# single page result
mock_response = make_mock_json_response(json={
"count": 3,
"next": None,
"previous": None,
"results": [mkresult(1), mkresult(2), mkresult(3)]
})
with patch('requests.get', return_value=mock_response) as p:
res = list(get_digest_subscribers())
p.assert_called_once_with(
self.expected_api_url,
params=self.expected_params,
headers=self.expected_headers)
self.assertEqual([
mkexpected(mkresult(1)),
mkexpected(mkresult(2)),
mkexpected(mkresult(3))], res)
@override_settings(US_URL_BASE="test_server_url", US_RESULT_PAGE_SIZE=3)
def test_get_digest_subscribers_multi_page(self):
"""
"""
# multi page result
expected_multi_p1 = {
"count": 5,
"next": "not none",
"previous": None,
"results": [mkresult(1), mkresult(2), mkresult(3)]
}
expected_multi_p2 = {
"count": 5,
"next": None,
"previous": "not none",
"results": [mkresult(4), mkresult(5)]
}
expected_pages = [expected_multi_p1, expected_multi_p2]
def side_effect(*a, **kw):
return expected_pages.pop(0)
mock_response = make_mock_json_response(json=expected_multi_p1)
with patch('requests.get', return_value=mock_response) as p:
res = []
g = get_digest_subscribers()
res.append(g.next())
p.assert_called_once_with(
self.expected_api_url,
params=self.expected_params,
headers=self.expected_headers)
res.append(g.next())
res.append(g.next()) # result 3, end of page
self.assertEqual([
mkexpected(mkresult(1)),
mkexpected(mkresult(2)),
mkexpected(mkresult(3))], res)
# still should only have called requests.get() once
self.assertEqual(1, p.call_count)
mock_response = make_mock_json_response(json=expected_multi_p2)
with patch('requests.get', return_value=mock_response) as p:
self.expected_params['page']=2
self.assertEqual(mkexpected(mkresult(4)), g.next())
p.assert_called_once_with(
self.expected_api_url,
params=self.expected_params,
headers=self.expected_headers)
self.assertEqual(mkexpected(mkresult(5)), g.next())
self.assertEqual(1, p.call_count)
self.assertRaises(StopIteration, g.next)
@override_settings(US_URL_BASE="test_server_url", US_RESULT_PAGE_SIZE=3, US_HTTP_AUTH_USER='someuser', US_HTTP_AUTH_PASS='somepass')
def test_get_digest_subscribers_basic_auth(self):
"""
"""
# single page result
mock_response = make_mock_json_response(json={
"count": 3,
"next": None,
"previous": None,
"results": [mkresult(1), mkresult(2), mkresult(3)]
})
with patch('requests.get', return_value=mock_response) as p:
res = list(get_digest_subscribers())
p.assert_called_once_with(
self.expected_api_url,
params=self.expected_params,
headers=self.expected_headers,
auth=('someuser', 'somepass'))
self.assertEqual([
mkexpected(mkresult(1)),
mkexpected(mkresult(2)),
mkexpected(mkresult(3))], res)
| agpl-3.0 | -2,097,093,915,212,685,600 | 33.801303 | 136 | 0.544833 | false |
CLVsol/oehealth_ichu | oehealth_patient/__openerp__.py | 1 | 2606 |
# -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2012 Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
{
'name': 'OeHealth: Patient',
'version': '1.0.0',
'author': 'Carlos Eduardo Vercelino - CLVsol',
'category': 'Generic Modules/Others',
'license': 'AGPL-3',
'website': 'http://CLVsol.net',
'description': '''
''',
'images': [],
'depends': [
'oehealth_comm',
]
,
'data': [
'security/ir.model.access.csv',
],
'demo': [],
'test': [],
'init_xml': [
'security/oehealth_patient_security.xml',
'oehealth_person_view.xml',
#'data/oehealth_prescription_order.xml',
'oehealth_patient_view.xml',
#'oehealth_prescription_order/oehealth_prescription_order_view.xml',
#'oehealth_prescription_line/oehealth_prescription_line_view.xml',
#'oehealth_medication_template/oehealth_medication_template_view.xml',
#'oehealth_patient_medication/oehealth_patient_medication_view.xml'
],
'update_xml': [],
'installable': True,
'active': False,
'css': [
'static/src/css/patient.css',
],
}
|
agpl-3.0
| -572,427,993,051,303,940 | 45.535714 | 87 | 0.44551 | false |
Rignak/Scripts-Python
|
DeepLearning/TagPrediction/TagPrediction.py
|
1
|
10291
|
import numpy as np
import matplotlib.pyplot as plt
import os
from os.path import join
import cv2
from skimage.transform import resize
from tqdm import tqdm
from datetime import datetime
import functools
os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '3'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from keras import optimizers
from keras.models import Model, load_model
from keras.layers import Flatten, Dense, Conv2D, Dropout, MaxPooling2D, BatchNormalization, Input
from keras.callbacks import ModelCheckpoint, Callback
import json
import tensorflow as tf
tf.reset_default_graph()
from keras import backend as K
K.image_dim_ordering()
###################### Hyperparameters ######################
# Model parameters
INPUT_SHAPE = (256, 256, 3)
IMAGE_NUMBER = 30000
WEIGHT_FILENAME = os.path.join('models', 'tag_prediction.hdf5')
ROOT = 'dress'
VALIDATION_SPLIT = 0.9
TAG_END = "_dress"
FILE_END = 'S'
MIN_TAG_USE = 500
# Training parameters
BATCH_SIZE = 8
EPOCHS = 100
LEARNING_RATE = 1 * 10 ** -3
DROPOUT = 0.5
MOMENTUM = 0.5
WEIGHT_DECAY = 4 * 10 ** -5 # weight decay
ACTIVATION = 'selu'
NEURON_BASIS = 32
class PlotLearning(Callback):
def __init__(self, examples=False):
super().__init__()
self.examples = examples
self.x = []
self.losses, self.val_losses = [], []
self.logs = []
def on_epoch_end(self, epoch, logs={}):
self.logs.append(logs)
self.x.append(epoch)
self.losses.append(logs.get('loss'))
self.val_losses.append(logs.get('val_loss'))
plt.yscale('log')
plt.plot(self.x, self.losses)
plt.plot(self.x, self.val_losses)
plt.xlabel('Epochs')
plt.ylabel('Crossentropy')
plt.legend(['Training', 'Validation'])
plt.tight_layout()
plt.savefig('plot.png')
plt.close()
if self.examples:
z = self.model.predict(self.model.example[0][:6])
plot_example(self.model.example[0][:6], self.model.example[1][:6], z, self.model.labels)
plt.savefig(f"epochs/epoch{self.x[-1]}.png")
plt.close()
def plot_example(xs, ys, zs, labels):
n = xs.shape[0]
plt.figure(figsize=(12, 8))
plt.tight_layout()
for i, (x, y, z) in enumerate(zip(xs, ys, zs)):
if i != 0:
tick_label = [' ' for label in labels]
else:
tick_label = labels
plt.subplot(3, n, i + 1)
plt.imshow(x)
plt.subplot(3, n, i + n + 1)
plt.barh(labels, y, tick_label=tick_label)
plt.xlim(0, 1)
plt.subplot(3, n, i + 2 * n + 1)
plt.barh(labels, z, tick_label=tick_label)
plt.xlim(0, 1)
def import_model(tag_number, input_shape=INPUT_SHAPE):
inputs = Input(input_shape)
layer = Conv2D(NEURON_BASIS, (3, 3), activation=ACTIVATION, padding='same')(inputs)
layer = Conv2D(NEURON_BASIS, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = MaxPooling2D(pool_size=(2, 2))(layer)
layer = Conv2D(NEURON_BASIS * 2, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS * 2, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS * 2, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = MaxPooling2D(pool_size=(2, 2))(layer)
layer = Conv2D(NEURON_BASIS * 4, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS * 4, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS * 4, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = MaxPooling2D(pool_size=(2, 2))(layer)
layer = Conv2D(NEURON_BASIS * 8, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS * 8, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS * 8, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS * 4, (1, 1), activation=ACTIVATION, padding='same')(layer)
layer = MaxPooling2D(pool_size=(2, 2))(layer)
layer = Flatten()(layer)
layer = BatchNormalization()(layer)
layer = Dense(512, activation=ACTIVATION)(layer)
layer = Dropout(DROPOUT)(layer)
layer = Dense(2048, activation=ACTIVATION)(layer)
layer = Dropout(DROPOUT)(layer)
layer = Dense(tag_number, activation='sigmoid')(layer)
model = Model(inputs=[inputs], outputs=[layer])
    sgd = optimizers.SGD(lr=LEARNING_RATE, momentum=MOMENTUM, nesterov=True)  # built but unused: compile() below uses 'adam'
    model.compile(optimizer='adam', loss='binary_crossentropy')
model.summary()
return model
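# Illustrative note (added, not from the original author): the network above is
# a small VGG-style stack of 3x3 convolutions ending in a sigmoid layer of width
# `tag_number`, so each output unit is an independent per-tag probability and
# binary_crossentropy treats the task as multi-label classification, e.g.:
#   model = import_model(tag_number=12)
#   probs = model.predict(np.zeros((1,) + INPUT_SHAPE))  # shape (1, 12), values in [0, 1]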
def get_tags(root, files, min_tag_use=MIN_TAG_USE, suffix=TAG_END):
with open(join('..', 'imgs', root + '.json'), 'r') as file:
tags = json.load(file)
tag_count = {}
files = [os.path.split(file)[-1] for file in files]
for key, value in tags.items():
if key + f'{FILE_END}.png' not in files:
continue
for tag in value.split():
if tag not in tag_count:
tag_count[tag] = 1
else:
tag_count[tag] += 1
with open(join('..', 'imgs', root + '_count.json'), 'w') as file:
json.dump(tag_count, file, sort_keys=True, indent=4)
print(f'Have {len(list(tag_count.keys()))} tags')
tags_count = {tag: count for tag, count in tag_count.items() if count > min_tag_use and tag.endswith(suffix)}
    print(f'Keep tags with >{min_tag_use} use: {len(tags_count)} tags')
for tag, count in tags_count.items():
print(f'{tag}: {count}')
input('Continue?')
return tags, tags_count
def make_output(files, tags, tags_count):
output = {}
for file in tqdm(files):
i = os.path.splitext(os.path.split(file)[-1])[0]
if FILE_END:
i = i[:-1]
truth = tags[i].split()
output[file] = []
for tag in tags_count.keys():
if tag in truth:
output[file].append(1)
else:
output[file].append(0)
return output
def metrics(model, files, output, tags_count):
true_positives = np.zeros(len(output))
positives = np.zeros(len(output))
truth = np.zeros(len(output))
for file in tqdm(files):
img = image_process(file)
img = np.expand_dims(img, axis=0)
prediction = model.predict(img)[0]
for i, coef in enumerate(prediction):
            f = list(tags_count.values())[i] / len(files)
if coef > f:
positives[i] += 1
if output[file][i] > f:
truth[i] += 1
if output[file][i] > f and coef > f:
true_positives[i] += 1
print('Tag\tPrecision\tRecall')
for i, k, l, key in zip(true_positives, positives, truth, tags_count.keys()):
if k != 0:
precision = int(i / k * 1000) / 100
else:
precision = 0
if l != 0:
recall = int(i / l * 1000) / 100
else:
recall = 0
print(f'{key}\t{precision}%\t{recall}%\t')
# @functools.lru_cache(maxsize=IMAGE_NUMBER)
def image_process(file):
img = cv2.imread(file)
img = img[:, :, [2, 1, 0]]
# img = resize(img, INPUT_SHAPE, mode='reflect', preserve_range=True, anti_aliasing=True)
return img
def generator(files, output, batch_size=BATCH_SIZE):
while True:
batch_files = np.random.choice(files, size=batch_size)
# j += 1
# print(index, j, [(k + j) % n for k in index], [(k + j) for k in index], index+j)
batch_output = np.array([output[file] for file in batch_files])
batch_input = np.zeros([batch_size] + [shape for shape in INPUT_SHAPE])
for i, file in enumerate(batch_files):
batch_input[i] = image_process(file)
yield batch_input / 255, batch_output
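# Sketch of how the generator above is consumed (assuming `files` and `output`
# are built as in main() below); each yield is a batch of images scaled to
# [0, 1] plus the matching multi-hot tag matrix:
#   batch_x, batch_y = next(generator(files, output, batch_size=8))
#   # batch_x.shape == (8,) + INPUT_SHAPE, batch_y.shape == (8, number_of_tags)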
def train(model, files, output, tags_count, weight_filename=WEIGHT_FILENAME,
validation_split=VALIDATION_SPLIT, epochs=EPOCHS, batch_size=BATCH_SIZE):
class_weights = {i: len(files) / count for i, count in enumerate(tags_count.values())}
index = int(len(files) * validation_split)
training_generator = generator(files[:index], output)
validation_generator = generator(files[index:], output)
calls = [ModelCheckpoint(weight_filename, save_best_only=True),
PlotLearning(examples=True)]
model.example = next(validation_generator)
model.labels = list(tags_count.keys())
model.fit_generator(generator=training_generator,
validation_data=validation_generator,
verbose=1,
steps_per_epoch=int(len(files) * validation_split) // batch_size,
validation_steps=int(len(files) * (1 - validation_split)) // batch_size,
epochs=epochs,
callbacks=calls,
class_weight=class_weights
)
def test(files, output, tags_count, weight_filename=WEIGHT_FILENAME):
model = load_model(weight_filename)
metrics(model, files, output, tags_count)
image_generator = generator(files, output, batch_size=1)
fs = [count / len(files) for count in tags_count.values()]
    fs = [0.5 for i in fs]  # override the per-tag frequencies with a flat 0.5 decision threshold
while True:
print('---')
im, truth = next(image_generator)
        truth_string = ' '.join([list(tags_count.keys())[j] for j, v in enumerate(truth[0]) if v > fs[j]])
print('TRUTH:', truth_string)
print(im.shape)
prediction = model.predict(im)[0]
        prediction_string = ' '.join([list(tags_count.keys())[j] for j, v in enumerate(prediction) if v > fs[j]])
print('PREDICTION:', prediction_string)
plt.imshow(im[0])
plt.show()
def main():
root = join('..', 'imgs', ROOT)
files = [join(root, folder, file) for folder in os.listdir(root) for file in os.listdir(join(root, folder))][
:IMAGE_NUMBER]
tags, tags_count = get_tags(ROOT, files)
output = make_output(files, tags, tags_count)
# test(files, output, tags_count)
model = import_model(len(tags_count))
train(model, files, output, tags_count)
print('DONE')
if __name__ == '__main__':
main()
|
gpl-3.0
| 7,879,356,906,679,736,000 | 34.982517 | 113 | 0.598095 | false |
yajunyang/ShakeCamera
|
MicroAgent/local/proxylib.py
|
1
|
105526
|
#!/usr/bin/env python
# coding:utf-8
__version__ = '1.1'
import sys
import os
import glob
import errno
import time
import struct
import collections
import binascii
import zlib
import itertools
import re
import fnmatch
import io
import random
import base64
import string
import hashlib
import threading
import thread
import socket
import ssl
import logging
import select
import Queue
import SocketServer
import BaseHTTPServer
import httplib
import urllib
import urllib2
import urlparse
import OpenSSL
import dnslib
gevent = sys.modules.get('gevent') or logging.warn('please enable gevent.')
# Re-add sslwrap to Python 2.7.9
import inspect
__ssl__ = __import__('ssl')
try:
_ssl = __ssl__._ssl
except AttributeError:
_ssl = __ssl__._ssl2
def new_sslwrap(sock, server_side=False, keyfile=None, certfile=None, cert_reqs=__ssl__.CERT_NONE, ssl_version=__ssl__.PROTOCOL_SSLv23, ca_certs=None, ciphers=None):
context = __ssl__.SSLContext(ssl_version)
context.verify_mode = cert_reqs or __ssl__.CERT_NONE
if ca_certs:
context.load_verify_locations(ca_certs)
if certfile:
context.load_cert_chain(certfile, keyfile)
if ciphers:
context.set_ciphers(ciphers)
caller_self = inspect.currentframe().f_back.f_locals['self']
return context._wrap_socket(sock, server_side=server_side, ssl_sock=caller_self)
if not hasattr(_ssl, 'sslwrap'):
_ssl.sslwrap = new_sslwrap
try:
from Crypto.Cipher.ARC4 import new as RC4Cipher
except ImportError:
logging.warn('Load Crypto.Cipher.ARC4 Failed, Use Pure Python Instead.')
class RC4Cipher(object):
def __init__(self, key):
x = 0
box = range(256)
for i, y in enumerate(box):
x = (x + y + ord(key[i % len(key)])) & 0xff
box[i], box[x] = box[x], y
self.__box = box
self.__x = 0
self.__y = 0
def encrypt(self, data):
out = []
out_append = out.append
x = self.__x
y = self.__y
box = self.__box
for char in data:
x = (x + 1) & 0xff
y = (y + box[x]) & 0xff
box[x], box[y] = box[y], box[x]
out_append(chr(ord(char) ^ box[(box[x] + box[y]) & 0xff]))
self.__x = x
self.__y = y
return ''.join(out)
class XORCipher(object):
"""XOR Cipher Class"""
def __init__(self, key):
self.__key_gen = itertools.cycle([ord(x) for x in key]).next
self.__key_xor = lambda s: ''.join(chr(ord(x) ^ self.__key_gen()) for x in s)
if len(key) == 1:
try:
from Crypto.Util.strxor import strxor_c
c = ord(key)
self.__key_xor = lambda s: strxor_c(s, c)
except ImportError:
logging.debug('Load Crypto.Util.strxor Failed, Use Pure Python Instead.\n')
def encrypt(self, data):
return self.__key_xor(data)
class CipherFileObject(object):
"""fileobj wrapper for cipher"""
def __init__(self, fileobj, cipher):
self.__fileobj = fileobj
self.__cipher = cipher
def __getattr__(self, attr):
if attr not in ('__fileobj', '__cipher'):
return getattr(self.__fileobj, attr)
def read(self, size=-1):
return self.__cipher.encrypt(self.__fileobj.read(size))
class LRUCache(object):
"""http://pypi.python.org/pypi/lru/"""
def __init__(self, max_items=100):
self.cache = {}
self.key_order = []
self.max_items = max_items
def __setitem__(self, key, value):
self.cache[key] = value
self._mark(key)
def __getitem__(self, key):
value = self.cache[key]
self._mark(key)
return value
def __contains__(self, key):
return key in self.cache
def __len__(self):
return len(self.cache)
def _mark(self, key):
if key in self.key_order:
self.key_order.remove(key)
self.key_order.insert(0, key)
if len(self.key_order) > self.max_items:
index = self.max_items // 2
delitem = self.cache.__delitem__
key_order = self.key_order
any(delitem(key_order[x]) for x in xrange(index, len(key_order)))
self.key_order = self.key_order[:index]
def clear(self):
self.cache = {}
self.key_order = []
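# Minimal usage sketch for the cache above (hypothetical values): every access
# moves a key to the front of key_order, and once more than max_items keys are
# present only the most recent max_items // 2 entries are kept.
#   cache = LRUCache(max_items=2)
#   cache['a'] = 1
#   cache['b'] = 2
#   cache['c'] = 3   # triggers eviction; only 'c' survives
#   'c' in cache     # True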
class CertUtil(object):
"""CertUtil module, based on mitmproxy"""
ca_vendor = 'GoAgent'
ca_keyfile = 'CA.crt'
ca_thumbprint = ''
ca_certdir = 'certs'
ca_digest = 'sha1' if sys.platform == 'win32' and sys.getwindowsversion() < (6,) else 'sha256'
ca_lock = threading.Lock()
@staticmethod
def create_ca():
key = OpenSSL.crypto.PKey()
key.generate_key(OpenSSL.crypto.TYPE_RSA, 2048)
req = OpenSSL.crypto.X509Req()
subj = req.get_subject()
subj.countryName = 'CN'
subj.stateOrProvinceName = 'Internet'
subj.localityName = 'Cernet'
subj.organizationName = CertUtil.ca_vendor
subj.organizationalUnitName = '%s Root' % CertUtil.ca_vendor
subj.commonName = '%s CA' % CertUtil.ca_vendor
req.set_pubkey(key)
req.sign(key, CertUtil.ca_digest)
ca = OpenSSL.crypto.X509()
ca.set_serial_number(0)
ca.gmtime_adj_notBefore(0)
ca.gmtime_adj_notAfter(24 * 60 * 60 * 3652)
ca.set_issuer(req.get_subject())
ca.set_subject(req.get_subject())
ca.set_pubkey(req.get_pubkey())
ca.sign(key, 'sha1')
return key, ca
@staticmethod
def dump_ca():
key, ca = CertUtil.create_ca()
with open(CertUtil.ca_keyfile, 'wb') as fp:
fp.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, ca))
fp.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key))
@staticmethod
def get_cert_serial_number(commonname):
assert CertUtil.ca_thumbprint
saltname = '%s|%s' % (CertUtil.ca_thumbprint, commonname)
return int(hashlib.md5(saltname.encode('utf-8')).hexdigest(), 16)
@staticmethod
def _get_cert(commonname, sans=()):
with open(CertUtil.ca_keyfile, 'rb') as fp:
content = fp.read()
key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM, content)
ca = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, content)
pkey = OpenSSL.crypto.PKey()
pkey.generate_key(OpenSSL.crypto.TYPE_RSA, 2048)
req = OpenSSL.crypto.X509Req()
subj = req.get_subject()
subj.countryName = 'CN'
subj.stateOrProvinceName = 'Internet'
subj.localityName = 'Cernet'
subj.organizationalUnitName = '%s Branch' % CertUtil.ca_vendor
if commonname[0] == '.':
subj.commonName = '*' + commonname
subj.organizationName = '*' + commonname
sans = ['*'+commonname] + [x for x in sans if x != '*'+commonname]
else:
subj.commonName = commonname
subj.organizationName = commonname
sans = [commonname] + [x for x in sans if x != commonname]
#req.add_extensions([OpenSSL.crypto.X509Extension(b'subjectAltName', True, ', '.join('DNS: %s' % x for x in sans)).encode()])
req.set_pubkey(pkey)
req.sign(pkey, CertUtil.ca_digest)
cert = OpenSSL.crypto.X509()
cert.set_version(2)
try:
cert.set_serial_number(CertUtil.get_cert_serial_number(commonname))
except OpenSSL.SSL.Error:
cert.set_serial_number(int(time.time()*1000))
cert.gmtime_adj_notBefore(-600) #avoid crt time error warning
cert.gmtime_adj_notAfter(60 * 60 * 24 * 3652)
cert.set_issuer(ca.get_subject())
cert.set_subject(req.get_subject())
cert.set_pubkey(req.get_pubkey())
if commonname[0] == '.':
sans = ['*'+commonname] + [s for s in sans if s != '*'+commonname]
else:
sans = [commonname] + [s for s in sans if s != commonname]
#cert.add_extensions([OpenSSL.crypto.X509Extension(b'subjectAltName', True, ', '.join('DNS: %s' % x for x in sans))])
cert.sign(key, CertUtil.ca_digest)
certfile = os.path.join(CertUtil.ca_certdir, commonname + '.crt')
with open(certfile, 'wb') as fp:
fp.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert))
fp.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, pkey))
return certfile
@staticmethod
def get_cert(commonname, sans=()):
if commonname.count('.') >= 2 and [len(x) for x in reversed(commonname.split('.'))] > [2, 4]:
commonname = '.'+commonname.partition('.')[-1]
certfile = os.path.join(CertUtil.ca_certdir, commonname + '.crt')
if os.path.exists(certfile):
return certfile
elif OpenSSL is None:
return CertUtil.ca_keyfile
else:
with CertUtil.ca_lock:
if os.path.exists(certfile):
return certfile
return CertUtil._get_cert(commonname, sans)
@staticmethod
def import_ca(certfile):
commonname = os.path.splitext(os.path.basename(certfile))[0]
if sys.platform.startswith('win'):
import ctypes
with open(certfile, 'rb') as fp:
certdata = fp.read()
if certdata.startswith(b'-----'):
begin = b'-----BEGIN CERTIFICATE-----'
end = b'-----END CERTIFICATE-----'
certdata = base64.b64decode(b''.join(certdata[certdata.find(begin)+len(begin):certdata.find(end)].strip().splitlines()))
crypt32 = ctypes.WinDLL(b'crypt32.dll'.decode())
store_handle = crypt32.CertOpenStore(10, 0, 0, 0x4000 | 0x20000, b'ROOT'.decode())
if not store_handle:
return -1
CERT_FIND_SUBJECT_STR = 0x00080007
CERT_FIND_HASH = 0x10000
X509_ASN_ENCODING = 0x00000001
class CRYPT_HASH_BLOB(ctypes.Structure):
_fields_ = [('cbData', ctypes.c_ulong), ('pbData', ctypes.c_char_p)]
assert CertUtil.ca_thumbprint
crypt_hash = CRYPT_HASH_BLOB(20, binascii.a2b_hex(CertUtil.ca_thumbprint.replace(':', '')))
crypt_handle = crypt32.CertFindCertificateInStore(store_handle, X509_ASN_ENCODING, 0, CERT_FIND_HASH, ctypes.byref(crypt_hash), None)
if crypt_handle:
crypt32.CertFreeCertificateContext(crypt_handle)
return 0
ret = crypt32.CertAddEncodedCertificateToStore(store_handle, 0x1, certdata, len(certdata), 4, None)
crypt32.CertCloseStore(store_handle, 0)
del crypt32
return 0 if ret else -1
elif sys.platform == 'darwin':
return os.system(('security find-certificate -a -c "%s" | grep "%s" >/dev/null || security add-trusted-cert -d -r trustRoot -k "/Library/Keychains/System.keychain" "%s"' % (commonname, commonname, certfile.decode('utf-8'))).encode('utf-8'))
elif sys.platform.startswith('linux'):
import platform
platform_distname = platform.dist()[0]
if platform_distname == 'Ubuntu':
pemfile = "/etc/ssl/certs/%s.pem" % commonname
new_certfile = "/usr/local/share/ca-certificates/%s.crt" % commonname
if not os.path.exists(pemfile):
return os.system('cp "%s" "%s" && update-ca-certificates' % (certfile, new_certfile))
elif any(os.path.isfile('%s/certutil' % x) for x in os.environ['PATH'].split(os.pathsep)):
return os.system('certutil -L -d sql:$HOME/.pki/nssdb | grep "%s" || certutil -d sql:$HOME/.pki/nssdb -A -t "C,," -n "%s" -i "%s"' % (commonname, commonname, certfile))
else:
logging.warning('please install *libnss3-tools* package to import GoAgent root ca')
return 0
@staticmethod
def remove_ca(name):
import ctypes
import ctypes.wintypes
class CERT_CONTEXT(ctypes.Structure):
_fields_ = [
('dwCertEncodingType', ctypes.wintypes.DWORD),
('pbCertEncoded', ctypes.POINTER(ctypes.wintypes.BYTE)),
('cbCertEncoded', ctypes.wintypes.DWORD),
('pCertInfo', ctypes.c_void_p),
('hCertStore', ctypes.c_void_p),]
crypt32 = ctypes.WinDLL(b'crypt32.dll'.decode())
store_handle = crypt32.CertOpenStore(10, 0, 0, 0x4000 | 0x20000, b'ROOT'.decode())
pCertCtx = crypt32.CertEnumCertificatesInStore(store_handle, None)
while pCertCtx:
certCtx = CERT_CONTEXT.from_address(pCertCtx)
certdata = ctypes.string_at(certCtx.pbCertEncoded, certCtx.cbCertEncoded)
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_ASN1, certdata)
if hasattr(cert, 'get_subject'):
cert = cert.get_subject()
cert_name = next((v for k, v in cert.get_components() if k == 'CN'), '')
if cert_name and name == cert_name:
crypt32.CertDeleteCertificateFromStore(crypt32.CertDuplicateCertificateContext(pCertCtx))
pCertCtx = crypt32.CertEnumCertificatesInStore(store_handle, pCertCtx)
return 0
@staticmethod
def check_ca():
#Check CA exists
capath = os.path.join(os.path.dirname(os.path.abspath(__file__)), CertUtil.ca_keyfile)
certdir = os.path.join(os.path.dirname(os.path.abspath(__file__)), CertUtil.ca_certdir)
if not os.path.exists(capath):
if os.path.exists(certdir):
any(os.remove(x) for x in glob.glob(certdir+'/*.crt')+glob.glob(certdir+'/.*.crt'))
if os.name == 'nt':
try:
CertUtil.remove_ca('%s CA' % CertUtil.ca_vendor)
except Exception as e:
logging.warning('CertUtil.remove_ca failed: %r', e)
CertUtil.dump_ca()
with open(capath, 'rb') as fp:
CertUtil.ca_thumbprint = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, fp.read()).digest('sha1')
#Check Certs
certfiles = glob.glob(certdir+'/*.crt')+glob.glob(certdir+'/.*.crt')
if certfiles:
filename = random.choice(certfiles)
commonname = os.path.splitext(os.path.basename(filename))[0]
with open(filename, 'rb') as fp:
serial_number = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, fp.read()).get_serial_number()
if serial_number != CertUtil.get_cert_serial_number(commonname):
any(os.remove(x) for x in certfiles)
#Check CA imported
if CertUtil.import_ca(capath) != 0:
logging.warning('install root certificate failed, Please run as administrator/root/sudo')
#Check Certs Dir
if not os.path.exists(certdir):
os.makedirs(certdir)
class SSLConnection(object):
"""OpenSSL Connection Wapper"""
def __init__(self, context, sock):
self._context = context
self._sock = sock
self._connection = OpenSSL.SSL.Connection(context, sock)
self._makefile_refs = 0
def __getattr__(self, attr):
if attr not in ('_context', '_sock', '_connection', '_makefile_refs'):
return getattr(self._connection, attr)
def __iowait(self, io_func, *args, **kwargs):
timeout = self._sock.gettimeout() or 0.1
fd = self._sock.fileno()
while True:
try:
return io_func(*args, **kwargs)
except (OpenSSL.SSL.WantReadError, OpenSSL.SSL.WantX509LookupError):
sys.exc_clear()
_, _, errors = select.select([fd], [], [fd], timeout)
if errors:
break
except OpenSSL.SSL.WantWriteError:
sys.exc_clear()
_, _, errors = select.select([], [fd], [fd], timeout)
if errors:
break
def accept(self):
sock, addr = self._sock.accept()
        client = OpenSSL.SSL.Connection(self._context, sock)
return client, addr
def do_handshake(self):
self.__iowait(self._connection.do_handshake)
def connect(self, *args, **kwargs):
return self.__iowait(self._connection.connect, *args, **kwargs)
def __send(self, data, flags=0):
try:
return self.__iowait(self._connection.send, data, flags)
except OpenSSL.SSL.SysCallError as e:
if e[0] == -1 and not data:
# errors when writing empty strings are expected and can be ignored
return 0
raise
def __send_memoryview(self, data, flags=0):
if hasattr(data, 'tobytes'):
data = data.tobytes()
return self.__send(data, flags)
send = __send if sys.version_info >= (2, 7, 5) else __send_memoryview
def recv(self, bufsiz, flags=0):
pending = self._connection.pending()
if pending:
return self._connection.recv(min(pending, bufsiz))
try:
return self.__iowait(self._connection.recv, bufsiz, flags)
except OpenSSL.SSL.ZeroReturnError:
return ''
except OpenSSL.SSL.SysCallError as e:
if e[0] == -1 and 'Unexpected EOF' in e[1]:
# errors when reading empty strings are expected and can be ignored
return ''
raise
def read(self, bufsiz, flags=0):
return self.recv(bufsiz, flags)
def write(self, buf, flags=0):
return self.sendall(buf, flags)
def close(self):
if self._makefile_refs < 1:
self._connection = None
if self._sock:
socket.socket.close(self._sock)
else:
self._makefile_refs -= 1
def makefile(self, mode='r', bufsize=-1):
self._makefile_refs += 1
return socket._fileobject(self, mode, bufsize, close=True)
@staticmethod
def context_builder(ssl_version='SSLv23', ca_certs=None, cipher_suites=('ALL', '!aNULL', '!eNULL')):
protocol_version = getattr(OpenSSL.SSL, '%s_METHOD' % ssl_version)
ssl_context = OpenSSL.SSL.Context(protocol_version)
if ca_certs:
ssl_context.load_verify_locations(os.path.abspath(ca_certs))
ssl_context.set_verify(OpenSSL.SSL.VERIFY_PEER, lambda c, x, e, d, ok: ok)
else:
ssl_context.set_verify(OpenSSL.SSL.VERIFY_NONE, lambda c, x, e, d, ok: ok)
ssl_context.set_cipher_list(':'.join(cipher_suites))
return ssl_context
def openssl_set_session_cache_mode(context, mode):
assert isinstance(context, OpenSSL.SSL.Context)
try:
import ctypes
SSL_CTRL_SET_SESS_CACHE_MODE = 44
SESS_CACHE_OFF = 0x0
SESS_CACHE_CLIENT = 0x1
SESS_CACHE_SERVER = 0x2
SESS_CACHE_BOTH = 0x3
c_mode = {'off':SESS_CACHE_OFF, 'client':SESS_CACHE_CLIENT, 'server':SESS_CACHE_SERVER, 'both':SESS_CACHE_BOTH}[mode.lower()]
if hasattr(context, 'set_session_cache_mode'):
context.set_session_cache_mode(c_mode)
elif OpenSSL.__version__ == '0.13':
# http://bazaar.launchpad.net/~exarkun/pyopenssl/release-0.13/view/head:/OpenSSL/ssl/context.h#L27
c_context = ctypes.c_void_p.from_address(id(context)+ctypes.sizeof(ctypes.c_int)+ctypes.sizeof(ctypes.c_voidp))
if os.name == 'nt':
# https://github.com/openssl/openssl/blob/92c78463720f71e47c251ffa58493e32cd793e13/ssl/ssl.h#L884
ctypes.c_int.from_address(c_context.value+ctypes.sizeof(ctypes.c_voidp)*7+ctypes.sizeof(ctypes.c_ulong)).value = c_mode
else:
import ctypes.util
# FIXME
# ctypes.cdll.LoadLibrary(ctypes.util.find_library('ssl')).SSL_CTX_ctrl(c_context, SSL_CTRL_SET_SESS_CACHE_MODE, c_mode, None)
except Exception as e:
logging.warning('openssl_set_session_cache_mode failed: %r', e)
class ProxyUtil(object):
"""ProxyUtil module, based on urllib2"""
@staticmethod
def parse_proxy(proxy):
return urllib2._parse_proxy(proxy)
@staticmethod
def get_system_proxy():
proxies = urllib2.getproxies()
return proxies.get('https') or proxies.get('http') or {}
@staticmethod
def get_listen_ip():
listen_ip = '127.0.0.1'
sock = None
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(('8.8.8.8', 53))
listen_ip = sock.getsockname()[0]
except StandardError:
pass
finally:
if sock:
sock.close()
return listen_ip
def inflate(data):
return zlib.decompress(data, -zlib.MAX_WBITS)
def deflate(data):
return zlib.compress(data)[2:-4]
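# Note (added for clarity): these helpers work on raw DEFLATE streams --
# deflate() strips the 2-byte zlib header and 4-byte checksum, and inflate()
# decompresses with a negative window size -- so the round trip holds:
#   inflate(deflate('hello world')) == 'hello world'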
def message_html(title, banner, detail=''):
MESSAGE_TEMPLATE = '''
<html><head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<title>$title</title>
<style><!--
body {font-family: arial,sans-serif}
div.nav {margin-top: 1ex}
div.nav A {font-size: 10pt; font-family: arial,sans-serif}
span.nav {font-size: 10pt; font-family: arial,sans-serif; font-weight: bold}
div.nav A,span.big {font-size: 12pt; color: #0000cc}
div.nav A {font-size: 10pt; color: black}
A.l:link {color: #6f6f6f}
A.u:link {color: green}
//--></style>
</head>
<body text=#000000 bgcolor=#ffffff>
<table border=0 cellpadding=2 cellspacing=0 width=100%>
<tr><td bgcolor=#3366cc><font face=arial,sans-serif color=#ffffff><b>Message From LocalProxy</b></td></tr>
<tr><td> </td></tr></table>
<blockquote>
<H1>$banner</H1>
$detail
<p>
</blockquote>
<table width=100% cellpadding=0 cellspacing=0><tr><td bgcolor=#3366cc><img alt="" width=1 height=4></td></tr></table>
</body></html>
'''
return string.Template(MESSAGE_TEMPLATE).substitute(title=title, banner=banner, detail=detail)
def parse_hostport(host, default_port=80):
m = re.match(r'(.+)[#](\d+)$', host)
if m:
return m.group(1).strip('[]'), int(m.group(2))
else:
return host.strip('[]'), default_port
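# Examples for the helper above (note the unusual '#' host/port separator and
# the stripping of IPv6 brackets):
#   parse_hostport('8.8.8.8#53')         -> ('8.8.8.8', 53)
#   parse_hostport('[2001:db8::1]', 443) -> ('2001:db8::1', 443)
#   parse_hostport('example.com')        -> ('example.com', 80)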
def dnslib_resolve_over_udp(query, dnsservers, timeout, **kwargs):
"""
http://gfwrev.blogspot.com/2009/11/gfwdns.html
http://zh.wikipedia.org/wiki/%E5%9F%9F%E5%90%8D%E6%9C%8D%E5%8A%A1%E5%99%A8%E7%BC%93%E5%AD%98%E6%B1%A1%E6%9F%93
http://support.microsoft.com/kb/241352
https://gist.github.com/klzgrad/f124065c0616022b65e5
"""
if not isinstance(query, (basestring, dnslib.DNSRecord)):
raise TypeError('query argument requires string/DNSRecord')
blacklist = kwargs.get('blacklist', ())
blacklist_prefix = tuple(x for x in blacklist if x.endswith('.'))
turstservers = kwargs.get('turstservers', ())
dns_v4_servers = [x for x in dnsservers if ':' not in x]
dns_v6_servers = [x for x in dnsservers if ':' in x]
sock_v4 = sock_v6 = None
socks = []
if dns_v4_servers:
sock_v4 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
socks.append(sock_v4)
if dns_v6_servers:
sock_v6 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
socks.append(sock_v6)
timeout_at = time.time() + timeout
try:
for _ in xrange(4):
try:
for dnsserver in dns_v4_servers:
if isinstance(query, basestring):
if dnsserver in ('8.8.8.8', '8.8.4.4'):
query = '.'.join(x[:-1] + x[-1].upper() for x in query.split('.')).title()
query = dnslib.DNSRecord(q=dnslib.DNSQuestion(query))
query_data = query.pack()
if query.q.qtype == 1 and dnsserver in ('8.8.8.8', '8.8.4.4'):
query_data = query_data[:-5] + '\xc0\x04' + query_data[-4:]
sock_v4.sendto(query_data, parse_hostport(dnsserver, 53))
for dnsserver in dns_v6_servers:
if isinstance(query, basestring):
query = dnslib.DNSRecord(q=dnslib.DNSQuestion(query, qtype=dnslib.QTYPE.AAAA))
query_data = query.pack()
sock_v6.sendto(query_data, parse_hostport(dnsserver, 53))
while time.time() < timeout_at:
ins, _, _ = select.select(socks, [], [], 0.1)
for sock in ins:
reply_data, reply_address = sock.recvfrom(512)
reply_server = reply_address[0]
record = dnslib.DNSRecord.parse(reply_data)
iplist = [str(x.rdata) for x in record.rr if x.rtype in (1, 28, 255)]
if any(x in blacklist or x.startswith(blacklist_prefix) for x in iplist):
logging.warning('qname=%r dnsservers=%r record bad iplist=%r', query.q.qname, dnsservers, iplist)
elif record.header.rcode and not iplist and reply_server in turstservers:
logging.info('qname=%r trust reply_server=%r record rcode=%s', query.q.qname, reply_server, record.header.rcode)
return record
elif iplist:
logging.debug('qname=%r reply_server=%r record iplist=%s', query.q.qname, reply_server, iplist)
return record
else:
logging.debug('qname=%r reply_server=%r record null iplist=%s', query.q.qname, reply_server, iplist)
continue
except socket.error as e:
logging.warning('handle dns query=%s socket: %r', query, e)
raise socket.gaierror(11004, 'getaddrinfo %r from %r failed' % (query, dnsservers))
finally:
for sock in socks:
sock.close()
def dnslib_resolve_over_tcp(query, dnsservers, timeout, **kwargs):
"""dns query over tcp"""
if not isinstance(query, (basestring, dnslib.DNSRecord)):
raise TypeError('query argument requires string/DNSRecord')
blacklist = kwargs.get('blacklist', ())
blacklist_prefix = tuple(x for x in blacklist if x.endswith('.'))
def do_resolve(query, dnsserver, timeout, queobj):
if isinstance(query, basestring):
qtype = dnslib.QTYPE.AAAA if ':' in dnsserver else dnslib.QTYPE.A
query = dnslib.DNSRecord(q=dnslib.DNSQuestion(query, qtype=qtype))
query_data = query.pack()
sock_family = socket.AF_INET6 if ':' in dnsserver else socket.AF_INET
sock = socket.socket(sock_family)
rfile = None
try:
sock.settimeout(timeout or None)
sock.connect(parse_hostport(dnsserver, 53))
sock.send(struct.pack('>h', len(query_data)) + query_data)
rfile = sock.makefile('r', 1024)
reply_data_length = rfile.read(2)
if len(reply_data_length) < 2:
raise socket.gaierror(11004, 'getaddrinfo %r from %r failed' % (query.q.qname, dnsserver))
reply_data = rfile.read(struct.unpack('>h', reply_data_length)[0])
record = dnslib.DNSRecord.parse(reply_data)
iplist = [str(x.rdata) for x in record.rr if x.rtype in (1, 28, 255)]
if any(x in blacklist or x.startswith(blacklist_prefix) for x in iplist):
logging.debug('qname=%r dnsserver=%r record bad iplist=%r', query.q.qname, dnsserver, iplist)
raise socket.gaierror(11004, 'getaddrinfo %r from %r failed' % (query, dnsserver))
else:
logging.debug('qname=%r dnsserver=%r record iplist=%s', query.q.qname, dnsserver, iplist)
queobj.put(record)
except socket.error as e:
logging.debug('qname=%r dnsserver=%r failed %r', query.q.qname, dnsserver, e)
queobj.put(e)
finally:
if rfile:
rfile.close()
sock.close()
queobj = Queue.Queue()
for dnsserver in dnsservers:
thread.start_new_thread(do_resolve, (query, dnsserver, timeout, queobj))
for i in range(len(dnsservers)):
try:
            result = queobj.get(timeout=timeout)
except Queue.Empty:
raise socket.gaierror(11004, 'getaddrinfo %r from %r failed' % (query, dnsservers))
if result and not isinstance(result, Exception):
return result
elif i == len(dnsservers) - 1:
logging.warning('dnslib_resolve_over_tcp %r with %s return %r', query, dnsservers, result)
raise socket.gaierror(11004, 'getaddrinfo %r from %r failed' % (query, dnsservers))
def dnslib_record2iplist(record):
"""convert dnslib.DNSRecord to iplist"""
assert isinstance(record, dnslib.DNSRecord)
iplist = [x for x in (str(r.rdata) for r in record.rr) if re.match(r'^\d+\.\d+\.\d+\.\d+$', x) or ':' in x]
return iplist
def get_dnsserver_list():
if os.name == 'nt':
import ctypes
import ctypes.wintypes
DNS_CONFIG_DNS_SERVER_LIST = 6
buf = ctypes.create_string_buffer(2048)
ctypes.windll.dnsapi.DnsQueryConfig(DNS_CONFIG_DNS_SERVER_LIST, 0, None, None, ctypes.byref(buf), ctypes.byref(ctypes.wintypes.DWORD(len(buf))))
ipcount = struct.unpack('I', buf[0:4])[0]
iplist = [socket.inet_ntoa(buf[i:i+4]) for i in xrange(4, ipcount*4+4, 4)]
return iplist
elif os.path.isfile('/etc/resolv.conf'):
with open('/etc/resolv.conf', 'rb') as fp:
return re.findall(r'(?m)^nameserver\s+(\S+)', fp.read())
else:
logging.warning("get_dnsserver_list failed: unsupport platform '%s-%s'", sys.platform, os.name)
return []
def spawn_later(seconds, target, *args, **kwargs):
def wrap(*args, **kwargs):
time.sleep(seconds)
return target(*args, **kwargs)
return thread.start_new_thread(wrap, args, kwargs)
def is_clienthello(data):
if len(data) < 20:
return False
if data.startswith('\x16\x03'):
# TLSv12/TLSv11/TLSv1/SSLv3
length, = struct.unpack('>h', data[3:5])
return len(data) == 5 + length
elif data[0] == '\x80' and data[2:4] == '\x01\x03':
# SSLv23
return len(data) == 2 + ord(data[1])
else:
return False
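# Notes on the framing check above (added for clarity): a TLS ClientHello record
# starts with content type 0x16 (handshake) and major version 0x03, with a
# 2-byte record length at offset 3, so for example '\x16\x03\x01\x00\x14'
# followed by exactly 0x14 (20) handshake bytes passes; SSLv2-style hellos
# start with 0x80. Anything shorter than 20 bytes is rejected outright.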
def extract_sni_name(packet):
if packet.startswith('\x16\x03'):
stream = io.BytesIO(packet)
stream.read(0x2b)
session_id_length = ord(stream.read(1))
stream.read(session_id_length)
cipher_suites_length, = struct.unpack('>h', stream.read(2))
stream.read(cipher_suites_length+2)
extensions_length, = struct.unpack('>h', stream.read(2))
# extensions = {}
while True:
data = stream.read(2)
if not data:
break
etype, = struct.unpack('>h', data)
elen, = struct.unpack('>h', stream.read(2))
edata = stream.read(elen)
if etype == 0:
server_name = edata[5:]
return server_name
def random_hostname():
word = ''.join(random.choice(('bcdfghjklmnpqrstvwxyz', 'aeiou')[x&1]) for x in xrange(random.randint(5, 10)))
gltd = random.choice(['org', 'com', 'net', 'gov', 'cn'])
return 'www.%s.%s' % (word, gltd)
def get_uptime():
if os.name == 'nt':
import ctypes
try:
tick = ctypes.windll.kernel32.GetTickCount64()
except AttributeError:
tick = ctypes.windll.kernel32.GetTickCount()
return tick / 1000.0
elif os.path.isfile('/proc/uptime'):
with open('/proc/uptime', 'rb') as fp:
uptime = fp.readline().strip().split()[0].strip()
return float(uptime)
elif any(os.path.isfile(os.path.join(x, 'uptime')) for x in os.environ['PATH'].split(os.pathsep)):
# http://www.opensource.apple.com/source/lldb/lldb-69/test/pexpect-2.4/examples/uptime.py
pattern = r'up\s+(.*?),\s+([0-9]+) users?,\s+load averages?: ([0-9]+\.[0-9][0-9]),?\s+([0-9]+\.[0-9][0-9]),?\s+([0-9]+\.[0-9][0-9])'
output = os.popen('uptime').read()
duration, _, _, _, _ = re.search(pattern, output).groups()
days, hours, mins = 0, 0, 0
if 'day' in duration:
m = re.search(r'([0-9]+)\s+day', duration)
days = int(m.group(1))
if ':' in duration:
m = re.search(r'([0-9]+):([0-9]+)', duration)
hours = int(m.group(1))
mins = int(m.group(2))
if 'min' in duration:
m = re.search(r'([0-9]+)\s+min', duration)
mins = int(m.group(1))
return days * 86400 + hours * 3600 + mins * 60
else:
#TODO: support other platforms
return None
def get_process_list():
import ctypes
Process = collections.namedtuple('Process', 'pid name exe')
process_list = []
if os.name == 'nt':
PROCESS_QUERY_INFORMATION = 0x0400
PROCESS_VM_READ = 0x0010
lpidProcess = (ctypes.c_ulong * 1024)()
cb = ctypes.sizeof(lpidProcess)
cbNeeded = ctypes.c_ulong()
ctypes.windll.psapi.EnumProcesses(ctypes.byref(lpidProcess), cb, ctypes.byref(cbNeeded))
nReturned = cbNeeded.value/ctypes.sizeof(ctypes.c_ulong())
pidProcess = [i for i in lpidProcess][:nReturned]
has_queryimage = hasattr(ctypes.windll.kernel32, 'QueryFullProcessImageNameA')
for pid in pidProcess:
hProcess = ctypes.windll.kernel32.OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, 0, pid)
if hProcess:
modname = ctypes.create_string_buffer(2048)
count = ctypes.c_ulong(ctypes.sizeof(modname))
if has_queryimage:
ctypes.windll.kernel32.QueryFullProcessImageNameA(hProcess, 0, ctypes.byref(modname), ctypes.byref(count))
else:
ctypes.windll.psapi.GetModuleFileNameExA(hProcess, 0, ctypes.byref(modname), ctypes.byref(count))
exe = modname.value
name = os.path.basename(exe)
process_list.append(Process(pid=pid, name=name, exe=exe))
ctypes.windll.kernel32.CloseHandle(hProcess)
elif sys.platform.startswith('linux'):
for filename in glob.glob('/proc/[0-9]*/cmdline'):
pid = int(filename.split('/')[2])
exe_link = '/proc/%d/exe' % pid
if os.path.exists(exe_link):
exe = os.readlink(exe_link)
name = os.path.basename(exe)
process_list.append(Process(pid=pid, name=name, exe=exe))
else:
try:
import psutil
process_list = psutil.get_process_list()
except StandardError as e:
logging.exception('psutil.get_process_list() failed: %r', e)
return process_list
def forward_socket(local, remote, timeout, bufsize):
"""forward socket"""
def __io_copy(dest, source, timeout):
try:
dest.settimeout(timeout)
source.settimeout(timeout)
while 1:
data = source.recv(bufsize)
if not data:
break
dest.sendall(data)
except socket.timeout:
pass
except (socket.error, ssl.SSLError, OpenSSL.SSL.Error) as e:
if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET, errno.ENOTCONN, errno.EPIPE):
raise
if e.args[0] in (errno.EBADF,):
return
finally:
for sock in (dest, source):
try:
sock.close()
except StandardError:
pass
thread.start_new_thread(__io_copy, (remote.dup(), local.dup(), timeout))
__io_copy(local, remote, timeout)
class LocalProxyServer(SocketServer.ThreadingTCPServer):
"""Local Proxy Server"""
request_queue_size = 1024
allow_reuse_address = True
daemon_threads = True
def close_request(self, request):
try:
request.close()
except StandardError:
pass
def finish_request(self, request, client_address):
try:
self.RequestHandlerClass(request, client_address, self)
except (socket.error, ssl.SSLError, OpenSSL.SSL.Error) as e:
if e[0] not in (errno.ECONNABORTED, errno.ECONNRESET, errno.EPIPE):
raise
def handle_error(self, *args):
"""make ThreadingTCPServer happy"""
exc_info = sys.exc_info()
error = exc_info and len(exc_info) and exc_info[1]
if isinstance(error, (socket.error, ssl.SSLError, OpenSSL.SSL.Error)) and len(error.args) > 1 and 'bad write retry' in error.args[1]:
exc_info = error = None
else:
del exc_info, error
SocketServer.ThreadingTCPServer.handle_error(self, *args)
class BaseFetchPlugin(object):
"""abstract fetch plugin"""
def __init__(self, *args, **kwargs):
pass
def handle(self, handler, **kwargs):
raise NotImplementedError
class MockFetchPlugin(BaseFetchPlugin):
"""mock fetch plugin"""
def handle(self, handler, status=400, headers={}, body=''):
"""mock response"""
logging.info('%s "MOCK %s %s %s" %d %d', handler.address_string(), handler.command, handler.path, handler.protocol_version, status, len(body))
headers = dict((k.title(), v) for k, v in headers.items())
if 'Transfer-Encoding' in headers:
del headers['Transfer-Encoding']
if 'Content-Length' not in headers:
headers['Content-Length'] = len(body)
if 'Connection' not in headers:
headers['Connection'] = 'close'
handler.send_response(status)
for key, value in headers.items():
handler.send_header(key, value)
handler.end_headers()
handler.wfile.write(body)
class StripPlugin(BaseFetchPlugin):
"""strip fetch plugin"""
def __init__(self, ssl_version='SSLv23', ciphers='ALL:!aNULL:!eNULL', cache_size=128, session_cache=True):
self.ssl_method = getattr(ssl, 'PROTOCOL_%s' % ssl_version)
self.ciphers = ciphers
def do_ssl_handshake(self, handler):
"do_ssl_handshake with ssl"
certfile = CertUtil.get_cert(handler.host)
ssl_sock = ssl.wrap_socket(handler.connection, keyfile=certfile, certfile=certfile, server_side=True, ssl_version=self.ssl_method, ciphers=self.ciphers)
handler.connection = ssl_sock
handler.rfile = handler.connection.makefile('rb', handler.bufsize)
handler.wfile = handler.connection.makefile('wb', 0)
handler.scheme = 'https'
def handle(self, handler, do_ssl_handshake=True):
"""strip connect"""
logging.info('%s "STRIP %s %s:%d %s" - -', handler.address_string(), handler.command, handler.host, handler.port, handler.protocol_version)
handler.send_response(200)
handler.end_headers()
if do_ssl_handshake:
try:
self.do_ssl_handshake(handler)
except (socket.error, ssl.SSLError, OpenSSL.SSL.Error) as e:
if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET) or (len(e.args) > 1 and e.args[1] == 'Unexpected EOF'):
logging.exception('ssl.wrap_socket(connection=%r) failed: %s', handler.connection, e)
return
try:
handler.raw_requestline = handler.rfile.readline(65537)
if len(handler.raw_requestline) > 65536:
handler.requestline = ''
handler.request_version = ''
handler.command = ''
handler.send_error(414)
handler.wfile.close()
return
if not handler.raw_requestline:
handler.close_connection = 1
return
if not handler.parse_request():
handler.send_error(400)
handler.wfile.close()
return
except (socket.error, ssl.SSLError, OpenSSL.SSL.Error) as e:
if e.args[0] in (errno.ECONNABORTED, errno.ECONNRESET, errno.EPIPE):
handler.close_connection = 1
return
else:
raise
try:
handler.do_METHOD()
except (socket.error, ssl.SSLError, OpenSSL.SSL.Error) as e:
if e.args[0] not in (errno.ECONNABORTED, errno.ETIMEDOUT, errno.EPIPE):
raise
class StripPluginEx(StripPlugin):
"""strip fetch plugin"""
def __init__(self, ssl_version='SSLv23', ciphers='ALL:!aNULL:!eNULL', cache_size=128, session_cache=True):
self.ssl_method = getattr(OpenSSL.SSL, '%s_METHOD' % ssl_version)
self.ciphers = ciphers
self.ssl_context_cache = LRUCache(cache_size*2)
self.ssl_session_cache = session_cache
def get_ssl_context_by_hostname(self, hostname):
try:
return self.ssl_context_cache[hostname]
except LookupError:
context = OpenSSL.SSL.Context(self.ssl_method)
certfile = CertUtil.get_cert(hostname)
if certfile in self.ssl_context_cache:
context = self.ssl_context_cache[hostname] = self.ssl_context_cache[certfile]
return context
with open(certfile, 'rb') as fp:
pem = fp.read()
context.use_certificate(OpenSSL.crypto.load_certificate(OpenSSL.SSL.FILETYPE_PEM, pem))
context.use_privatekey(OpenSSL.crypto.load_privatekey(OpenSSL.SSL.FILETYPE_PEM, pem))
if self.ciphers:
context.set_cipher_list(self.ciphers)
self.ssl_context_cache[hostname] = self.ssl_context_cache[certfile] = context
if self.ssl_session_cache:
openssl_set_session_cache_mode(context, 'server')
return context
def do_ssl_handshake(self, handler):
"do_ssl_handshake with OpenSSL"
ssl_sock = SSLConnection(self.get_ssl_context_by_hostname(handler.host), handler.connection)
ssl_sock.set_accept_state()
ssl_sock.do_handshake()
handler.connection = ssl_sock
handler.rfile = handler.connection.makefile('rb', handler.bufsize)
handler.wfile = handler.connection.makefile('wb', 0)
handler.scheme = 'https'
class DirectFetchPlugin(BaseFetchPlugin):
"""direct fetch plugin"""
connect_timeout = 4
read_timeout = 16
max_retry = 3
def handle(self, handler, **kwargs):
if handler.command != 'CONNECT':
return self.handle_method(handler, kwargs)
else:
return self.handle_connect(handler, kwargs)
def handle_method(self, handler, kwargs):
rescue_bytes = int(kwargs.pop('rescue_bytes', 0))
method = handler.command
if handler.path.lower().startswith(('http://', 'https://', 'ftp://')):
url = handler.path
else:
url = 'http://%s%s' % (handler.headers['Host'], handler.path)
headers = dict((k.title(), v) for k, v in handler.headers.items())
body = handler.body
response = None
try:
if rescue_bytes:
headers['Range'] = 'bytes=%d-' % rescue_bytes
response = handler.net2.create_http_request(method, url, headers, body, timeout=handler.net2.connect_timeout, read_timeout=self.read_timeout, **kwargs)
logging.info('%s "DIRECT %s %s %s" %s %s', handler.address_string(), handler.command, url, handler.protocol_version, response.status, response.getheader('Content-Length', '-'))
response_headers = dict((k.title(), v) for k, v in response.getheaders())
if not rescue_bytes:
handler.send_response(response.status)
for key, value in response.getheaders():
handler.send_header(key, value)
handler.end_headers()
if handler.command == 'HEAD' or response.status in (204, 304):
response.close()
return
need_chunked = 'Transfer-Encoding' in response_headers
bufsize = 8192
written = rescue_bytes
while True:
data = None
with gevent.Timeout(handler.net2.connect_timeout, False):
data = response.read(bufsize)
if data is None:
logging.warning('DIRECT response.read(%r) %r timeout', bufsize, url)
if response.getheader('Accept-Ranges', '') == 'bytes' and not urlparse.urlparse(url).query:
kwargs['rescue_bytes'] = written
return self.handle(handler, **kwargs)
handler.close_connection = True
break
if not data:
if need_chunked:
handler.wfile.write('0\r\n\r\n')
break
if need_chunked:
handler.wfile.write('%x\r\n' % len(data))
handler.wfile.write(data)
written += len(data)
if need_chunked:
handler.wfile.write('\r\n')
del data
except (ssl.SSLError, socket.timeout, socket.error):
if response:
if response.fp and response.fp._sock:
response.fp._sock.close()
response.close()
finally:
if response:
response.close()
def handle_connect(self, handler, kwargs):
"""forward socket"""
host = handler.host
port = handler.port
local = handler.connection
remote = None
handler.send_response(200)
handler.end_headers()
handler.close_connection = 1
data = local.recv(1024)
if not data:
local.close()
return
data_is_clienthello = is_clienthello(data)
if data_is_clienthello:
kwargs['client_hello'] = data
for i in xrange(self.max_retry):
try:
remote = handler.net2.create_tcp_connection(host, port, handler.net2.connect_timeout, **kwargs)
if not data_is_clienthello and remote and not isinstance(remote, Exception):
remote.sendall(data)
break
except StandardError as e:
logging.exception('%s "FORWARD %s %s:%d %s" %r', handler.address_string(), handler.command, host, port, handler.protocol_version, e)
if hasattr(remote, 'close'):
remote.close()
if i == self.max_retry - 1:
raise
logging.info('%s "FORWARD %s %s:%d %s" - -', handler.address_string(), handler.command, host, port, handler.protocol_version)
if hasattr(remote, 'fileno'):
# reset timeout default to avoid long http upload failure, but it will delay timeout retry :(
remote.settimeout(None)
data = data_is_clienthello and getattr(remote, 'data', None)
if data:
del remote.data
local.sendall(data)
forward_socket(local, remote, 60, bufsize=256*1024)
class BaseProxyHandlerFilter(object):
"""base proxy handler filter"""
def filter(self, handler):
raise NotImplementedError
class SimpleProxyHandlerFilter(BaseProxyHandlerFilter):
"""simple proxy handler filter"""
def filter(self, handler):
return 'direct', {}
class MIMTProxyHandlerFilter(BaseProxyHandlerFilter):
"""mimt proxy handler filter"""
def filter(self, handler):
if handler.command == 'CONNECT':
return 'strip', {}
else:
return 'direct', {}
class DirectRegionFilter(BaseProxyHandlerFilter):
"""direct region filter"""
region_cache = LRUCache(16*1024)
def __init__(self, regions):
self.regions = set(regions)
try:
import pygeoip
self.geoip = pygeoip.GeoIP(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'GeoIP.dat'))
except StandardError as e:
logging.error('DirectRegionFilter init pygeoip failed: %r', e)
sys.exit(-1)
def get_country_code(self, hostname, dnsservers):
"""http://dev.maxmind.com/geoip/legacy/codes/iso3166/"""
try:
return self.region_cache[hostname]
except KeyError:
pass
try:
if re.match(r'^\d+\.\d+\.\d+\.\d+$', hostname) or ':' in hostname:
iplist = [hostname]
elif dnsservers:
iplist = dnslib_record2iplist(dnslib_resolve_over_udp(hostname, dnsservers, timeout=2))
else:
iplist = socket.gethostbyname_ex(hostname)[-1]
if iplist[0].startswith(('127.', '192.168.', '10.')):
country_code = 'LOCAL'
else:
country_code = self.geoip.country_code_by_addr(iplist[0])
except StandardError as e:
logging.warning('DirectRegionFilter cannot determine region for hostname=%r %r', hostname, e)
country_code = ''
self.region_cache[hostname] = country_code
return country_code
def filter(self, handler):
country_code = self.get_country_code(handler.host, handler.dns_servers)
if country_code in self.regions:
return 'direct', {}
class AuthFilter(BaseProxyHandlerFilter):
"""authorization filter"""
auth_info = "Proxy authentication required"""
white_list = set(['127.0.0.1'])
def __init__(self, username, password):
self.username = username
self.password = password
def check_auth_header(self, auth_header):
method, _, auth_data = auth_header.partition(' ')
if method == 'Basic':
username, _, password = base64.b64decode(auth_data).partition(':')
if username == self.username and password == self.password:
return True
return False
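    # Example of the header this check expects (credentials illustrative only):
    #   Proxy-Authorization: Basic dXNlcjpwYXNz   ('user:pass' base64-encoded)
    # check_auth_header returns True only when the decoded username and
    # password both match this filter's configured values.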
def filter(self, handler):
if self.white_list and handler.client_address[0] in self.white_list:
return None
auth_header = handler.headers.get('Proxy-Authorization') or getattr(handler, 'auth_header', None)
if auth_header and self.check_auth_header(auth_header):
handler.auth_header = auth_header
else:
headers = {'Access-Control-Allow-Origin': '*',
'Proxy-Authenticate': 'Basic realm="%s"' % self.auth_info,
'Content-Length': '0',
'Connection': 'keep-alive'}
return 'mock', {'status': 407, 'headers': headers, 'body': ''}
class UserAgentFilter(BaseProxyHandlerFilter):
"""user agent filter"""
def __init__(self, user_agent):
self.user_agent = user_agent
def filter(self, handler):
handler.headers['User-Agent'] = self.user_agent
class ForceHttpsFilter(BaseProxyHandlerFilter):
"""force https filter"""
def __init__(self, forcehttps_sites, noforcehttps_sites):
self.forcehttps_sites = tuple(forcehttps_sites)
self.noforcehttps_sites = set(noforcehttps_sites)
def filter(self, handler):
if handler.command != 'CONNECT' and handler.host.endswith(self.forcehttps_sites) and handler.host not in self.noforcehttps_sites:
if not handler.headers.get('Referer', '').startswith('https://') and not handler.path.startswith('https://'):
                logging.debug('ForceHttpsFilter matched %r %r', handler.path, handler.headers)
headers = {'Location': handler.path.replace('http://', 'https://', 1), 'Content-Length': '0'}
return 'mock', {'status': 301, 'headers': headers, 'body': ''}
class FakeHttpsFilter(BaseProxyHandlerFilter):
"""fake https filter"""
def __init__(self, fakehttps_sites, nofakehttps_sites):
self.fakehttps_sites = tuple(fakehttps_sites)
self.nofakehttps_sites = set(nofakehttps_sites)
def filter(self, handler):
if handler.command == 'CONNECT' and handler.host.endswith(self.fakehttps_sites) and handler.host not in self.nofakehttps_sites:
            logging.debug('FakeHttpsFilter matched %r %r', handler.path, handler.headers)
return 'strip', {}
class CRLFSitesFilter(BaseProxyHandlerFilter):
"""crlf sites filter"""
def __init__(self, crlf_sites, nocrlf_sites):
self.crlf_sites = tuple(crlf_sites)
self.nocrlf_sites = set(nocrlf_sites)
def filter(self, handler):
if handler.command != 'CONNECT' and handler.scheme != 'https':
if handler.host.endswith(self.crlf_sites) and handler.host not in self.nocrlf_sites:
                logging.debug('CRLFSitesFilter matched %r %r', handler.path, handler.headers)
handler.close_connection = True
return 'direct', {'crlf': True}
class URLRewriteFilter(BaseProxyHandlerFilter):
"""url rewrite filter"""
def __init__(self, urlrewrite_map, forcehttps_sites, noforcehttps_sites):
self.urlrewrite_map = {}
for regex, repl in urlrewrite_map.items():
mo = re.search(r'://([^/:]+)', regex)
if not mo:
logging.warning('URLRewriteFilter does not support regex: %r', regex)
continue
addr = mo.group(1).replace(r'\.', '.')
mo = re.match(r'[\w\-\_\d\[\]\:]+', addr)
if not mo:
logging.warning('URLRewriteFilter does not support wildcard host: %r', addr)
self.urlrewrite_map.setdefault(addr, []).append((re.compile(regex).search, repl))
self.forcehttps_sites = tuple(forcehttps_sites)
self.noforcehttps_sites = set(noforcehttps_sites)
def filter(self, handler):
if handler.host not in self.urlrewrite_map:
return
for match, repl in self.urlrewrite_map[handler.host]:
mo = match(handler.path)
if mo:
                logging.debug('URLRewriteFilter matched %r', handler.path)
if repl.startswith('file://'):
return self.filter_localfile(handler, mo, repl)
else:
return self.filter_redirect(handler, mo, repl)
def filter_redirect(self, handler, mo, repl):
for i, g in enumerate(mo.groups()):
repl = repl.replace('$%d' % (i+1), urllib.unquote_plus(g))
if repl.startswith('http://') and self.forcehttps_sites:
hostname = urlparse.urlsplit(repl).hostname
if hostname.endswith(self.forcehttps_sites) and hostname not in self.noforcehttps_sites:
repl = 'https://%s' % repl[len('http://'):]
headers = {'Location': repl, 'Content-Length': '0'}
return 'mock', {'status': 302, 'headers': headers, 'body': ''}
def filter_localfile(self, handler, mo, repl):
        filename = repl[len('file://'):]
if filename.lower() in ('/dev/null', 'nul'):
filename = os.devnull
if os.name == 'nt':
filename = filename.lstrip('/')
content_type = None
try:
import mimetypes
content_type = mimetypes.types_map.get(os.path.splitext(filename)[1])
except StandardError as e:
logging.error('import mimetypes failed: %r', e)
try:
with open(filename, 'rb') as fp:
data = fp.read()
headers = {'Connection': 'close', 'Content-Length': str(len(data))}
if content_type:
headers['Content-Type'] = content_type
return 'mock', {'status': 200, 'headers': headers, 'body': data}
except StandardError as e:
return 'mock', {'status': 403, 'headers': {'Connection': 'close'}, 'body': 'read %r %r' % (filename, e)}
class AutoRangeFilter(BaseProxyHandlerFilter):
"""auto range filter"""
def __init__(self, hosts_patterns, endswith_exts, noendswith_exts, maxsize):
self.hosts_match = [re.compile(fnmatch.translate(h)).match for h in hosts_patterns]
self.endswith_exts = tuple(endswith_exts)
self.noendswith_exts = tuple(noendswith_exts)
self.maxsize = int(maxsize)
def filter(self, handler):
path = urlparse.urlsplit(handler.path).path
need_autorange = any(x(handler.host) for x in self.hosts_match) or path.endswith(self.endswith_exts)
if path.endswith(self.noendswith_exts) or 'range=' in urlparse.urlsplit(path).query or handler.command == 'HEAD':
return None
if handler.command != 'HEAD' and handler.headers.get('Range'):
m = re.search(r'bytes=(\d+)-', handler.headers['Range'])
start = int(m.group(1) if m else 0)
handler.headers['Range'] = 'bytes=%d-%d' % (start, start+self.maxsize-1)
logging.info('autorange range=%r match url=%r', handler.headers['Range'], handler.path)
elif need_autorange:
logging.info('Found [autorange]endswith match url=%r', handler.path)
m = re.search(r'bytes=(\d+)-', handler.headers.get('Range', ''))
start = int(m.group(1) if m else 0)
handler.headers['Range'] = 'bytes=%d-%d' % (start, start+self.maxsize-1)
class StaticFileFilter(BaseProxyHandlerFilter):
"""static file filter"""
index_file = 'index.html'
allow_exts = ['.crt', '.pac', '.crx', '.bak', '.htm', '.html', '.js', '.css', '.png', '.gif', '.jpg']
def format_index_html(self, dirname):
INDEX_TEMPLATE = u'''
<html>
<title>Directory listing for $dirname</title>
<body>
<h2>Directory listing for $dirname</h2>
<hr>
<ul>
$html
</ul>
<hr>
</body></html>
'''
html = ''
if not isinstance(dirname, unicode):
dirname = dirname.decode(sys.getfilesystemencoding())
for name in os.listdir(dirname):
if os.path.splitext(name)[1] not in self.allow_exts:
continue
fullname = os.path.join(dirname, name)
suffix = u'/' if os.path.isdir(fullname) else u''
html += u'<li><a href="%s%s">%s%s</a>\r\n' % (name, suffix, name, suffix)
return string.Template(INDEX_TEMPLATE).substitute(dirname=dirname, html=html)
def filter(self, handler):
path = urlparse.urlsplit(handler.path).path
if path.startswith('/'):
path = urllib.unquote_plus(path.lstrip('/') or '.').decode('utf8')
path = '/'.join(x for x in path.split('/') if x != '..')
if os.path.isdir(path):
index_file = os.path.join(path, self.index_file)
if not os.path.isfile(index_file):
content = self.format_index_html(path).encode('UTF-8')
headers = {'Content-Type': 'text/html; charset=utf-8', 'Connection': 'close'}
return 'mock', {'status': 200, 'headers': headers, 'body': content}
else:
path = index_file
if os.path.isfile(path):
if os.path.splitext(path)[1] not in self.allow_exts:
                    return 'mock', {'status': 403, 'body': '403 Forbidden'}
content_type = 'application/octet-stream'
try:
import mimetypes
content_type = mimetypes.types_map.get(os.path.splitext(path)[1])
if os.path.splitext(path)[1].endswith(('crt', 'pem')):
content_type = 'application/x-x509-ca-cert'
except StandardError as e:
logging.error('import mimetypes failed: %r', e)
with open(path, 'rb') as fp:
content = fp.read()
headers = {'Connection': 'close', 'Content-Type': content_type}
return 'mock', {'status': 200, 'headers': headers, 'body': content}
class BlackholeFilter(BaseProxyHandlerFilter):
"""blackhole filter"""
one_pixel_gif = 'GIF89a\x01\x00\x01\x00\x80\xff\x00\xc0\xc0\xc0\x00\x00\x00!\xf9\x04\x01\x00\x00\x00\x00,\x00\x00\x00\x00\x01\x00\x01\x00\x00\x02\x02D\x01\x00;'
def filter(self, handler):
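        # Swallow the request: strip CONNECT tunnels, mock image URLs with a 1x1 GIF, other URLs with an empty 200, anything else with a 404.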
if handler.command == 'CONNECT':
return 'strip', {}
elif handler.path.startswith(('http://', 'https://')):
headers = {'Cache-Control': 'max-age=86400',
'Expires': 'Oct, 01 Aug 2100 00:00:00 GMT',
'Connection': 'close'}
content = ''
if urlparse.urlsplit(handler.path).path.lower().endswith(('.jpg', '.gif', '.png', '.jpeg', '.bmp')):
headers['Content-Type'] = 'image/gif'
content = self.one_pixel_gif
return 'mock', {'status': 200, 'headers': headers, 'body': content}
else:
return 'mock', {'status': 404, 'headers': {'Connection': 'close'}, 'body': ''}
class Net2(object):
"""getaliasbyname/gethostsbyname/create_tcp_connection/create_ssl_connection/create_http_request"""
skip_headers = frozenset(['Vary',
'Via',
'X-Forwarded-For',
'Proxy-Authorization',
'Proxy-Connection',
'Upgrade',
'X-Chrome-Variations',
'Connection',
'Cache-Control'])
def getaliasbyname(self, name):
return None
def gethostsbyname(self, hostname):
return socket.gethostbyname_ex(hostname)[-1]
def create_tcp_connection(self, hostname, port, timeout, **kwargs):
sock = socket.create_connection((hostname, port), timeout)
data = kwargs.get('client_hello')
if data:
sock.send(data)
return sock
def create_ssl_connection(self, hostname, port, timeout, **kwargs):
sock = self.create_tcp_connection(hostname, port, timeout, **kwargs)
ssl_sock = ssl.wrap_socket(sock)
return ssl_sock
def create_http_request(self, method, url, headers, body, timeout, **kwargs):
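        # Send the request with httplib over a new connection, dropping the hop-by-hop headers listed in skip_headers.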
scheme, netloc, path, query, _ = urlparse.urlsplit(url)
if netloc.rfind(':') <= netloc.rfind(']'):
# no port number
host = netloc
port = 443 if scheme == 'https' else 80
else:
host, _, port = netloc.rpartition(':')
port = int(port)
if query:
path += '?' + query
if 'Host' not in headers:
headers['Host'] = host
if body and 'Content-Length' not in headers:
headers['Content-Length'] = str(len(body))
headers = dict((k.title(), v) for k, v in headers.items() if k.title() not in self.skip_headers)
ConnectionType = httplib.HTTPSConnection if scheme == 'https' else httplib.HTTPConnection
connection = ConnectionType(netloc, timeout=timeout)
connection.request(method, path, body=body, headers=headers)
response = connection.getresponse()
return response
class ProxyNet2(Net2):
"""Proxy Connection Mixin"""
def __init__(self, proxy_host, proxy_port, proxy_username='', proxy_password=''):
self.proxy_host = proxy_host
self.proxy_port = proxy_port
self.proxy_username = proxy_username
self.proxy_password = proxy_password
def gethostsbyname(self, hostname):
try:
return socket.gethostbyname_ex(hostname)[-1]
except socket.error:
return [hostname]
def create_tcp_connection(self, hostname, port, timeout, **kwargs):
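        # Open a tunnel through the upstream proxy with an HTTP CONNECT request (plus optional basic auth).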
sock = socket.create_connection((self.proxy_host, int(self.proxy_port)))
if hostname.endswith('.appspot.com'):
hostname = 'www.google.com'
request_data = 'CONNECT %s:%s HTTP/1.1\r\n' % (hostname, port)
if self.proxy_username and self.proxy_password:
request_data += 'Proxy-Authorization: Basic %s\r\n' % base64.b64encode(('%s:%s' % (self.proxy_username, self.proxy_password)).encode()).decode().strip()
request_data += '\r\n'
sock.sendall(request_data)
response = httplib.HTTPResponse(sock)
response.fp.close()
response.fp = sock.makefile('rb', 0)
response.begin()
if response.status >= 400:
raise httplib.BadStatusLine('%s %s %s' % (response.version, response.status, response.reason))
return sock
def create_ssl_connection(self, hostname, port, timeout, **kwargs):
sock = self.create_tcp_connection(hostname, port, timeout, **kwargs)
ssl_sock = ssl.wrap_socket(sock)
return ssl_sock
class AdvancedNet2(Net2):
"""getaliasbyname/gethostsbyname/create_tcp_connection/create_ssl_connection/create_http_request"""
def __init__(self, window=4, connect_timeout=6, timeout=8, ssl_version='TLSv1', dns_servers=['8.8.8.8', '114.114.114.114'], dns_blacklist=[], dns_cachesize=64*1024):
self.max_window = window
self.connect_timeout = connect_timeout
self.timeout = timeout
self.ssl_version = getattr(ssl, 'PROTOCOL_%s' % ssl_version)
self.openssl_context = OpenSSL.SSL.Context(getattr(OpenSSL.SSL, '%s_METHOD' % ssl_version))
self.dns_servers = dns_servers
self.dns_blacklist = dns_blacklist
self.dns_cache = LRUCache(dns_cachesize)
self.tcp_connection_time = collections.defaultdict(float)
self.tcp_connection_time_with_clienthello = collections.defaultdict(float)
self.tcp_connection_cache = collections.defaultdict(Queue.PriorityQueue)
self.tcp_connection_good_ipaddrs = {}
self.tcp_connection_bad_ipaddrs = {}
self.tcp_connection_unknown_ipaddrs = {}
self.tcp_connection_cachesock = False
self.tcp_connection_keepalive = False
self.ssl_connection_time = collections.defaultdict(float)
self.ssl_connection_cache = collections.defaultdict(Queue.PriorityQueue)
self.ssl_connection_good_ipaddrs = {}
self.ssl_connection_bad_ipaddrs = {}
self.ssl_connection_unknown_ipaddrs = {}
self.ssl_connection_cachesock = False
self.ssl_connection_keepalive = False
self.iplist_alias = {}
self.fixed_iplist = set([])
self.host_map = collections.OrderedDict()
self.host_postfix_map = collections.OrderedDict()
self.host_postfix_endswith = tuple()
self.hostport_map = collections.OrderedDict()
self.hostport_postfix_map = collections.OrderedDict()
self.hostport_postfix_endswith = tuple()
self.urlre_map = collections.OrderedDict()
def getaliasbyname(self, name):
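        # Map a URL or host[:port] to a configured alias via the urlre/hostport/host rule tables (postfix rules match suffixes).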
if '://' in name:
if self.urlre_map:
try:
return next(self.urlre_map[x] for x in self.urlre_map if x(name))
except StopIteration:
pass
name = urlparse.urlsplit(name).netloc
mo = re.search(r'^(.+):(\d+)$', name)
if mo:
try:
return self.hostport_map[name]
except LookupError:
pass
if name.endswith(self.hostport_postfix_endswith):
self.hostport_map[name] = alias = next(self.hostport_postfix_map[x] for x in self.hostport_postfix_map if name.endswith(x))
return alias
name = mo.group(1).strip('[]')
try:
return self.host_map[name]
except LookupError:
pass
if name.endswith(self.host_postfix_endswith):
self.host_map[name] = alias = next(self.host_postfix_map[x] for x in self.host_postfix_map if name.endswith(x))
return alias
return None
def gethostsbyname(self, hostname):
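        # Resolve the hostname via the configured DNS servers (UDP first, TCP fallback) and memoize the result in dns_cache.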
try:
iplist = self.dns_cache[hostname]
except KeyError:
if re.match(r'^\d+\.\d+\.\d+\.\d+$', hostname) or ':' in hostname:
iplist = [hostname]
elif self.dns_servers:
try:
record = dnslib_resolve_over_udp(hostname, self.dns_servers, timeout=2, blacklist=self.dns_blacklist)
except socket.gaierror:
record = dnslib_resolve_over_tcp(hostname, self.dns_servers, timeout=2, blacklist=self.dns_blacklist)
iplist = dnslib_record2iplist(record)
else:
iplist = socket.gethostbyname_ex(hostname)[-1]
self.dns_cache[hostname] = iplist
return iplist
def create_tcp_connection(self, hostname, port, timeout, **kwargs):
client_hello = kwargs.get('client_hello', None)
cache_key = kwargs.get('cache_key', '') if not client_hello else ''
def create_connection(ipaddr, timeout, queobj):
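            # Try one TCP connection to ipaddr, record its timing and health, then report the socket (or the exception) via queobj.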
            sock = None
try:
# create a ipv4/ipv6 socket object
sock = socket.socket(socket.AF_INET if ':' not in ipaddr[0] else socket.AF_INET6)
# set reuseaddr option to avoid 10048 socket error
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# set struct linger{l_onoff=1,l_linger=0} to avoid 10048 socket error
sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0))
                # resize socket recv buffer 8K->32K to improve browser related application performance
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 32*1024)
                # disable Nagle's algorithm to send the http request quickly.
sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, True)
# set a short timeout to trigger timeout retry more quickly.
sock.settimeout(min(self.connect_timeout, timeout))
# start connection time record
start_time = time.time()
# TCP connect
sock.connect(ipaddr)
# end connection time record
connected_time = time.time()
# record TCP connection time
self.tcp_connection_time[ipaddr] = sock.tcp_time = connected_time - start_time
if gevent and isinstance(sock, gevent.socket.socket):
sock.tcp_time = connected_time - start_time
if client_hello:
sock.sendall(client_hello)
if gevent and isinstance(sock, gevent.socket.socket):
sock.data = data = sock.recv(4096)
else:
data = sock.recv(4096, socket.MSG_PEEK)
if not data:
logging.debug('create_tcp_connection %r with client_hello return NULL byte, continue %r', ipaddr, time.time()-start_time)
raise socket.timeout('timed out')
# record TCP connection time with client hello
self.tcp_connection_time_with_clienthello[ipaddr] = time.time() - start_time
# remove from bad/unknown ipaddrs dict
self.tcp_connection_bad_ipaddrs.pop(ipaddr, None)
self.tcp_connection_unknown_ipaddrs.pop(ipaddr, None)
# add to good ipaddrs dict
if ipaddr not in self.tcp_connection_good_ipaddrs:
self.tcp_connection_good_ipaddrs[ipaddr] = connected_time
                # put tcp socket object to output queobj
queobj.put(sock)
except (socket.error, ssl.SSLError, OSError) as e:
                # any socket.error, put Exceptions to output queobj.
queobj.put(e)
# reset a large and random timeout to the ipaddr
self.tcp_connection_time[ipaddr] = self.connect_timeout + random.random()
# add to bad ipaddrs dict
if ipaddr not in self.tcp_connection_bad_ipaddrs:
self.tcp_connection_bad_ipaddrs[ipaddr] = time.time()
# remove from good/unknown ipaddrs dict
self.tcp_connection_good_ipaddrs.pop(ipaddr, None)
self.tcp_connection_unknown_ipaddrs.pop(ipaddr, None)
                # close tcp socket
if sock:
sock.close()
def close_connection(count, queobj, first_tcp_time):
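            # Drain the remaining connection attempts; recycle sufficiently fast sockets into the cache, close the rest.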
for _ in range(count):
sock = queobj.get()
tcp_time_threshold = min(1, 1.3 * first_tcp_time)
if sock and hasattr(sock, 'getpeername'):
if cache_key and (sock.getpeername()[0] in self.fixed_iplist or self.tcp_connection_cachesock) and sock.tcp_time < tcp_time_threshold:
cache_queue = self.tcp_connection_cache[cache_key]
if cache_queue.qsize() < 8:
try:
_, old_sock = cache_queue.get_nowait()
old_sock.close()
except Queue.Empty:
pass
cache_queue.put((time.time(), sock))
else:
sock.close()
def reorg_ipaddrs():
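            # Age out stale entries: demote long-standing good/bad addresses back to unknown so they get re-probed.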
current_time = time.time()
for ipaddr, ctime in self.tcp_connection_good_ipaddrs.items():
if current_time - ctime > 4 * 60 and len(self.tcp_connection_good_ipaddrs) > 2 * self.max_window and ipaddr[0] not in self.fixed_iplist:
self.tcp_connection_good_ipaddrs.pop(ipaddr, None)
self.tcp_connection_unknown_ipaddrs[ipaddr] = ctime
for ipaddr, ctime in self.tcp_connection_bad_ipaddrs.items():
if current_time - ctime > 6 * 60:
self.tcp_connection_bad_ipaddrs.pop(ipaddr, None)
self.tcp_connection_unknown_ipaddrs[ipaddr] = ctime
logging.info("tcp good_ipaddrs=%d, bad_ipaddrs=%d, unknown_ipaddrs=%d", len(self.tcp_connection_good_ipaddrs), len(self.tcp_connection_bad_ipaddrs), len(self.tcp_connection_unknown_ipaddrs))
try:
while cache_key:
ctime, sock = self.tcp_connection_cache[cache_key].get_nowait()
if time.time() - ctime < self.connect_timeout:
return sock
else:
sock.close()
except Queue.Empty:
pass
addresses = [(x, port) for x in self.iplist_alias.get(self.getaliasbyname('%s:%d' % (hostname, port))) or self.gethostsbyname(hostname)]
#logging.info('gethostsbyname(%r) return %d addresses', hostname, len(addresses))
sock = None
for i in range(kwargs.get('max_retry', 4)):
reorg_ipaddrs()
window = self.max_window + i
if len(self.ssl_connection_good_ipaddrs) > len(self.ssl_connection_bad_ipaddrs):
window = max(2, window-2)
if len(self.tcp_connection_bad_ipaddrs)/2 >= len(self.tcp_connection_good_ipaddrs) <= 1.5 * window:
window += 2
good_ipaddrs = [x for x in addresses if x in self.tcp_connection_good_ipaddrs]
good_ipaddrs = sorted(good_ipaddrs, key=self.tcp_connection_time.get)[:window]
unknown_ipaddrs = [x for x in addresses if x not in self.tcp_connection_good_ipaddrs and x not in self.tcp_connection_bad_ipaddrs]
random.shuffle(unknown_ipaddrs)
unknown_ipaddrs = unknown_ipaddrs[:window]
bad_ipaddrs = [x for x in addresses if x in self.tcp_connection_bad_ipaddrs]
bad_ipaddrs = sorted(bad_ipaddrs, key=self.tcp_connection_bad_ipaddrs.get)[:window]
addrs = good_ipaddrs + unknown_ipaddrs + bad_ipaddrs
remain_window = 3 * window - len(addrs)
if 0 < remain_window <= len(addresses):
addrs += random.sample(addresses, remain_window)
logging.debug('%s good_ipaddrs=%d, unknown_ipaddrs=%r, bad_ipaddrs=%r', cache_key, len(good_ipaddrs), len(unknown_ipaddrs), len(bad_ipaddrs))
queobj = Queue.Queue()
for addr in addrs:
thread.start_new_thread(create_connection, (addr, timeout, queobj))
for i in range(len(addrs)):
sock = queobj.get()
if hasattr(sock, 'getpeername'):
spawn_later(0.01, close_connection, len(addrs)-i-1, queobj, getattr(sock, 'tcp_time') or self.tcp_connection_time[sock.getpeername()])
return sock
elif i == 0:
# only output first error
logging.warning('create_tcp_connection to %r with %s return %r, try again.', hostname, addrs, sock)
if not hasattr(sock, 'getpeername'):
raise sock
def create_ssl_connection(self, hostname, port, timeout, **kwargs):
cache_key = kwargs.get('cache_key', '')
validate = kwargs.get('validate')
headfirst = kwargs.get('headfirst')
def create_connection(ipaddr, timeout, queobj):
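            # Try one SSL connection to ipaddr with the ssl module: connect, handshake, optionally validate the certificate and probe with a HEAD request, then report via queobj.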
sock = None
ssl_sock = None
try:
# create a ipv4/ipv6 socket object
sock = socket.socket(socket.AF_INET if ':' not in ipaddr[0] else socket.AF_INET6)
# set reuseaddr option to avoid 10048 socket error
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# set struct linger{l_onoff=1,l_linger=0} to avoid 10048 socket error
sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0))
                # resize socket recv buffer 8K->32K to improve browser related application performance
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 32*1024)
                # disable Nagle's algorithm to send the http request quickly.
sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, True)
# set a short timeout to trigger timeout retry more quickly.
sock.settimeout(min(self.connect_timeout, timeout))
# pick up the certificate
if not validate:
ssl_sock = ssl.wrap_socket(sock, ssl_version=self.ssl_version, do_handshake_on_connect=False)
else:
ssl_sock = ssl.wrap_socket(sock, ssl_version=self.ssl_version, cert_reqs=ssl.CERT_REQUIRED, ca_certs=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'cacert.pem'), do_handshake_on_connect=False)
ssl_sock.settimeout(min(self.connect_timeout, timeout))
# start connection time record
start_time = time.time()
# TCP connect
ssl_sock.connect(ipaddr)
connected_time = time.time()
# SSL handshake
ssl_sock.do_handshake()
handshaked_time = time.time()
# record TCP connection time
self.tcp_connection_time[ipaddr] = ssl_sock.tcp_time = connected_time - start_time
# record SSL connection time
self.ssl_connection_time[ipaddr] = ssl_sock.ssl_time = handshaked_time - start_time
# sometimes, we want to use raw tcp socket directly(select/epoll), so setattr it to ssl socket.
ssl_sock.sock = sock
# remove from bad/unknown ipaddrs dict
self.ssl_connection_bad_ipaddrs.pop(ipaddr, None)
self.ssl_connection_unknown_ipaddrs.pop(ipaddr, None)
# add to good ipaddrs dict
if ipaddr not in self.ssl_connection_good_ipaddrs:
self.ssl_connection_good_ipaddrs[ipaddr] = handshaked_time
# verify SSL certificate issuer.
if validate and (hostname.endswith('.appspot.com') or '.google' in hostname):
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_ASN1, ssl_sock.getpeercert(True))
issuer_commonname = next((v for k, v in cert.get_issuer().get_components() if k == 'CN'), '')
if not issuer_commonname.startswith('Google'):
                        raise socket.error('%r certificate is issued by %r, not Google' % (hostname, issuer_commonname))
# set timeout
ssl_sock.settimeout(timeout)
# do head first check
if headfirst:
ssl_sock.send('HEAD /favicon.ico HTTP/1.1\r\nHost: %s\r\n\r\n' % hostname)
response = httplib.HTTPResponse(ssl_sock, buffering=True)
try:
if gevent:
with gevent.Timeout(timeout):
response.begin()
else:
response.begin()
except gevent.Timeout:
ssl_sock.close()
raise socket.timeout('timed out')
finally:
response.close()
# put ssl socket object to output queobj
queobj.put(ssl_sock)
except (socket.error, ssl.SSLError, OSError) as e:
                # any socket.error, put Exceptions to output queobj.
queobj.put(e)
# reset a large and random timeout to the ipaddr
self.ssl_connection_time[ipaddr] = self.connect_timeout + random.random()
# add to bad ipaddrs dict
if ipaddr[0] in self.fixed_iplist:
logging.warn('bad IP: %s (%r)', ipaddr, e)
if ipaddr not in self.ssl_connection_bad_ipaddrs:
self.ssl_connection_bad_ipaddrs[ipaddr] = time.time()
# remove from good/unknown ipaddrs dict
self.ssl_connection_good_ipaddrs.pop(ipaddr, None)
self.ssl_connection_unknown_ipaddrs.pop(ipaddr, None)
# close ssl socket
if ssl_sock:
ssl_sock.close()
# close tcp socket
if sock:
sock.close()
def create_connection_withopenssl(ipaddr, timeout, queobj):
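            # Same as create_connection but via pyOpenSSL, optionally sending a random SNI hostname for appspot/google_ cache keys.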
sock = None
ssl_sock = None
timer = None
NetworkError = (socket.error, OpenSSL.SSL.Error, OSError)
if gevent and (ipaddr[0] not in self.fixed_iplist):
NetworkError += (gevent.Timeout,)
#timer = gevent.Timeout(timeout)
#timer.start()
try:
# create a ipv4/ipv6 socket object
sock = socket.socket(socket.AF_INET if ':' not in ipaddr[0] else socket.AF_INET6)
# set reuseaddr option to avoid 10048 socket error
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# set struct linger{l_onoff=1,l_linger=0} to avoid 10048 socket error
sock.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', 1, 0))
                # resize socket recv buffer 8K->32K to improve browser related application performance
                sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 32*1024)
                # disable Nagle's algorithm to send the http request quickly.
sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, True)
# set a short timeout to trigger timeout retry more quickly.
sock.settimeout(timeout or self.connect_timeout)
# pick up the certificate
server_hostname = random_hostname() if (cache_key or '').startswith('google_') or hostname.endswith('.appspot.com') else None
ssl_sock = SSLConnection(self.openssl_context, sock)
ssl_sock.set_connect_state()
if server_hostname and hasattr(ssl_sock, 'set_tlsext_host_name'):
ssl_sock.set_tlsext_host_name(server_hostname)
# start connection time record
start_time = time.time()
# TCP connect
ssl_sock.connect(ipaddr)
connected_time = time.time()
# SSL handshake
ssl_sock.do_handshake()
handshaked_time = time.time()
# record TCP connection time
self.tcp_connection_time[ipaddr] = ssl_sock.tcp_time = connected_time - start_time
# record SSL connection time
self.ssl_connection_time[ipaddr] = ssl_sock.ssl_time = handshaked_time - start_time
# sometimes, we want to use raw tcp socket directly(select/epoll), so setattr it to ssl socket.
ssl_sock.sock = sock
# remove from bad/unknown ipaddrs dict
self.ssl_connection_bad_ipaddrs.pop(ipaddr, None)
self.ssl_connection_unknown_ipaddrs.pop(ipaddr, None)
# add to good ipaddrs dict
if ipaddr not in self.ssl_connection_good_ipaddrs:
self.ssl_connection_good_ipaddrs[ipaddr] = handshaked_time
# verify SSL certificate issuer.
if validate and (hostname.endswith('.appspot.com') or '.google' in hostname):
cert = ssl_sock.get_peer_certificate()
issuer_commonname = next((v for k, v in cert.get_issuer().get_components() if k == 'CN'), '')
if not issuer_commonname.startswith('Google'):
                        raise socket.error('%r certificate is issued by %r, not Google' % (hostname, issuer_commonname))
# do head first check
if headfirst:
ssl_sock.send('HEAD /favicon.ico HTTP/1.1\r\nHost: %s\r\n\r\n' % hostname)
response = httplib.HTTPResponse(ssl_sock, buffering=True)
try:
if gevent:
with gevent.Timeout(timeout):
response.begin()
else:
response.begin()
except gevent.Timeout:
ssl_sock.close()
raise socket.timeout('timed out')
finally:
response.close()
# put ssl socket object to output queobj
queobj.put(ssl_sock)
except NetworkError as e:
                # any socket.error, put Exceptions to output queobj.
queobj.put(e)
# reset a large and random timeout to the ipaddr
self.ssl_connection_time[ipaddr] = self.connect_timeout + random.random()
# add to bad ipaddrs dict
if ipaddr[0] in self.fixed_iplist:
logging.warn('bad IP: %s (%r)', ipaddr, e)
if ipaddr not in self.ssl_connection_bad_ipaddrs:
self.ssl_connection_bad_ipaddrs[ipaddr] = time.time()
# remove from good/unknown ipaddrs dict
self.ssl_connection_good_ipaddrs.pop(ipaddr, None)
self.ssl_connection_unknown_ipaddrs.pop(ipaddr, None)
# close ssl socket
if ssl_sock:
ssl_sock.close()
# close tcp socket
if sock:
sock.close()
finally:
if timer:
timer.cancel()
def close_connection(count, queobj, first_tcp_time, first_ssl_time):
for _ in range(count):
sock = queobj.get()
ssl_time_threshold = min(1, 1.3 * first_ssl_time)
if sock and hasattr(sock, 'getpeername'):
if cache_key and (sock.getpeername()[0] in self.fixed_iplist or self.ssl_connection_cachesock) and sock.ssl_time < ssl_time_threshold:
cache_queue = self.ssl_connection_cache[cache_key]
if cache_queue.qsize() < 8:
try:
_, old_sock = cache_queue.get_nowait()
old_sock.close()
except Queue.Empty:
pass
cache_queue.put((time.time(), sock))
else:
sock.close()
def reorg_ipaddrs():
current_time = time.time()
for ipaddr, ctime in self.ssl_connection_good_ipaddrs.items():
if current_time - ctime > 4 * 60 and len(self.ssl_connection_good_ipaddrs) > 2 * self.max_window and ipaddr[0] not in self.fixed_iplist:
self.ssl_connection_good_ipaddrs.pop(ipaddr, None)
self.ssl_connection_unknown_ipaddrs[ipaddr] = ctime
for ipaddr, ctime in self.ssl_connection_bad_ipaddrs.items():
if current_time - ctime > 6 * 60:
self.ssl_connection_bad_ipaddrs.pop(ipaddr, None)
self.ssl_connection_unknown_ipaddrs[ipaddr] = ctime
logging.info("ssl good_ipaddrs=%d, bad_ipaddrs=%d, unknown_ipaddrs=%d", len(self.ssl_connection_good_ipaddrs), len(self.ssl_connection_bad_ipaddrs), len(self.ssl_connection_unknown_ipaddrs))
try:
while cache_key:
ctime, sock = self.ssl_connection_cache[cache_key].get_nowait()
if time.time() - ctime < self.connect_timeout:
return sock
else:
sock.close()
except Queue.Empty:
pass
addresses = [(x, port) for x in self.iplist_alias.get(self.getaliasbyname('%s:%d' % (hostname, port))) or self.gethostsbyname(hostname)]
#logging.info('gethostsbyname(%r) return %d addresses', hostname, len(addresses))
sock = None
for i in range(kwargs.get('max_retry', 4)):
reorg_ipaddrs()
good_ipaddrs = sorted([x for x in addresses if x in self.ssl_connection_good_ipaddrs], key=self.ssl_connection_time.get)
bad_ipaddrs = sorted([x for x in addresses if x in self.ssl_connection_bad_ipaddrs], key=self.ssl_connection_bad_ipaddrs.get)
unknown_ipaddrs = [x for x in addresses if x not in self.ssl_connection_good_ipaddrs and x not in self.ssl_connection_bad_ipaddrs]
random.shuffle(unknown_ipaddrs)
window = self.max_window + i
if len(bad_ipaddrs) < 0.2 * len(good_ipaddrs) and len(good_ipaddrs) > 10:
addrs = good_ipaddrs[:window]
addrs += [random.choice(unknown_ipaddrs)] if unknown_ipaddrs else []
elif len(good_ipaddrs) > 2 * window or len(bad_ipaddrs) < 0.5 * len(good_ipaddrs):
addrs = (good_ipaddrs[:window] + unknown_ipaddrs + bad_ipaddrs)[:2*window]
else:
addrs = good_ipaddrs[:window] + unknown_ipaddrs[:window] + bad_ipaddrs[:window]
addrs += random.sample(addresses, min(len(addresses), 3*window-len(addrs))) if len(addrs) < 3*window else []
logging.debug('%s good_ipaddrs=%d, unknown_ipaddrs=%r, bad_ipaddrs=%r', cache_key, len(good_ipaddrs), len(unknown_ipaddrs), len(bad_ipaddrs))
queobj = Queue.Queue()
for addr in addrs:
if sys.platform != 'win32':
# Workaround for CPU 100% issue under MacOSX/Linux
thread.start_new_thread(create_connection, (addr, timeout, queobj))
else:
thread.start_new_thread(create_connection_withopenssl, (addr, timeout, queobj))
errors = []
for i in range(len(addrs)):
sock = queobj.get()
if hasattr(sock, 'getpeername'):
spawn_later(0.01, close_connection, len(addrs)-i-1, queobj, sock.tcp_time, sock.ssl_time)
return sock
else:
errors.append(sock)
if i == len(addrs) - 1:
logging.warning('create_ssl_connection to %r with %s return %s, try again.', hostname, addrs, collections.OrderedDict.fromkeys(str(x) for x in errors).keys())
if not hasattr(sock, 'getpeername'):
raise sock
def create_http_request(self, method, url, headers, body, timeout, max_retry=2, bufsize=8192, crlf=None, validate=None, cache_key=None, headfirst=False, **kwargs):
scheme, netloc, path, query, _ = urlparse.urlsplit(url)
if netloc.rfind(':') <= netloc.rfind(']'):
# no port number
host = netloc
port = 443 if scheme == 'https' else 80
else:
host, _, port = netloc.rpartition(':')
port = int(port)
if query:
path += '?' + query
if 'Host' not in headers:
headers['Host'] = host
if body and 'Content-Length' not in headers:
headers['Content-Length'] = str(len(body))
sock = None
for i in range(max_retry):
try:
create_connection = self.create_ssl_connection if scheme == 'https' else self.create_tcp_connection
sock = create_connection(host, port, timeout, validate=validate, cache_key=cache_key, headfirst=headfirst)
break
except StandardError as e:
logging.exception('create_http_request "%s %s" failed:%s', method, url, e)
if sock:
sock.close()
if i == max_retry - 1:
raise
request_data = ''
crlf_counter = 0
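        # Optional CRLF trick for plain HTTP: pad the stream with throwaway 'GET /' requests (read back and discarded below) before the real request.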
if scheme != 'https' and crlf:
fakeheaders = dict((k.title(), v) for k, v in headers.items())
fakeheaders.pop('Content-Length', None)
fakeheaders.pop('Cookie', None)
fakeheaders.pop('Host', None)
if 'User-Agent' not in fakeheaders:
fakeheaders['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1878.0 Safari/537.36'
if 'Accept-Language' not in fakeheaders:
fakeheaders['Accept-Language'] = 'zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4'
if 'Accept' not in fakeheaders:
fakeheaders['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
fakeheaders_data = ''.join('%s: %s\r\n' % (k, v) for k, v in fakeheaders.items() if k not in self.skip_headers)
while crlf_counter < 5 or len(request_data) < 1500 * 2:
request_data += 'GET / HTTP/1.1\r\n%s\r\n' % fakeheaders_data
crlf_counter += 1
request_data += '\r\n\r\n\r\n'
request_data += '%s %s %s\r\n' % (method, path, 'HTTP/1.1')
request_data += ''.join('%s: %s\r\n' % (k.title(), v) for k, v in headers.items() if k.title() not in self.skip_headers)
request_data += '\r\n'
if isinstance(body, bytes):
sock.sendall(request_data.encode() + body)
elif hasattr(body, 'read'):
sock.sendall(request_data)
while 1:
data = body.read(bufsize)
if not data:
break
sock.sendall(data)
else:
raise TypeError('create_http_request(body) must be a string or buffer, not %r' % type(body))
response = None
try:
while crlf_counter:
if sys.version[:3] == '2.7':
response = httplib.HTTPResponse(sock, buffering=False)
else:
response = httplib.HTTPResponse(sock)
response.fp.close()
response.fp = sock.makefile('rb', 0)
response.begin()
response.read()
response.close()
crlf_counter -= 1
except StandardError as e:
logging.exception('crlf skip read host=%r path=%r error: %r', headers.get('Host'), path, e)
if response:
if response.fp and response.fp._sock:
response.fp._sock.close()
response.close()
if sock:
sock.close()
return None
if sys.version[:3] == '2.7':
response = httplib.HTTPResponse(sock, buffering=True)
else:
response = httplib.HTTPResponse(sock)
response.fp.close()
response.fp = sock.makefile('rb')
if gevent and not headfirst and kwargs.get('read_timeout'):
try:
with gevent.Timeout(int(kwargs.get('read_timeout'))):
response.begin()
except gevent.Timeout:
response.close()
raise socket.timeout('timed out')
else:
orig_timeout = sock.gettimeout()
sock.settimeout(self.connect_timeout)
response.begin()
sock.settimeout(orig_timeout)
if ((scheme == 'https' and self.ssl_connection_cachesock and self.ssl_connection_keepalive) or (scheme == 'http' and self.tcp_connection_cachesock and self.tcp_connection_keepalive)) and cache_key:
response.cache_key = cache_key
response.cache_sock = response.fp._sock
return response
def enable_connection_cache(self, enabled=True):
self.tcp_connection_cachesock = enabled
self.ssl_connection_cachesock = enabled
def enable_connection_keepalive(self, enabled=True):
self.tcp_connection_cachesock = enabled
self.tcp_connection_keepalive = enabled
self.ssl_connection_cachesock = enabled
self.ssl_connection_keepalive = enabled
def enable_openssl_session_cache(self, enabled=True):
if enabled:
openssl_set_session_cache_mode(self.openssl_context, 'client')
def add_iplist_alias(self, name, iplist):
assert isinstance(name, basestring) and isinstance(iplist, list)
self.iplist_alias[name] = list(set(self.iplist_alias.get(name, []) + iplist))
def add_fixed_iplist(self, iplist):
assert isinstance(iplist, list)
self.fixed_iplist.update(iplist)
def add_rule(self, pattern, hosts):
assert isinstance(pattern, basestring) and isinstance(hosts, basestring)
if ':' in pattern and '\\' not in pattern:
if pattern.startswith('.'):
self.hostport_postfix_map[pattern] = hosts
self.hostport_postfix_endswith = tuple(set(self.hostport_postfix_endswith + (pattern,)))
else:
self.hostport_map[pattern] = hosts
elif '\\' in pattern:
self.urlre_map[re.compile(pattern).match] = hosts
else:
if pattern.startswith('.'):
self.host_postfix_map[pattern] = hosts
self.host_postfix_endswith = tuple(set(self.host_postfix_endswith + (pattern,)))
else:
self.host_map[pattern] = hosts
class SimpleProxyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Simple Proxy Handler"""
bufsize = 256*1024
protocol_version = 'HTTP/1.1'
ssl_version = ssl.PROTOCOL_SSLv23
disable_transport_ssl = True
scheme = 'http'
first_run_lock = threading.Lock()
handler_filters = [SimpleProxyHandlerFilter()]
handler_plugins = {'direct': DirectFetchPlugin(),
'mock': MockFetchPlugin(),
'strip': StripPlugin(),}
net2 = Net2()
def finish(self):
"""make python2 BaseHTTPRequestHandler happy"""
try:
BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
except (socket.error, ssl.SSLError, OpenSSL.SSL.Error) as e:
if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET, errno.EPIPE):
raise
def address_string(self):
return '%s:%s' % self.client_address[:2]
def send_response(self, code, message=None):
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ''
if self.request_version != 'HTTP/0.9':
self.wfile.write('%s %d %s\r\n' % (self.protocol_version, code, message))
def send_header(self, keyword, value):
"""Send a MIME header."""
base_send_header = BaseHTTPServer.BaseHTTPRequestHandler.send_header
keyword = keyword.title()
if keyword == 'Set-Cookie':
for cookie in re.split(r', (?=[^ =]+(?:=|$))', value):
base_send_header(self, keyword, cookie)
elif keyword == 'Content-Disposition' and '"' not in value:
value = re.sub(r'filename=([^"\']+)', 'filename="\\1"', value)
base_send_header(self, keyword, value)
else:
base_send_header(self, keyword, value)
def setup(self):
if isinstance(self.__class__.first_run, collections.Callable):
try:
with self.__class__.first_run_lock:
if isinstance(self.__class__.first_run, collections.Callable):
self.first_run()
self.__class__.first_run = None
except StandardError as e:
logging.exception('%s.first_run() return %r', self.__class__, e)
self.__class__.setup = BaseHTTPServer.BaseHTTPRequestHandler.setup
self.__class__.do_CONNECT = self.__class__.do_METHOD
self.__class__.do_GET = self.__class__.do_METHOD
self.__class__.do_PUT = self.__class__.do_METHOD
self.__class__.do_POST = self.__class__.do_METHOD
self.__class__.do_HEAD = self.__class__.do_METHOD
self.__class__.do_DELETE = self.__class__.do_METHOD
self.__class__.do_OPTIONS = self.__class__.do_METHOD
self.__class__.do_PATCH = self.__class__.do_METHOD
self.setup()
def handle_one_request(self):
if not self.disable_transport_ssl and self.scheme == 'http':
leadbyte = self.connection.recv(1, socket.MSG_PEEK)
if leadbyte in ('\x80', '\x16'):
server_name = ''
if leadbyte == '\x16':
for _ in xrange(2):
leaddata = self.connection.recv(1024, socket.MSG_PEEK)
if is_clienthello(leaddata):
try:
server_name = extract_sni_name(leaddata)
finally:
break
try:
certfile = CertUtil.get_cert(server_name or 'www.google.com')
ssl_sock = ssl.wrap_socket(self.connection, ssl_version=self.ssl_version, keyfile=certfile, certfile=certfile, server_side=True)
except StandardError as e:
if e.args[0] not in (errno.ECONNABORTED, errno.ECONNRESET):
logging.exception('ssl.wrap_socket(self.connection=%r) failed: %s', self.connection, e)
return
self.connection = ssl_sock
self.rfile = self.connection.makefile('rb', self.bufsize)
self.wfile = self.connection.makefile('wb', 0)
self.scheme = 'https'
return BaseHTTPServer.BaseHTTPRequestHandler.handle_one_request(self)
def first_run(self):
pass
def parse_header(self):
if self.command == 'CONNECT':
netloc = self.path
elif self.path[0] == '/':
netloc = self.headers.get('Host', 'localhost')
self.path = '%s://%s%s' % (self.scheme, netloc, self.path)
else:
netloc = urlparse.urlsplit(self.path).netloc
m = re.match(r'^(.+):(\d+)$', netloc)
if m:
self.host = m.group(1).strip('[]')
self.port = int(m.group(2))
else:
self.host = netloc
self.port = 443 if self.scheme == 'https' else 80
def do_METHOD(self):
self.parse_header()
self.body = self.rfile.read(int(self.headers['Content-Length'])) if 'Content-Length' in self.headers else ''
for handler_filter in self.handler_filters:
action = handler_filter.filter(self)
if not action:
continue
if not isinstance(action, tuple):
raise TypeError('%s must return a tuple, not %r' % (handler_filter, action))
plugin = self.handler_plugins[action[0]]
return plugin.handle(self, **action[1])
def test():
logging.basicConfig(level=logging.INFO, format='%(levelname)s - %(asctime)s %(message)s', datefmt='[%b %d %H:%M:%S]')
# SimpleProxyHandler.handler_filters.insert(0, MIMTProxyHandlerFilter())
server = LocalProxyServer(('', 8080), SimpleProxyHandler)
logging.info('serving at %r', server.server_address)
server.serve_forever()
if __name__ == '__main__':
test()
|
mit
| -8,006,201,042,803,955,000 | 44.801215 | 252 | 0.569537 | false |
botswana-harvard/bcvp
|
bcvp/bcvp_dashboard/classes/subject_dashboard.py
|
1
|
2829
|
from edc_dashboard.subject import RegisteredSubjectDashboard
from bcvp.bcvp_lab.models import SubjectRequisition
from bcvp.bcvp_subject.models import SubjectVisit, SubjectConsent, SubjectLocator
from bcvp.bcvp_subject.models.subject_eligibility import SubjectEligibility
class SubjectDashboard(RegisteredSubjectDashboard):
view = 'subject_dashboard'
dashboard_url_name = 'subject_dashboard_url'
dashboard_name = 'Subject Dashboard'
urlpattern_view = 'bcvp_dashboard.views'
template_name = 'subject_dashboard.html'
urlpatterns = [
RegisteredSubjectDashboard.urlpatterns[0][:-1] +
'(?P<appointment_code>{appointment_code})/$'] + RegisteredSubjectDashboard.urlpatterns
urlpattern_options = dict(
RegisteredSubjectDashboard.urlpattern_options,
dashboard_model=RegisteredSubjectDashboard.urlpattern_options['dashboard_model'] + '|subject_eligibility',
dashboard_type='subject',
appointment_code='1000', )
def __init__(self, **kwargs):
super(SubjectDashboard, self).__init__(**kwargs)
self.subject_dashboard_url = 'subject_dashboard_url'
self.visit_model = SubjectVisit
self.dashboard_type_list = ['subject']
self.membership_form_category = ['subject']
self.dashboard_models['subject_eligibility'] = SubjectEligibility
self.dashboard_models['subject_consent'] = SubjectConsent
self.dashboard_models['visit'] = SubjectVisit
self.requisition_model = SubjectRequisition
self._locator_model = SubjectLocator
def get_context_data(self, **kwargs):
super(SubjectDashboard, self).get_context_data(**kwargs)
self.context.update(
home='bcvp',
search_name='subject',
title='Subject Dashboard',
subject_dashboard_url=self.subject_dashboard_url,
subject_consent=self.consent,
local_results=self.render_labs(),
)
return self.context
@property
def consent(self):
self._consent = None
try:
self._consent = SubjectConsent.objects.get(subject_identifier=self.subject_identifier)
except SubjectConsent.DoesNotExist:
self._consent = None
return self._consent
def get_locator_scheduled_visit_code(self):
""" Returns visit where the locator is scheduled, TODO: maybe search visit definition for this?."""
return '1000'
@property
def subject_locator(self):
return self.locator_model.objects.get(
subject_visit__appointment__registered_subject__subject_identifier=self.subject_identifier)
@property
def subject_identifier(self):
return self.registered_subject.subject_identifier
@property
def locator_model(self):
return SubjectLocator
|
gpl-2.0
| -9,080,255,707,335,773,000 | 38.291667 | 114 | 0.683634 | false |
mayconbordin/boardhood
|
server_app/api/boardhood/tests/test_model_interests.py
|
1
|
4432
|
import unittest
from config import app
from datetime import datetime
from boardhood.models.base import ValidationException
from boardhood.models.interests import Interest
from boardhood.helpers.validator import is_integer
class TestInterestModel(unittest.TestCase):
def setUp(self):
Interest.db = app.db.get_db()
Interest.logger = app.logger
def testCreate(self):
name = 'teste_%s' % str(datetime.now())
obj = Interest.create(Interest(name=name))
self.assertEqual(obj.name, name)
self.assertTrue(is_integer(obj.id), 'ID was supposed to be int. Value: %s' % str(obj.id))
try:
name = 't' * 255
obj = Interest.create(Interest(name=name))
self.fail("Create interest should not succeed with invalid data")
except ValidationException, e:
self.assertTrue(True)
def testNameExists(self):
self.assertTrue(Interest.nameExists('Photography'))
def testExists(self):
self.assertTrue(Interest.exists(1))
def testFind(self):
obj = Interest.find(1)
self.assertEqual(obj.name, 'Photography')
self.assertTrue(is_integer(obj.id))
def testFollow(self):
self.assertTrue(Interest.follow(1, 6))
def testFollowing(self):
self.assertTrue(Interest.following(1, 6))
def testFollowers(self):
followers = Interest.followers(1)
following = False
for follower in followers:
if follower.id == 6:
following = True
self.assertTrue(following)
def testUnfollow(self):
self.assertTrue(Interest.unfollow(1, 6))
def testCountAll(self):
count = Interest.countAll()
self.assertTrue(is_integer(count))
def testCountAllAround(self):
count = Interest.countAll(location=[-27.86403, -54.4593889], radius=20000)
self.assertTrue(is_integer(count))
def testFindAllRecent(self):
objs = Interest.findAll()
self.assertTrue(len(objs) > 0)
for obj in objs:
self.assertTrue(isinstance(obj, Interest))
def testFindAllPopular(self):
objs = Interest.findAll(order_by='popular')
self.assertTrue(len(objs) > 0)
for obj in objs:
self.assertTrue(isinstance(obj, Interest))
def testFindAllOrderByDistance(self):
objs = Interest.findAll(order_by='distance', location=[-27.86403, -54.4593889])
self.assertTrue(len(objs) > 0)
for obj in objs:
self.assertTrue(isinstance(obj, Interest))
def testFindAllOrderByDistanceAround(self):
objs = Interest.findAll(order_by='distance', location=[-27.86403, -54.4593889], radius=20000)
self.assertTrue(len(objs) > 0)
for obj in objs:
self.assertTrue(isinstance(obj, Interest))
def testFindAllRecentAround(self):
objs = Interest.findAll(location=[-27.86403, -54.4593889], radius=20000)
self.assertTrue(len(objs) > 0)
for obj in objs:
self.assertTrue(isinstance(obj, Interest))
def testFindAllPopularAround(self):
objs = Interest.findAll(order_by='popular', location=[-27.86403, -54.4593889], radius=20000)
self.assertTrue(len(objs) > 0)
for obj in objs:
self.assertTrue(isinstance(obj, Interest))
def testSearchByUser(self):
objs = Interest.searchByUser('d', 1)
self.assertTrue(len(objs) > 0)
for obj in objs:
self.assertTrue(isinstance(obj, Interest))
def testFindAllByUser(self):
objs = Interest.findAllByUser(1)
self.assertTrue(len(objs) > 0)
for obj in objs:
self.assertTrue(isinstance(obj, Interest))
def testCountAllByUser(self):
count = Interest.countAllByUser(1)
self.assertTrue(is_integer(count))
def testFindAllByUserName(self):
objs = Interest.findAllByUserName('john')
self.assertTrue(len(objs) > 0)
for obj in objs:
self.assertTrue(isinstance(obj, Interest))
def testCountAllByUserName(self):
count = Interest.countAllByUserName('john')
self.assertTrue(is_integer(count))
def testSearch(self):
objs = Interest.search('m')
self.assertTrue(len(objs) > 0)
for obj in objs:
self.assertTrue(isinstance(obj, Interest))
if __name__ == '__main__':
unittest.main()
|
mit
| 8,091,921,844,353,246,000 | 30.211268 | 101 | 0.635153 | false |
iulian787/spack
|
var/spack/repos/builtin/packages/ninja-fortran/package.py
|
2
|
3110
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class NinjaFortran(Package):
"""A Fortran capable fork of ninja."""
homepage = "https://github.com/Kitware/ninja"
url = "https://github.com/Kitware/ninja/archive/v1.9.0.g99df1.kitware.dyndep-1.jobserver-1.tar.gz"
# Each version is a fork off of a specific commit of ninja
# Hashes don't sort properly, so added "artificial" tweak-level version
    # number prior to the hashes for sorting purposes
version('1.9.0.2.g99df1', sha256='b7bc3d91e906b92d2e0887639e8ed6b0c45b28e339dda2dbb66c1388c86a9fcf')
version('1.9.0.1.g5b44b', sha256='449359a402c3adccd37f6fece19ce7d7cda586e837fdf50eb7d53597b7f1ce90')
version('1.9.0.0.gad558', sha256='ab486a3ccfb38636bfa61fefb976ddf9a7652f4bf12495a77718b35cc3db61ee')
version('1.8.2.2.g81279', sha256='744a13475ace2c0ff8c8edaf95eb73edf3daf8805e4060b60d18ad4f55bb98aa')
version('1.8.2.1.g3bbbe', sha256='121c432cec32c8aea730a71a256a81442ac8446c6f0e7652ea3121da9e0d482d')
version('1.8.2.0.g972a7', sha256='127db130cb1c711ac4a5bb93d2f2665d304cff5206283332b50bc8ba2eb70d2e')
version('1.7.2.1.gaad58', sha256='fac971edef78fc9f52e47365facb88c5c1c85d6d9c15f4356a1b97352c9ae5f8')
version('1.7.2.0.gcc0ea', sha256='6afa570fa9300833f76e56fa5b01f5a3b7d8a7108f6ad368b067a003d25ef18b')
version('1.7.1.0.g7ca7f', sha256='53472d0c3cf9c1cff7e991699710878be55d21a1c229956dea6a2c3e44edee80')
depends_on('python', type='build')
phases = ['configure', 'install']
def url_for_version(self, version):
        # for some reason the hashes are being stripped from the incoming
        # version, so find the incoming version in all package versions
for ver in self.versions:
if str(version) in str(ver):
break
# remove the "artificial" tweak-level
split_ver = str(ver).split('.')
url_version = ".".join(split_ver[:3]) + "." + split_ver[4]
if version < spack.version.Version('1.8.2.1'):
url = 'https://github.com/Kitware/ninja/archive/v{0}.kitware.dyndep-1.tar.gz'
else:
url = 'https://github.com/Kitware/ninja/archive/v{0}.kitware.dyndep-1.jobserver-1.tar.gz'
return url.format(url_version)
def configure(self, spec, prefix):
python('configure.py', '--bootstrap')
@run_after('configure')
@on_package_attributes(run_tests=True)
def configure_test(self):
ninja = Executable('./ninja')
ninja('-j{0}'.format(make_jobs), 'ninja_test')
ninja_test = Executable('./ninja_test')
ninja_test()
def install(self, spec, prefix):
mkdir(prefix.bin)
install('ninja', prefix.bin)
install_tree('misc', prefix.misc)
# Some distros like Fedora install a 'ninja-build' executable
# instead of 'ninja'. Install both for uniformity.
with working_dir(prefix.bin):
symlink('ninja', 'ninja-build')
|
lgpl-2.1
| -1,166,708,126,163,917,600 | 44.735294 | 107 | 0.694534 | false |
muthu-s/chef-repo
|
cookbooks/wsi/files/createqueues.py
|
1
|
1183
|
import os;
import sys;
import traceback;
#####################################################################
## Create MQ Queues at cluster level
#####################################################################
def createQueues(clusterName, DisplayName, jndiName, queueName, queueMgrName):
clusterid=AdminConfig.getid('/ServerCluster:'+clusterName+'/');
AdminTask.createWMQQueue(clusterid,["-name "+DisplayName+" -jndiName "+jndiName+" -queueName "+queueName+" -qmgr "+queueMgrName])
print "\n Saving Configuration /n"
AdminConfig.save()
print "/n Queue created \n"
return
#####################################################################
## Main
#####################################################################
if len(sys.argv) != 5:
print "This script requires ClusterName, Queue display name, queue JNDI Name, queue name, and qmgr name"
sys.exit(1)
else:
clusterName = sys.argv[0]
DisplayName = sys.argv[1]
jndiName = sys.argv[2]
queueName = sys.argv[3]
queueMgrName = sys.argv[4]
createQueues(clusterName, DisplayName, jndiName, queueName, queueMgrName)
|
apache-2.0
| 7,639,029,678,157,415,000 | 37.16129 | 137 | 0.510566 | false |
getupcloud/referral
|
utils.py
|
1
|
1456
|
from urllib.parse import unquote
from flask import request, url_for
from playhouse.shortcuts import model_to_dict, dict_to_model
from decimal import Decimal
def to_dict(obj):
"""
    Helper method that returns a peewee model instance in python dict format
"""
from models import ReferralProgram
def treat_data(value):
if isinstance(value,Decimal):
return float(value)
elif isinstance(value, dict):
value_data = {k:treat_data(value[k]) for k in value}
return value_data
else:
return value
model_dict = model_to_dict(obj)
data = {k:treat_data(model_dict[k]) for k in model_dict}
if isinstance(obj, ReferralProgram):
data['link'] = request.host_url.rstrip('/') + url_for('program', program_id=str(obj.id))
return data
def to_list_dict(objects):
"""
    Iterate over a list of model instances and return them as a list of dicts
"""
return [to_dict(x) for x in objects]
def list_routes(app):
output = []
for rule in app.url_map.iter_rules():
options = {}
for arg in rule.arguments:
options[arg] = "[{0}]".format(arg)
methods = ','.join(rule.methods)
url = url_for(rule.endpoint, **options)
if 'static' not in rule.endpoint:
line = {"name":rule.endpoint, 'methods':methods, 'url':unquote(url)}
output.append(line)
return output
|
apache-2.0
| -5,856,686,055,878,276,000 | 28.12 | 96 | 0.608516 | false |
z-william131/small_projects
|
FileSort/FileSort.py
|
1
|
3269
|
#!/usr/bin/python
# The FileSort function needs an input: the path to a folder,
# and then it will generate an output.txt file that includes
# the "index, file name, address, maintag, subtag, subtag2"
# of every file in that folder.
# It can also return the output file as a list.
import os
def setLabel(path):
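    # Split a path of the form /maintag/subtag/subtag2/file into [file, path, main_tag, sub_tag, sub_tag2].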
indexs = []
index = 0
for p in path:
if p == "/":
indexs.append(index)
index += 1
main_tag, sub_tag, sub_tag2, file = "None", "None", "None", "None"
if len(indexs) == 1:
file = path[(indexs[0] + 1):]
if len(indexs) == 2:
main_tag = path[(indexs[0] + 1) : indexs[1]]
file = path[(indexs[1] + 1):]
if len(indexs) == 3:
main_tag = path[(indexs[0] + 1) : indexs[1]]
sub_tag = path[(indexs[1] + 1) : indexs[2]]
file = path[(indexs[2] + 1):]
if len(indexs) == 4:
main_tag = path[(indexs[0] + 1) : indexs[1]]
sub_tag = path[(indexs[1] + 1) : indexs[2]]
sub_tag2 = path[(indexs[2] + 1) : indexs[3]]
file = path[(indexs[3] + 1):]
return [file, path, main_tag, sub_tag, sub_tag2]
def str2Label(line):
return line.split(", ")
def label2Str(labels):
output = ', '.join(label for label in labels)
return output
def isFile(file):
return "." in file
# check whether the file or any folder in its path is hidden, and whether the entry is actually a file.
def isValidLabel(labels):
for label in labels:
if label[0] == ".":
return False
return isFile(labels[0])
def output2List():
output = open("output.txt", "r")
elements = output.readlines()
output_list = []
for element in elements:
output_list.append(str2Label(element))
return output_list
def FileSort(path):
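    # Walk the folder, label every file, and append any new entries to output.txt (creating it on the first run).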
if os.path.exists("output.txt"):
output = open("output.txt", "r")
output_update = open("output_update.txt","w")
elements = output.readlines()
names =[]
for element in elements:
names.append(str2Label(element)[2])
output_update.write(element)
output.close()
index = len(names)
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
root_length = len(path)
file_name = str(os.path.join(root, name))[(root_length):]
labels = setLabel(file_name)
if isValidLabel(labels) and labels[1] not in names:
result = label2Str(labels)
output_update.write(str(index) + ", " + result + "\n")
index += 1
os.rename("output_update.txt", "output.txt")
output_update.close()
else:
output = open("output.txt","w")
index = 0
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
root_length = len(path)
file_name = str(os.path.join(root, name))[(root_length):]
labels = setLabel(file_name)
if isValidLabel(labels):
result = label2Str(labels)
output.write(str(index) + ", " + result + "\n")
index += 1
output.close()
return output2List()
|
mit
| 7,337,251,025,384,431,000 | 27.929204 | 89 | 0.542368 | false |
chawins/aml
|
lib/RandomEnhance.py
|
1
|
2811
|
import random
import numpy as np
from PIL import Image, ImageEnhance
class RandomEnhance():
"""
Class to randomly enhance image by adjusting color, contrast, sharpness and
brightness
"""
def __init__(self, seed=None, p=1.0, intensity=0.5):
"""
Initialises an instance.
Parameters
----------
seed : int
Random seed for random number generator
p : float
Probability of augmenting a single example, must be in a
range of [0, 1]
intensity : float
Augmentation intensity, must be in a [0, 1] range
"""
self.p = p
self.intensity = intensity
self.random = random.Random()
if seed is not None:
self.seed = seed
self.random.seed(seed)
self.last_factors = None
def get_last_factors(self):
return self.last_factors
def enhance(self, image):
"""
Randomly enhance input image with probability p
"""
if self.random.uniform(0, 1) < self.p:
return self.intensity_enhance(image)
else:
return image
def intensity_enhance(self, im):
"""
        Perform random enhancement with the specified intensity [0, 1]. The
        ranges of the random factors are chosen to be appropriate.
"""
color_factor = self.intensity * self.random.uniform(-0.4, 0.4) + 1
contrast_factor = self.intensity * self.random.uniform(-0.5, 0.5) + 1
sharpess_factor = self.intensity * self.random.uniform(-0.8, 0.8) + 1
bright_factor = self.intensity * self.random.uniform(-0.5, 0.5) + 1
self.last_factors = [color_factor, contrast_factor, sharpess_factor, bright_factor]
image = Image.fromarray(np.uint8(im * 255))
enhancer = ImageEnhance.Color(image)
image = enhancer.enhance(color_factor)
enhancer = ImageEnhance.Contrast(image)
image = enhancer.enhance(contrast_factor)
enhancer = ImageEnhance.Sharpness(image)
image = enhancer.enhance(sharpess_factor)
enhancer = ImageEnhance.Brightness(image)
image = enhancer.enhance(bright_factor)
return np.asarray(image) / 255.
def enhance_factors(self, im, factors):
image = Image.fromarray(np.uint8(im * 255))
enhancer = ImageEnhance.Color(image)
image = enhancer.enhance(factors[0])
enhancer = ImageEnhance.Contrast(image)
image = enhancer.enhance(factors[1])
enhancer = ImageEnhance.Sharpness(image)
image = enhancer.enhance(factors[2])
enhancer = ImageEnhance.Brightness(image)
image = enhancer.enhance(factors[3])
return np.asarray(image) / 255.
|
mit
| -8,288,442,314,468,944,000 | 32.070588 | 91 | 0.599075 | false |
inspirehep/json-merger
|
json_merger/__init__.py
|
1
|
16680
|
# -*- coding: utf-8 -*-
#
# This file is part of Inspirehep.
# Copyright (C) 2016 CERN.
#
# Inspirehep is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Inspirehep is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Inspirehep; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Module for merging JSON objects.
To use this module you need to first import the main class:
>>> from json_merger import Merger
Then, import the configuration options:
>>> from json_merger.config import UnifierOps, DictMergerOps
The Basic Use Case
------------------
Let's assume we have JSON records that don't have any list fields --
They have string keys and as values other objects or primitive types.
In order to perform a merge we assume we have a lowest common ancestor
(``root``), a current version (``head``) and another version wich we want to
integrate into our record (``update``).
>>> root = {'name': 'John'} # A common ancestor of our person record
>>> head = {'name': 'Johnny', 'age': 32} # The current version of the record.
>>> update = {'name': 'Jonathan', 'address': 'Home'} # An updated version.
In this case we want to use the merger to compute one of the possible versions.
We create a merger instance in which we provide the default operation for
non-list fields and the one for list fields.
>>> m = Merger(root, head, update, DictMergerOps.FALLBACK_KEEP_HEAD,
... UnifierOps.KEEP_UPDATE_AND_HEAD_ENTITIES_HEAD_FIRST)
... # Ignore UnifierOps for now.
>>> # We might get some exceptions
>>> from json_merger.errors import MergeError
>>> try:
... m.merge()
... except MergeError:
... pass # We don't care about this now.
>>> m.merged_root == {
... 'name': 'Johnny',
... 'age': 32,
... 'address': 'Home',
... }
True
The merged version kept the ``age`` field from the ``head`` object and the
``address`` field from the ``update`` object. The ``name`` field was different,
but because the strategy was ``FALLBACK_KEEP_HEAD`` the end result kept the
value from the ``head`` variable. To keep the ``update`` one, one can
use ``FALLBACK_KEEP_UPDATE``:
>>> m = Merger(root, head, update, DictMergerOps.FALLBACK_KEEP_UPDATE,
... UnifierOps.KEEP_ONLY_HEAD_ENTITIES)
>>> raised_something = False
>>> try:
... m.merge()
... except MergeError:
... raised_something = True
>>> m.merged_root == {
... 'name': 'Jonathan',
... 'age': 32,
... 'address': 'Home',
... }
True
If this type of conflict occurs, the merger will also populate a ``conflicts``
field. In this case the conflict holds the alternative name for our record.
Also, because a conflict occurred, the merge method also raised a MergeError.
For all the types of conflict that can be raised by the ``merge`` method
also check the :class:`json_merger.conflict.ConflictType` documentation.
>>> from json_merger.conflict import Conflict, ConflictType
>>> m.conflicts[0] == Conflict(ConflictType.SET_FIELD, ('name', ), 'Johnny')
True
>>> raised_something
True
Merging Lists With Base Values
------------------------------
For this example we are going to assume we want to merge sets of badges
that a person can receive.
>>> root = {'badges': ['bad', 'random']}
>>> head = {'badges': ['cool', 'nice', 'random']}
>>> update = {'badges': ['fun', 'nice', 'healthy']}
The most simple options are to either keep only the badges available in head
or only the badges available in the update. This can be done by specifying one
of:
* ``UnifierOps.KEEP_ONLY_HEAD_ENTITIES``
* ``UnifierOps.KEEP_ONLY_UPDATE_ENTITIES``
>>> m = Merger(root, head, update, DictMergerOps.FALLBACK_KEEP_HEAD,
... UnifierOps.KEEP_ONLY_HEAD_ENTITIES)
>>> m.merge() # No conflict here
>>> m.merged_root['badges'] == ['cool', 'nice', 'random']
True
>>> m = Merger(root, head, update, DictMergerOps.FALLBACK_KEEP_HEAD,
... UnifierOps.KEEP_ONLY_UPDATE_ENTITIES)
>>> m.merge()
>>> m.merged_root['badges'] == ['fun', 'nice', 'healthy']
True
If we want to do a union of the elements we can use:
* ``UnifierOps.KEEP_UPDATE_AND_HEAD_ENTITIES_HEAD_FIRST``
* ``UnifierOps.KEEP_UPDATE_AND_HEAD_ENTITIES_UPDATE_FIRST``
>>> m = Merger(root, head, update, DictMergerOps.FALLBACK_KEEP_HEAD,
... UnifierOps.KEEP_UPDATE_AND_HEAD_ENTITIES_HEAD_FIRST)
>>> m.merge() # No conflict here
>>> m.merged_root['badges'] == ['cool', 'fun', 'nice', 'random', 'healthy']
True
>>> m = Merger(root, head, update, DictMergerOps.FALLBACK_KEEP_HEAD,
... UnifierOps.KEEP_UPDATE_AND_HEAD_ENTITIES_UPDATE_FIRST)
>>> m.merge()
>>> m.merged_root['badges'] == ['fun', 'cool', 'nice', 'healthy', 'random']
True
These options keep the order relations between the entities. For example,
both ``'fun'`` and ``'cool'`` were placed before the ``'nice'`` entity but
between them there isn't any restriction. In such cases, for
``KEEP_UPDATE_AND_HEAD_ENTITIES_HEAD_FIRST`` we first pick the elements
that occur only in the `head` list and for
``KEEP_UPDATE_AND_HEAD_ENTITIES_UPDATE_FIRST`` we first pick the ones that
occur only in the `update` list. If no such ordering is possible we first
add the elements found in the prioritized list and then the remaining ones.
Also, the method will raise a REORDER conflict.
>>> m = Merger([], [1, 2, 5, 3], [3, 1, 2, 4],
... DictMergerOps.FALLBACK_KEEP_HEAD,
... UnifierOps.KEEP_UPDATE_AND_HEAD_ENTITIES_HEAD_FIRST)
>>> try:
... m.merge()
... except MergeError:
... pass
>>> m.merged_root == [1, 2, 5, 3, 4]
True
>>> m.conflicts == [Conflict(ConflictType.REORDER, (), None)]
True
>>> m = Merger([], [1, 2, 5, 3], [3, 1, 2, 4],
... DictMergerOps.FALLBACK_KEEP_HEAD,
... UnifierOps.KEEP_UPDATE_AND_HEAD_ENTITIES_UPDATE_FIRST)
>>> try:
... m.merge()
... except MergeError:
... pass
>>> m.merged_root == [3, 1, 2, 4, 5]
True
>>> m.conflicts == [Conflict(ConflictType.REORDER, (), None)]
True
In the case in which ``root`` is represented by the latest automatic update
of a record (e.g. crawling some metadata source),
``head`` by manual edits of ``root`` and ``update`` by a new automatic
update, we might want to preserve only the entities in ``update`` but
notify the user in case some manual addition was removed.
* ``UnifierOps.KEEP_UPDATE_ENTITIES_CONFLICT_ON_HEAD_DELETE``
>>> root = {'badges': ['bad', 'random']}
>>> head = {'badges': ['cool', 'nice', 'random']}
>>> update = {'badges': ['fun', 'nice', 'healthy']}
>>> m = Merger(root, head, update, DictMergerOps.FALLBACK_KEEP_HEAD,
... UnifierOps.KEEP_UPDATE_ENTITIES_CONFLICT_ON_HEAD_DELETE)
>>> try:
... m.merge()
... except MergeError:
... pass
>>> m.merged_root['badges'] == ['fun', 'nice', 'healthy']
True
>>> m.conflicts == [Conflict(ConflictType.ADD_BACK_TO_HEAD,
... ('badges', ), 'cool')]
True
In this case, only ``'cool'`` was added "manually" and removed by the update.
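A caller can use this conflict to notify the user about the lost manual edit.
The following is a minimal sketch that relies only on the ``Conflict`` equality
shown above; the message text itself is just an illustration.
>>> lost_badge = Conflict(ConflictType.ADD_BACK_TO_HEAD, ('badges', ), 'cool')
>>> if lost_badge in m.conflicts:
...     print('Manual addition removed by the update: cool')
Manual addition removed by the update: cool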
Merging Lists Of Objects
------------------------
Assume the most complex case in which we need to merge lists of objects which
can also contain nested lists.
>>> root = {
... 'people': [
... {'name': 'John', 'age': 13},
... {'name': 'Peter'},
... {'name': 'Max'}
... ]}
>>> head = {
... 'people': [
... {'name': 'John', 'age': 14,
... 'group': {'id': 'grp01'},
... 'person_id': '42',
... 'badges': [{'t': 'b0', 'e': True}, {'t': 'b1'}, {'t': 'b2'}]},
... {'name': 'Peter', 'age': 15,
... 'badges': [{'t': 'b0'}, {'t': 'b1'}, {'t': 'b2'}]},
... {'name': 'Max', 'age': 16}
... ]}
>>> update = {
... 'people': [
... {'name': 'Max', 'address': 'work'},
... {'name': 'Johnnie', 'address': 'home',
... 'group': {'id': 'GRP01'},
... 'person_id': '42',
... 'age': 15,
... 'badges': [{'t': 'b1'}, {'t': 'b2'}, {'t': 'b0', 'e': False}]},
... ]}
First of all we would like to define when two person records represent the same
entity. In this demo data model we can say that two records represent the
same person if any of the following is true:
* They have the same ``name``
* They have the same lowercased group id AND the same person_id
Then we define two badges as equal if they have the same ``t`` attribute.
In order to define a custom way of linking records you can add comparator
classes for any of the list fields via the ``comparators`` keyword argument.
To define a simple comparison that checks field equality you
can use :class:`json_merger.comparator.PrimaryKeyComparator`.
In this case the comparators for the fields above look like this:
>>> from json_merger.comparator import PrimaryKeyComparator
>>> class PersonComparator(PrimaryKeyComparator):
... primary_key_fields = ['name', ['group.id', 'person_id']]
... normalization_functions = {'group.id': str.lower}
>>> class BadgesComparator(PrimaryKeyComparator):
... primary_key_fields = ['t']
Note:
You need to use a comparator class and not a comparator instance when
defining the equality of two objects.
Next we would like to define how to do the merging:
* In case of conflict keep ``head`` values.
* For every list try to keep only the update entities.
* For the badges list keep both entities with priority to the ``update``
values.
>>> comparators = {'people': PersonComparator,
... 'people.badges': BadgesComparator}
>>> list_merge_ops = {
... 'people.badges': UnifierOps.KEEP_UPDATE_AND_HEAD_ENTITIES_UPDATE_FIRST
... }
>>> m = Merger(root, head, update,
... DictMergerOps.FALLBACK_KEEP_HEAD,
... UnifierOps.KEEP_ONLY_UPDATE_ENTITIES,
... comparators=comparators,
... list_merge_ops=list_merge_ops)
>>> try:
... m.merge()
... except MergeError:
... pass
>>> m.merged_root == {
... 'people': [
... {'name': 'Max', 'address': 'work', 'age': 16},
... {'name': 'Johnnie', # Only update edited it.
... 'address': 'home',
... 'group': {'id': 'grp01'}, # From KEEP_HEAD
... 'person_id': '42',
... 'age': 14, # From KEEP_HEAD
... 'badges': [{'t': 'b1'}, {'t': 'b2'},
... {'t': 'b0', 'e': True}], # From KEEP_HEAD
... },
... ]}
True
Merging Data Lists
------------------
If you want to merge arrays of raw data (that do not encode any entities),
you can use the ``data_lists`` keyword argument. This argument treats
list indices as dictionary keys.
>>> root = {'f': {'matrix': [[0, 0], [0, 0]]}}
>>> head = {'f': {'matrix': [[1, 1], [0, 0]]}}
>>> update = {'f': {'matrix': [[0, 0], [1, 1]]}}
>>> m = Merger(root, head, update,
... DictMergerOps.FALLBACK_KEEP_HEAD,
... UnifierOps.KEEP_ONLY_UPDATE_ENTITIES,
... data_lists=['f.matrix'])
>>> m.merge()
>>> m.merged_root == {'f': {'matrix': [[1, 1], [1, 1]]}}
True
Extending Comparators
---------------------
The internal API uses classes that extend
:class:`json_merger.comparator.BaseComparator` in order to check the semantic
equality of JSON objects. The internals call the ``get_matches`` method, which
is implemented in terms of the ``equal`` method. The simplest way to
extend this class is to override the ``equal`` method.
>>> from json_merger.comparator import BaseComparator
>>> class CustomComparator(BaseComparator):
... def equal(self, obj1, obj2):
... return abs(obj1 - obj2) < 0.2
>>> comp = CustomComparator([1, 2], [1, 2, 1.1])
>>> comp.get_matches('l1', 0) # elements matching l1[0] from l2
[(0, 1), (2, 1.1)]
If you want to implement another type of assignment you can compute all the
matches and store them in the ``matches`` set by overriding the
``process_lists`` method. You need to put pairs of matching indices between
``l1`` and ``l2``.
>>> from json_merger.comparator import BaseComparator
>>> class CustomComparator(BaseComparator):
... def process_lists(self):
... self.matches.add((0, 0))
... self.matches.add((0, 1))
>>> comp = CustomComparator(['foo', 'bar'], ['bar', 'foo'])
>>> comp.get_matches('l1', 0) # elements matching l1[0] from l2
[(0, 'bar'), (1, 'foo')]
[contrib] Distance Function Matching
------------------------------------
To implement fuzzy matching we also allow matching by using a distance
function. This ensures a 1:1 mapping between the entities by minimizing
the total distance between all linked entities. To mark two of them
as equal you can provide a threshold for that distance. (This is why
it's best to normalize it between 0 and 1.) Also, to speed
up the matching you can hint possible matches by bucketing matching
elements using a normalization function. In the next example we will
match some points in the coordinate system, each of them lying in a specific
square. The distance that we are going to use is the Euclidean distance.
We will normalize the points to their integer counterparts.
>>> from json_merger.contrib.inspirehep.comparators import (
... DistanceFunctionComparator)
>>> from math import sqrt
>>> class PointComparator(DistanceFunctionComparator):
... distance_function = lambda p1, p2: sqrt((p1[0] - p2[0]) ** 2 +
... (p1[1] - p2[1]) ** 2)
... normalization_functions = [lambda p: (int(p[0]), int(p[1]))]
... threshold = 0.5
>>> l1 = [(1.1, 1.1), (1.2, 1.2), (2.1, 2.1)]
>>> l2 = [(1.11, 1.11), (1.25, 1.25), (2.15, 2.15)]
>>> comp = PointComparator(l1, l2)
>>> comp.get_matches('l1', 0) # elements matching l1[0] from l2
[(0, (1.11, 1.11))]
>>> # match only the closest element, not everything under threshold.
>>> comp.get_matches('l1', 1)
[(1, (1.25, 1.25))]
>>> comp.get_matches('l1', 2)
[(2, (2.15, 2.15))]
[contrib] Custom Person Name Distance
-------------------------------------
We also provide a person name distance based on edit distance normalized
between 0 and 1. You just need to provide a function for tokenizing a full
name into ``NameToken`` or ``NameInitial`` tokens; see ``simple_tokenize`` in the
contrib directory. This distance function matches initials with full
regular tokens and works with any name permutation. Also, this distance
calculator assumes the full name is inside the ``full_name`` field of a
dictionary. If you have the name in a different field you can just override
the class and call ``super`` on objects having the name in the ``full_name``
field.
>>> from json_merger.contrib.inspirehep.author_util import (
... AuthorNameDistanceCalculator, simple_tokenize)
>>> dst = AuthorNameDistanceCalculator(tokenize_function=simple_tokenize)
>>> dst({'full_name': u'Doe, J.'}, {'full_name': u'John, Doe'}) < 0.1
True
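As a minimal sketch of that override (the ``NameKeyDistanceCalculator`` class
and the ``name`` key are illustrative assumptions, not part of the library):
>>> class NameKeyDistanceCalculator(AuthorNameDistanceCalculator):
...     def __call__(self, obj1, obj2):
...         return super(NameKeyDistanceCalculator, self).__call__(
...             {'full_name': obj1['name']}, {'full_name': obj2['name']})
>>> dst = NameKeyDistanceCalculator(tokenize_function=simple_tokenize)
>>> dst({'name': u'Doe, J.'}, {'name': u'John, Doe'}) < 0.1
True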
We also provide functions for normalizing an author name with different
heuristics to speed up the distance function matching.
>>> from json_merger.contrib.inspirehep.author_util import (
... AuthorNameNormalizer)
>>> identity = AuthorNameNormalizer(simple_tokenize)
>>> identity({'full_name': 'Doe, Johnny Peter'}) # doctest: +SKIP
('doe', 'johnny', 'peter')
>>> asciified = AuthorNameNormalizer(simple_tokenize,
... asciify=True)
>>> asciified({'full_name': 'Dœ, Jöhnny Péter'}) # doctest: +SKIP
('doe', 'johnny', 'peter')
>>> one_fst_name = AuthorNameNormalizer(simple_tokenize,
... first_names_number=1)
>>> one_fst_name({'full_name': 'Doe, Johnny Peter'}) # doctest: +SKIP
('doe', 'johnny')
>>> last_name_one_initial = AuthorNameNormalizer(simple_tokenize,
... first_names_number=1,
... first_name_to_initial=True)
... # doctest: +SKIP
>>> last_name_one_initial({'full_name': 'Doe, Johnny Peter'}) # doctest: +SKIP
('doe', 'j')
These instances can be used as class parameters for
``DistanceFunctionComparator``.
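As a minimal sketch of such a comparator (the ``AuthorComparator`` name and the
chosen ``threshold`` are illustrative assumptions, not library defaults):
>>> class AuthorComparator(DistanceFunctionComparator):
...     distance_function = AuthorNameDistanceCalculator(
...         tokenize_function=simple_tokenize)
...     normalization_functions = [AuthorNameNormalizer(simple_tokenize,
...                                                     first_names_number=1,
...                                                     first_name_to_initial=True)]
...     threshold = 0.12
>>> comp = AuthorComparator([{'full_name': u'Doe, John'}],
...                         [{'full_name': u'Doe, J.'}])
>>> comp.get_matches('l1', 0)  # doctest: +SKIP
[(0, {'full_name': u'Doe, J.'})]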
"""
from __future__ import absolute_import, print_function
from .merger import Merger
from .version import __version__
__all__ = ('__version__', 'Merger')
|
gpl-2.0
| 5,904,750,406,376,395,000 | 37.604167 | 79 | 0.640163 | false |
YannickB/odoo-hosting
|
clouder_template_drupal/__manifest__.py
|
1
|
1507
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Buron
# Copyright 2015, TODAY Clouder SASU
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License with Attribution
# clause as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License with
# Attribution clause along with this program. If not, see
# <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Clouder Template Drupal',
'version': '10.0.10.0.0',
'category': 'Clouder',
'depends': [
'clouder_template_dns',
'clouder_template_shinken',
'clouder_template_mail',
'clouder_template_proxy',
'clouder_template_postgres',
'clouder_template_piwik'
],
'author': 'Yannick Buron (Clouder)',
'license': 'LGPL-3',
'website': 'https://github.com/clouder-community/clouder',
'demo': [],
'data': ['template.xml'],
'installable': True,
'application': True,
}
|
agpl-3.0
| -5,479,058,793,165,318,000 | 34.880952 | 79 | 0.60783 | false |
SUNET/eduid-signup
|
eduid_signup/session.py
|
1
|
1724
|
from zope.interface import implementer
from pyramid.interfaces import ISessionFactory, ISession
from eduid_common.session.pyramid_session import SessionFactory as CommonSessionFactory
from eduid_common.session.pyramid_session import Session as CommonSession
import logging
logger = logging.getLogger(__name__)
_EDIT_USER_EPPN = 'edit-user_eppn'
_USER_EPPN = 'user_eppn'
Session = implementer(ISession)(CommonSession)
@implementer(ISessionFactory)
class SessionFactory(CommonSessionFactory):
'''
Session factory implementing the pyramid.interfaces.ISessionFactory
interface.
It uses the SessionManager defined in eduid_common.session.session
to create sessions backed by redis.
'''
def __call__(self, request):
'''
Create a session object for the given request.
:param request: the request
:type request: pyramid.request.Request
:return: the session
:rtype: Session
'''
self.request = request
settings = request.registry.settings
session_name = settings.get('session.key')
cookies = request.cookies
token = cookies.get(session_name, None)
if token is not None:
try:
base_session = self.manager.get_session(token=token)
existing_session = Session(request, base_session)
return existing_session
except KeyError: # No session data found
pass
base_session = self.manager.get_session(data={})
base_session['flash_messages'] = {'default': []}
base_session.commit()
session = Session(request, base_session, new=True)
session.set_cookie()
return session
|
bsd-3-clause
| -769,802,457,438,294,800 | 31.528302 | 87 | 0.664153 | false |
peterfeiner/wcp
|
wcp/cli.py
|
1
|
4339
|
# Copyright (C) 2014 Peter Feiner
import argparse
import sys
import os
import signal
from . import record
from . import report
SIGNALS = {}
for name in dir(signal):
if name.startswith('SIG') and not name.startswith('SIG_'):
SIGNALS[name] = getattr(signal, name)
def parse_signal(string):
if string is None:
return None
try:
return int(string)
except ValueError:
name = string.upper()
if not name.startswith('SIG'):
name = 'SIG%s' % name
try:
return SIGNALS[name]
except KeyError:
raise Exception('Invalid signal %r. Use a number or a name: %s'\
% (string, ', '.join(SIGNALS.keys())))
def record_main(args):
parser = argparse.ArgumentParser(prog='wcp record')
parser.add_argument('script_path',
help='Python script to profile.')
parser.add_argument('-f', '--frequency', default=10, type=float,
help='Number of samples per second. Default is 10.')
parser.add_argument('-o', '--output', default='wcp.data',
help='Output file. Default is wcp.data')
parser.add_argument('-d', '--detach-fork', action='store_true',
help='Do not sample child processes.')
parser.add_argument('-g', '--sample-greenlets', action='store_true',
                        help='Treat greenlet coroutines like threads.'),
parser.add_argument('-s', '--start-signal', default=None,
help='Start sampling on this signal. '
'Disabled by default.'),
parser.add_argument('-S', '--stop-signal', default=None,
help='Stop sampling on this signal. '
'Disabled by default.'),
parser.add_argument('-n', '--no-autostart', action='store_true',
help='Do not start sampling; wait for signal.')
opts, script_args = parser.parse_known_args(args)
argv = [opts.script_path] + script_args
record_opts = record.Options()
record_opts.frequency = opts.frequency
record_opts.out_fd = os.open(opts.output,
os.O_WRONLY | os.O_TRUNC |
os.O_CREAT | os.O_APPEND,
0666)
record_opts.follow_fork = not opts.detach_fork
record_opts.sample_greenlets = opts.sample_greenlets
record_opts.autostart = not opts.no_autostart
start_signal = parse_signal(opts.start_signal)
stop_signal = parse_signal(opts.stop_signal)
if start_signal is None and opts.no_autostart:
raise Exception('Need start signal if autostart is disabled.')
if stop_signal is None:
stop_signal = start_signal
record_opts.stop_signal = stop_signal
record_opts.start_signal = start_signal
record.record_script(argv, record_opts)
def report_main(args):
parser = argparse.ArgumentParser(prog='wcp report')
parser.add_argument('-d', '--data-path', default='wcp.data',
help='Sample file. Default is wcp.data.')
parser.add_argument('-t', '--top-down', action='store_true',
help='Root call chain at entry points.')
opts = parser.parse_args(args)
report_opts = report.Options()
report_opts.data_path = opts.data_path
report_opts.top_down = opts.top_down
report.write(report_opts, sys.stdout)
def main():
add_help = True
i = 0
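    # Scan argv for the sub-command: stop at the first non-flag token, or at a
    # -h/--help flag so that argparse can still print the top-level help.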
for i in range(1, len(sys.argv)):
arg = sys.argv[i]
if arg.startswith('-') and not arg.startswith('--') and 'h' in arg or\
arg == '--help':
break
if not arg.startswith('-'):
add_help = False
break
commands = dict([(name[:-5], value) for name, value in globals().items()
if name.endswith('_main')])
parser = argparse.ArgumentParser(add_help=add_help)
parser.add_argument('command', help='one of %s' % ', '.join(commands))
opts = parser.parse_args(sys.argv[1:i + 1])
command_args = sys.argv[i + 1:]
try:
command_main = commands[opts.command]
except KeyError:
sys.stderr.write('invalid command: %s\n' % opts.command)
sys.exit(1)
command_main(command_args)
if __name__ == '__main__':
main()
|
gpl-2.0
| 6,219,256,643,953,565,000 | 35.771186 | 78 | 0.577322 | false |
ralph-group/pymeasure
|
pymeasure/display/windows.py
|
1
|
37114
|
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2021 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
import os
import subprocess, platform
import pyqtgraph as pg
from .browser import BrowserItem
from .curves import ResultsCurve
from .manager import Manager, Experiment, ImageExperiment, ImageManager
from .Qt import QtCore, QtGui
from .widgets import (
PlotWidget,
BrowserWidget,
InputsWidget,
LogWidget,
ResultsDialog,
SequencerWidget,
ImageWidget,
DirectoryLineEdit,
)
from ..experiment.results import Results
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class PlotterWindow(QtGui.QMainWindow):
"""
A window for plotting experiment results. Should not be
instantiated directly, but only via the
:class:`~pymeasure.display.plotter.Plotter` class.
.. seealso::
Tutorial :ref:`tutorial-plotterwindow`
A tutorial and example code for using the Plotter and PlotterWindow.
    .. attribute:: plot
The `pyqtgraph.PlotItem`_ object for this window. Can be
accessed to further customise the plot view programmatically, e.g.,
display log-log or semi-log axes by default, change axis range, etc.
    .. _pyqtgraph.PlotItem: http://www.pyqtgraph.org/documentation/graphicsItems/plotitem.html
"""
def __init__(self, plotter, refresh_time=0.1, parent=None):
super().__init__(parent)
self.plotter = plotter
self.refresh_time = refresh_time
columns = plotter.results.procedure.DATA_COLUMNS
self.setWindowTitle('Results Plotter')
self.main = QtGui.QWidget(self)
vbox = QtGui.QVBoxLayout(self.main)
vbox.setSpacing(0)
hbox = QtGui.QHBoxLayout()
hbox.setSpacing(6)
hbox.setContentsMargins(-1, 6, -1, -1)
file_label = QtGui.QLabel(self.main)
file_label.setText('Data Filename:')
self.file = QtGui.QLineEdit(self.main)
self.file.setText(plotter.results.data_filename)
hbox.addWidget(file_label)
hbox.addWidget(self.file)
vbox.addLayout(hbox)
self.plot_widget = PlotWidget(columns, refresh_time=self.refresh_time, check_status=False)
self.plot = self.plot_widget.plot
vbox.addWidget(self.plot_widget)
self.main.setLayout(vbox)
self.setCentralWidget(self.main)
self.main.show()
self.resize(800, 600)
self.curve = ResultsCurve(plotter.results, columns[0], columns[1],
pen=pg.mkPen(color=pg.intColor(0), width=2), antialias=False)
self.plot.addItem(self.curve)
self.plot_widget.updated.connect(self.check_stop)
def quit(self, evt=None):
log.info("Quitting the Plotter")
self.close()
self.plotter.stop()
def check_stop(self):
""" Checks if the Plotter should stop and exits the Qt main loop if so
"""
if self.plotter.should_stop():
QtCore.QCoreApplication.instance().quit()
class ManagedWindow(QtGui.QMainWindow):
"""
Abstract base class.
The ManagedWindow provides an interface for inputting experiment
parameters, running several experiments
(:class:`~pymeasure.experiment.procedure.Procedure`), plotting
result curves, and listing the experiments conducted during a session.
The ManagedWindow uses a Manager to control Workers in a Queue,
and provides a simple interface. The :meth:`~.queue` method must be
overridden by the child class.
.. seealso::
Tutorial :ref:`tutorial-managedwindow`
A tutorial and example on the basic configuration and usage of ManagedWindow.
.. attribute:: plot
The `pyqtgraph.PlotItem`_ object for this window. Can be
accessed to further customise the plot view programmatically, e.g.,
display log-log or semi-log axes by default, change axis range, etc.
.. _pyqtgraph.PlotItem: http://www.pyqtgraph.org/documentation/graphicsItems/plotitem.html
"""
def __init__(self, procedure_class, inputs=(), displays=(), x_axis=None, y_axis=None,
log_channel='', log_level=logging.INFO, parent=None, sequencer=False,
sequencer_inputs=None, sequence_file=None, inputs_in_scrollarea=False, directory_input=False):
super().__init__(parent)
app = QtCore.QCoreApplication.instance()
app.aboutToQuit.connect(self.quit)
self.procedure_class = procedure_class
self.inputs = inputs
self.displays = displays
self.use_sequencer = sequencer
self.sequencer_inputs = sequencer_inputs
self.sequence_file = sequence_file
self.inputs_in_scrollarea = inputs_in_scrollarea
self.directory_input = directory_input
self.log = logging.getLogger(log_channel)
self.log_level = log_level
log.setLevel(log_level)
self.log.setLevel(log_level)
self.x_axis, self.y_axis = x_axis, y_axis
self._setup_ui()
self._layout()
self.setup_plot(self.plot)
def _setup_ui(self):
self.log_widget = LogWidget()
self.log.addHandler(self.log_widget.handler) # needs to be in Qt context?
log.info("ManagedWindow connected to logging")
if self.directory_input:
self.directory_label = QtGui.QLabel(self)
self.directory_label.setText('Directory')
self.directory_line = DirectoryLineEdit(parent=self)
self.queue_button = QtGui.QPushButton('Queue', self)
self.queue_button.clicked.connect(self.queue)
self.abort_button = QtGui.QPushButton('Abort', self)
self.abort_button.setEnabled(False)
self.abort_button.clicked.connect(self.abort)
self.plot_widget = PlotWidget(self.procedure_class.DATA_COLUMNS, self.x_axis, self.y_axis)
self.plot = self.plot_widget.plot
self.browser_widget = BrowserWidget(
self.procedure_class,
self.displays,
[self.x_axis, self.y_axis],
parent=self
)
self.browser_widget.show_button.clicked.connect(self.show_experiments)
self.browser_widget.hide_button.clicked.connect(self.hide_experiments)
self.browser_widget.clear_button.clicked.connect(self.clear_experiments)
self.browser_widget.open_button.clicked.connect(self.open_experiment)
self.browser = self.browser_widget.browser
self.browser.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.browser.customContextMenuRequested.connect(self.browser_item_menu)
self.browser.itemChanged.connect(self.browser_item_changed)
self.inputs = InputsWidget(
self.procedure_class,
self.inputs,
parent=self
)
self.manager = Manager(self.plot, self.browser, log_level=self.log_level, parent=self)
self.manager.abort_returned.connect(self.abort_returned)
self.manager.queued.connect(self.queued)
self.manager.running.connect(self.running)
self.manager.finished.connect(self.finished)
self.manager.log.connect(self.log.handle)
if self.use_sequencer:
self.sequencer = SequencerWidget(
self.sequencer_inputs,
self.sequence_file,
parent=self
)
def _layout(self):
self.main = QtGui.QWidget(self)
inputs_dock = QtGui.QWidget(self)
inputs_vbox = QtGui.QVBoxLayout(self.main)
hbox = QtGui.QHBoxLayout()
hbox.setSpacing(10)
hbox.setContentsMargins(-1, 6, -1, 6)
hbox.addWidget(self.queue_button)
hbox.addWidget(self.abort_button)
hbox.addStretch()
if self.directory_input:
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.directory_label)
vbox.addWidget(self.directory_line)
vbox.addLayout(hbox)
if self.inputs_in_scrollarea:
inputs_scroll = QtGui.QScrollArea()
inputs_scroll.setWidgetResizable(True)
inputs_scroll.setFrameStyle(QtGui.QScrollArea.NoFrame)
self.inputs.setSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
inputs_scroll.setWidget(self.inputs)
inputs_vbox.addWidget(inputs_scroll, 1)
else:
inputs_vbox.addWidget(self.inputs)
if self.directory_input:
inputs_vbox.addLayout(vbox)
else:
inputs_vbox.addLayout(hbox)
inputs_vbox.addStretch(0)
inputs_dock.setLayout(inputs_vbox)
dock = QtGui.QDockWidget('Input Parameters')
dock.setWidget(inputs_dock)
dock.setFeatures(QtGui.QDockWidget.NoDockWidgetFeatures)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, dock)
if self.use_sequencer:
sequencer_dock = QtGui.QDockWidget('Sequencer')
sequencer_dock.setWidget(self.sequencer)
sequencer_dock.setFeatures(QtGui.QDockWidget.NoDockWidgetFeatures)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, sequencer_dock)
tabs = QtGui.QTabWidget(self.main)
tabs.addTab(self.plot_widget, "Results Graph")
tabs.addTab(self.log_widget, "Experiment Log")
splitter = QtGui.QSplitter(QtCore.Qt.Vertical)
splitter.addWidget(tabs)
splitter.addWidget(self.browser_widget)
self.plot_widget.setMinimumSize(100, 200)
vbox = QtGui.QVBoxLayout(self.main)
vbox.setSpacing(0)
vbox.addWidget(splitter)
self.main.setLayout(vbox)
self.setCentralWidget(self.main)
self.main.show()
self.resize(1000, 800)
def quit(self, evt=None):
if self.manager.is_running():
self.abort()
self.close()
def browser_item_changed(self, item, column):
if column == 0:
state = item.checkState(0)
experiment = self.manager.experiments.with_browser_item(item)
if state == 0:
self.plot.removeItem(experiment.curve)
else:
experiment.curve.x = self.plot_widget.plot_frame.x_axis
experiment.curve.y = self.plot_widget.plot_frame.y_axis
experiment.curve.update()
self.plot.addItem(experiment.curve)
def browser_item_menu(self, position):
item = self.browser.itemAt(position)
if item is not None:
experiment = self.manager.experiments.with_browser_item(item)
menu = QtGui.QMenu(self)
# Open
action_open = QtGui.QAction(menu)
action_open.setText("Open Data Externally")
action_open.triggered.connect(
lambda: self.open_file_externally(experiment.results.data_filename))
menu.addAction(action_open)
# Change Color
action_change_color = QtGui.QAction(menu)
action_change_color.setText("Change Color")
action_change_color.triggered.connect(
lambda: self.change_color(experiment))
menu.addAction(action_change_color)
# Remove
action_remove = QtGui.QAction(menu)
action_remove.setText("Remove Graph")
if self.manager.is_running():
if self.manager.running_experiment() == experiment: # Experiment running
action_remove.setEnabled(False)
action_remove.triggered.connect(lambda: self.remove_experiment(experiment))
menu.addAction(action_remove)
# Use parameters
action_use = QtGui.QAction(menu)
action_use.setText("Use These Parameters")
action_use.triggered.connect(
lambda: self.set_parameters(experiment.procedure.parameter_objects()))
menu.addAction(action_use)
menu.exec_(self.browser.viewport().mapToGlobal(position))
def remove_experiment(self, experiment):
reply = QtGui.QMessageBox.question(self, 'Remove Graph',
"Are you sure you want to remove the graph?",
QtGui.QMessageBox.Yes |
QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.manager.remove(experiment)
def show_experiments(self):
root = self.browser.invisibleRootItem()
for i in range(root.childCount()):
item = root.child(i)
item.setCheckState(0, QtCore.Qt.Checked)
def hide_experiments(self):
root = self.browser.invisibleRootItem()
for i in range(root.childCount()):
item = root.child(i)
item.setCheckState(0, QtCore.Qt.Unchecked)
def clear_experiments(self):
self.manager.clear()
def open_experiment(self):
dialog = ResultsDialog(self.procedure_class.DATA_COLUMNS, self.x_axis, self.y_axis)
if dialog.exec_():
filenames = dialog.selectedFiles()
for filename in map(str, filenames):
if filename in self.manager.experiments:
QtGui.QMessageBox.warning(self, "Load Error",
"The file %s cannot be opened twice." % os.path.basename(
filename))
elif filename == '':
return
else:
results = Results.load(filename)
experiment = self.new_experiment(results)
experiment.curve.update()
experiment.browser_item.progressbar.setValue(100.)
self.manager.load(experiment)
log.info('Opened data file %s' % filename)
def change_color(self, experiment):
color = QtGui.QColorDialog.getColor(
initial=experiment.curve.opts['pen'].color(), parent=self)
if color.isValid():
pixelmap = QtGui.QPixmap(24, 24)
pixelmap.fill(color)
experiment.browser_item.setIcon(0, QtGui.QIcon(pixelmap))
experiment.curve.setPen(pg.mkPen(color=color, width=2))
def open_file_externally(self, filename):
""" Method to open the datafile using an external editor or viewer. Uses the default
application to open a datafile of this filetype, but can be overridden by the child
class in order to open the file in another application of choice.
"""
system = platform.system()
if (system == 'Windows'):
# The empty argument after the start is needed to be able to cope correctly with filenames with spaces
proc = subprocess.Popen(['start', '', filename], shell=True)
elif (system == 'Linux'):
proc = subprocess.Popen(['xdg-open', filename])
elif (system == 'Darwin'):
proc = subprocess.Popen(['open', filename])
else:
raise Exception("{cls} method open_file_externally does not support {system} OS".format(cls=type(self).__name__,system=system))
def make_procedure(self):
if not isinstance(self.inputs, InputsWidget):
            raise Exception("ManagedWindow can not make a Procedure"
                            " without an InputsWidget type")
return self.inputs.get_procedure()
def new_curve(self, results, color=None, **kwargs):
if color is None:
color = pg.intColor(self.browser.topLevelItemCount() % 8)
return self.plot_widget.new_curve(results, color=color, **kwargs)
def new_experiment(self, results, curve=None):
if curve is None:
curve = self.new_curve(results)
browser_item = BrowserItem(results, curve)
return Experiment(results, curve, browser_item)
def set_parameters(self, parameters):
""" This method should be overwritten by the child class. The
parameters argument is a dictionary of Parameter objects.
The Parameters should overwrite the GUI values so that a user
can click "Queue" to capture the same parameters.
"""
if not isinstance(self.inputs, InputsWidget):
            raise Exception("ManagedWindow can not set parameters"
                            " without an InputsWidget")
self.inputs.set_parameters(parameters)
def queue(self):
"""
Abstract method, which must be overridden by the child class.
Implementations must call ``self.manager.queue(experiment)`` and pass
an ``experiment``
(:class:`~pymeasure.experiment.experiment.Experiment`) object which
contains the
:class:`~pymeasure.experiment.results.Results` and
:class:`~pymeasure.experiment.procedure.Procedure` to be run.
For example:
.. code-block:: python
def queue(self):
filename = unique_filename('results', prefix="data") # from pymeasure.experiment
procedure = self.make_procedure() # Procedure class was passed at construction
results = Results(procedure, filename)
experiment = self.new_experiment(results)
self.manager.queue(experiment)
"""
raise NotImplementedError(
"Abstract method ManagedWindow.queue not implemented")
def setup_plot(self, plot):
"""
This method does nothing by default, but can be overridden by the child
class in order to set up custom options for the plot
This method is called during the constructor, after all other set up has
been completed, and is provided as a convenience method to parallel Plotter.
:param plot: This window's PlotItem instance.
.. _PlotItem: http://www.pyqtgraph.org/documentation/graphicsItems/plotitem.html
"""
pass
def abort(self):
self.abort_button.setEnabled(False)
self.abort_button.setText("Resume")
self.abort_button.clicked.disconnect()
self.abort_button.clicked.connect(self.resume)
try:
self.manager.abort()
except:
log.error('Failed to abort experiment', exc_info=True)
self.abort_button.setText("Abort")
self.abort_button.clicked.disconnect()
self.abort_button.clicked.connect(self.abort)
def resume(self):
self.abort_button.setText("Abort")
self.abort_button.clicked.disconnect()
self.abort_button.clicked.connect(self.abort)
if self.manager.experiments.has_next():
self.manager.resume()
else:
self.abort_button.setEnabled(False)
def queued(self, experiment):
self.abort_button.setEnabled(True)
self.browser_widget.show_button.setEnabled(True)
self.browser_widget.hide_button.setEnabled(True)
self.browser_widget.clear_button.setEnabled(True)
def running(self, experiment):
self.browser_widget.clear_button.setEnabled(False)
def abort_returned(self, experiment):
if self.manager.experiments.has_next():
self.abort_button.setText("Resume")
self.abort_button.setEnabled(True)
else:
self.browser_widget.clear_button.setEnabled(True)
def finished(self, experiment):
if not self.manager.experiments.has_next():
self.abort_button.setEnabled(False)
self.browser_widget.clear_button.setEnabled(True)
@property
def directory(self):
if not self.directory_input:
raise ValueError("No directory input in the ManagedWindow")
return self.directory_line.text()
# TODO: Inherit from ManagedWindow to share code and features
class ManagedImageWindow(QtGui.QMainWindow):
"""
Abstract base class.
    The ManagedImageWindow provides an interface for inputting experiment
parameters, running several experiments
(:class:`~pymeasure.experiment.procedure.Procedure`), plotting
result curves, and listing the experiments conducted during a session.
    The ManagedImageWindow uses a Manager to control Workers in a Queue,
and provides a simple interface. The :meth:`~.queue` method must be
overridden by the child class.
.. seealso::
Tutorial :ref:`tutorial-managedwindow`
        A tutorial and example on the basic configuration and usage of ManagedImageWindow.
.. attribute:: plot
The `pyqtgraph.PlotItem`_ object for this window. Can be
accessed to further customise the plot view programmatically, e.g.,
display log-log or semi-log axes by default, change axis range, etc.
.. _pyqtgraph.PlotItem: http://www.pyqtgraph.org/documentation/graphicsItems/plotitem.html
"""
def __init__(self, procedure_class, x_axis, y_axis, z_axis=None, inputs=(), displays=(),
log_channel='', log_level=logging.INFO, parent=None):
super().__init__(parent)
app = QtCore.QCoreApplication.instance()
app.aboutToQuit.connect(self.quit)
self.procedure_class = procedure_class
self.inputs = inputs
self.displays = displays
self.log = logging.getLogger(log_channel)
self.log_level = log_level
log.setLevel(log_level)
self.log.setLevel(log_level)
self.x_axis, self.y_axis, self.z_axis = x_axis, y_axis, z_axis
self._setup_ui()
self._layout()
self.setup_im_plot(self.im_plot)
self.setup_plot(self.plot)
def _setup_ui(self):
self.log_widget = LogWidget()
self.log.addHandler(self.log_widget.handler) # needs to be in Qt context?
log.info("ManagedWindow connected to logging")
self.queue_button = QtGui.QPushButton('Queue', self)
self.queue_button.clicked.connect(self.queue)
self.abort_button = QtGui.QPushButton('Abort', self)
self.abort_button.setEnabled(False)
self.abort_button.clicked.connect(self.abort)
self.image_widget = ImageWidget(self.procedure_class.DATA_COLUMNS, self.x_axis, self.y_axis, self.z_axis)
self.plot_widget = PlotWidget(self.procedure_class.DATA_COLUMNS, self.x_axis, self.y_axis)
self.im_plot = self.image_widget.plot
self.plot = self.plot_widget.plot
self.browser_widget = BrowserWidget(
self.procedure_class,
self.displays,
[self.x_axis, self.y_axis],
parent=self
)
self.browser_widget.show_button.clicked.connect(self.show_experiments)
self.browser_widget.hide_button.clicked.connect(self.hide_experiments)
self.browser_widget.clear_button.clicked.connect(self.clear_experiments)
self.browser_widget.open_button.clicked.connect(self.open_experiment)
self.browser = self.browser_widget.browser
self.browser.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.browser.customContextMenuRequested.connect(self.browser_item_menu)
self.browser.itemChanged.connect(self.browser_item_changed)
self.inputs = InputsWidget(
self.procedure_class,
self.inputs,
parent=self
)
self.manager = ImageManager(self.plot, self.im_plot, self.browser, log_level=self.log_level, parent=self)
self.manager.abort_returned.connect(self.abort_returned)
self.manager.queued.connect(self.queued)
self.manager.running.connect(self.running)
self.manager.finished.connect(self.finished)
self.manager.log.connect(self.log.handle)
def _layout(self):
self.main = QtGui.QWidget(self)
inputs_dock = QtGui.QWidget(self)
inputs_vbox = QtGui.QVBoxLayout(self.main)
hbox = QtGui.QHBoxLayout()
hbox.setSpacing(10)
hbox.setContentsMargins(-1, 6, -1, 6)
hbox.addWidget(self.queue_button)
hbox.addWidget(self.abort_button)
hbox.addStretch()
inputs_vbox.addWidget(self.inputs)
inputs_vbox.addLayout(hbox)
inputs_vbox.addStretch()
inputs_dock.setLayout(inputs_vbox)
dock = QtGui.QDockWidget('Input Parameters')
dock.setWidget(inputs_dock)
dock.setFeatures(QtGui.QDockWidget.NoDockWidgetFeatures)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, dock)
tabs = QtGui.QTabWidget(self.main)
tabs.addTab(self.image_widget, "Results Image")
tabs.addTab(self.plot_widget, "Results Graph")
tabs.addTab(self.log_widget, "Experiment Log")
splitter = QtGui.QSplitter(QtCore.Qt.Vertical)
splitter.addWidget(tabs)
splitter.addWidget(self.browser_widget)
self.image_widget.setMinimumSize(100, 200)
self.plot_widget.setMinimumSize(100, 200)
vbox = QtGui.QVBoxLayout(self.main)
vbox.setSpacing(0)
vbox.addWidget(splitter)
self.main.setLayout(vbox)
self.setCentralWidget(self.main)
self.main.show()
self.resize(1000, 800)
def quit(self, evt=None):
if self.manager.is_running():
self.abort()
self.close()
def browser_item_changed(self, item, column):
if column == 0:
state = item.checkState(0)
experiment = self.manager.experiments.with_browser_item(item)
if state == 0:
self.im_plot.removeItem(experiment.image)
self.plot.removeItem(experiment.curve) # QUESTION: will this work?? probably need to modify experiment
else:
# add regular plot
experiment.curve.x = self.plot_widget.plot_frame.x_axis
experiment.curve.y = self.plot_widget.plot_frame.y_axis
experiment.curve.update()
self.plot.addItem(experiment.curve)
# add/update image plot
experiment.image.update_img()
self.im_plot.addItem(experiment.image)
def browser_item_menu(self, position):
item = self.browser.itemAt(position)
if item is not None:
experiment = self.manager.experiments.with_browser_item(item)
menu = QtGui.QMenu(self)
# Open
action_open = QtGui.QAction(menu)
action_open.setText("Open Data Externally")
action_open.triggered.connect(
lambda: self.open_file_externally(experiment.results.data_filename))
menu.addAction(action_open)
# Change Color
action_change_color = QtGui.QAction(menu)
action_change_color.setText("Change Color")
action_change_color.triggered.connect(
lambda: self.change_color(experiment))
menu.addAction(action_change_color)
# Remove
action_remove = QtGui.QAction(menu)
action_remove.setText("Remove Graph")
if self.manager.is_running():
if self.manager.running_experiment() == experiment: # Experiment running
action_remove.setEnabled(False)
action_remove.triggered.connect(lambda: self.remove_experiment(experiment))
menu.addAction(action_remove)
# Use parameters
action_use = QtGui.QAction(menu)
action_use.setText("Use These Parameters")
action_use.triggered.connect(
lambda: self.set_parameters(experiment.procedure.parameter_objects()))
menu.addAction(action_use)
menu.exec_(self.browser.viewport().mapToGlobal(position))
def remove_experiment(self, experiment):
reply = QtGui.QMessageBox.question(self, 'Remove Graph',
"Are you sure you want to remove the graph?",
QtGui.QMessageBox.Yes |
QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.manager.remove(experiment)
def show_experiments(self):
root = self.browser.invisibleRootItem()
for i in range(root.childCount()):
item = root.child(i)
item.setCheckState(0, QtCore.Qt.Checked)
def hide_experiments(self):
root = self.browser.invisibleRootItem()
for i in range(root.childCount()):
item = root.child(i)
item.setCheckState(0, QtCore.Qt.Unchecked)
def clear_experiments(self):
self.manager.clear()
def open_experiment(self):
dialog = ResultsDialog(self.procedure_class.DATA_COLUMNS, self.x_axis, self.y_axis)
if dialog.exec_():
filenames = dialog.selectedFiles()
for filename in map(str, filenames):
if filename in self.manager.experiments:
QtGui.QMessageBox.warning(self, "Load Error",
"The file %s cannot be opened twice." % os.path.basename(
filename))
elif filename == '':
return
else:
results = Results.load(filename)
experiment = self.new_experiment(results)
experiment.curve.update() # QUESTION: will this work?
experiment.image.update_img()
experiment.browser_item.progressbar.setValue(100.)
self.manager.load(experiment)
log.info('Opened data file %s' % filename)
def change_color(self, experiment):
color = QtGui.QColorDialog.getColor(
initial=experiment.curve.opts['pen'].color(), parent=self)
if color.isValid():
pixelmap = QtGui.QPixmap(24, 24)
pixelmap.fill(color)
experiment.browser_item.setIcon(0, QtGui.QIcon(pixelmap))
experiment.curve.setPen(pg.mkPen(color=color, width=2))
def open_file_externally(self, filename):
""" Method to open the datafile using an external editor or viewer. Uses the default
application to open a datafile of this filetype, but can be overridden by the child
class in order to open the file in another application of choice.
"""
system = platform.system()
if (system == 'Windows'):
# The empty argument after the start is needed to be able to cope correctly with filenames with spaces
proc = subprocess.Popen(['start', '', filename], shell=True)
elif (system == 'Linux'):
proc = subprocess.Popen(['xdg-open', filename])
elif (system == 'Darwin'):
proc = subprocess.Popen(['open', filename])
else:
raise Exception("{cls} method open_file_externally does not support {system} OS".format(cls=type(self).__name__,system=system))
def make_procedure(self):
if not isinstance(self.inputs, InputsWidget):
raise Exception("ManagedWindow can not make a Procedure"
" without an InputsWidget type")
return self.inputs.get_procedure()
def new_curve(self, results, color=None, **kwargs):
if color is None:
color = pg.intColor(self.browser.topLevelItemCount() % 8)
return self.plot_widget.new_curve(results, color=color, **kwargs)
def new_image(self, results, **kwargs):
return self.image_widget.new_image(results, **kwargs)
    # TODO: make sure whatever calls this can supply both if needed
def new_experiment(self, results, image=None, curve=None):
if image is None:
image = self.new_image(results)
if curve is None:
curve = self.new_curve(results)
browser_item = BrowserItem(results, curve)
return ImageExperiment(results, curve, image, browser_item)
def set_parameters(self, parameters):
""" This method should be overwritten by the child class. The
parameters argument is a dictionary of Parameter objects.
The Parameters should overwrite the GUI values so that a user
can click "Queue" to capture the same parameters.
"""
if not isinstance(self.inputs, InputsWidget):
raise Exception("ManagedWindow can not set parameters"
" without an InputsWidget")
self.inputs.set_parameters(parameters)
def queue(self):
"""
Abstract method, which must be overridden by the child class.
Implementations must call ``self.manager.queue(experiment)`` and pass
an ``experiment``
(:class:`~pymeasure.experiment.experiment.Experiment`) object which
contains the
:class:`~pymeasure.experiment.results.Results` and
:class:`~pymeasure.experiment.procedure.Procedure` to be run.
For example:
.. code-block:: python
def queue(self):
filename = unique_filename('results', prefix="data") # from pymeasure.experiment
procedure = self.make_procedure() # Procedure class was passed at construction
results = Results(procedure, filename)
experiment = self.new_experiment(results)
self.manager.queue(experiment)
"""
raise NotImplementedError(
"Abstract method ManagedWindow.queue not implemented")
def setup_plot(self, plot):
"""
This method does nothing by default, but can be overridden by the child
class in order to set up custom options for the plot
This method is called during the constructor, after all other set up has
been completed, and is provided as a convenience method to parallel Plotter.
:param plot: This window's PlotItem instance.
.. _PlotItem: http://www.pyqtgraph.org/documentation/graphicsItems/plotitem.html
"""
pass
def setup_im_plot(self, im_plot):
"""
This method does nothing by default, but can be overridden by the child
class in order to set up custom options for the image plot
This method is called during the constructor, after all other set up has
been completed, and is provided as a convenience method to parallel Plotter.
:param im_plot: This window's ImageItem instance.
"""
pass
def abort(self):
self.abort_button.setEnabled(False)
self.abort_button.setText("Resume")
self.abort_button.clicked.disconnect()
self.abort_button.clicked.connect(self.resume)
try:
self.manager.abort()
except:
log.error('Failed to abort experiment', exc_info=True)
self.abort_button.setText("Abort")
self.abort_button.clicked.disconnect()
self.abort_button.clicked.connect(self.abort)
def resume(self):
self.abort_button.setText("Abort")
self.abort_button.clicked.disconnect()
self.abort_button.clicked.connect(self.abort)
if self.manager.experiments.has_next():
self.manager.resume()
else:
self.abort_button.setEnabled(False)
def queued(self, experiment):
self.abort_button.setEnabled(True)
self.browser_widget.show_button.setEnabled(True)
self.browser_widget.hide_button.setEnabled(True)
self.browser_widget.clear_button.setEnabled(True)
def running(self, experiment):
self.browser_widget.clear_button.setEnabled(False)
def abort_returned(self, experiment):
if self.manager.experiments.has_next():
self.abort_button.setText("Resume")
self.abort_button.setEnabled(True)
else:
self.browser_widget.clear_button.setEnabled(True)
def finished(self, experiment):
if not self.manager.experiments.has_next():
self.abort_button.setEnabled(False)
self.browser_widget.clear_button.setEnabled(True)
|
mit
| -5,536,501,795,903,811,000 | 38.315678 | 139 | 0.630975 | false |
openstack/tripleo-heat-templates
|
tripleo_heat_templates/tests/test_environment_generator.py
|
1
|
18885
|
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
import tempfile
from unittest import mock
from oslotest import base
import six
import testscenarios
from tripleo_heat_templates import environment_generator
load_tests = testscenarios.load_tests_apply_scenarios
basic_template = '''
parameters:
FooParam:
default: foo
description: Foo description
type: string
BarParam:
default: 42
description: Bar description
type: number
EndpointMap:
default: {}
description: Parameter that should not be included by default
type: json
resources:
# None
'''
basic_private_template = '''
parameters:
FooParam:
default: foo
description: Foo description
type: string
_BarParam:
default: 42
description: Bar description
type: number
resources:
# None
'''
mandatory_template = '''
parameters:
FooParam:
description: Mandatory param
type: string
resources:
# None
'''
index_template = '''
parameters:
FooParam:
description: Param with %index% as its default
type: string
default: '%index%'
resources:
# None
'''
multiline_template = '''
parameters:
FooParam:
description: |
Parameter with
multi-line description
type: string
default: ''
resources:
# None
'''
basic_role_param_template = '''
parameters:
RoleParam:
description: Role param description
type: string
default: ''
FooParam:
description: Foo description
default: foo
type: string
resources:
# None
'''
multiline_role_param_template = '''
parameters:
RoleParam:
description: |
Role Parameter with
multi-line description
type: string
default: ''
FooParam:
description: |
Parameter with
multi-line description
type: string
default: ''
resources:
# None
'''
class GeneratorTestCase(base.BaseTestCase):
content_scenarios = [
('basic',
{'template': basic_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Bar description
# Type: number
BarParam: 42
# Foo description
# Type: string
FooParam: foo
''',
}),
('basic-one-param',
{'template': basic_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters:
- FooParam
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Foo description
# Type: string
FooParam: foo
''',
}),
('basic-static-param',
{'template': basic_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
static:
- BarParam
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Foo description
# Type: string
FooParam: foo
# ******************************************************
# Static parameters - these are values that must be
# included in the environment but should not be changed.
# ******************************************************
# Bar description
# Type: number
BarParam: 42
# *********************
# End static parameters
# *********************
''',
}),
('basic-static-param-sample',
{'template': basic_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
static:
- BarParam
sample_values:
BarParam: 1
FooParam: ''
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Foo description
# Type: string
FooParam: ''
# ******************************************************
# Static parameters - these are values that must be
# included in the environment but should not be changed.
# ******************************************************
# Bar description
# Type: number
BarParam: 1
# *********************
# End static parameters
# *********************
''',
}),
('basic-private',
{'template': basic_private_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Foo description
# Type: string
FooParam: foo
''',
}),
('mandatory',
{'template': mandatory_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Mandatory param
# Mandatory. This parameter must be set by the user.
# Type: string
FooParam: <None>
''',
}),
('basic-sample',
{'template': basic_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
sample_values:
FooParam: baz
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Bar description
# Type: number
BarParam: 42
# Foo description
# Type: string
FooParam: baz
''',
}),
('basic-resource-registry',
{'template': basic_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
resource_registry:
OS::TripleO::FakeResource: fake-filename.yaml
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Bar description
# Type: number
BarParam: 42
# Foo description
# Type: string
FooParam: foo
resource_registry:
OS::TripleO::FakeResource: fake-filename.yaml
''',
}),
('basic-hidden',
{'template': basic_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
sample_values:
EndpointMap: |-2
foo: bar
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Bar description
# Type: number
BarParam: 42
# Parameter that should not be included by default
# Type: json
EndpointMap:
foo: bar
# Foo description
# Type: string
FooParam: foo
''',
}),
('missing-param',
{'template': basic_template,
'exception': RuntimeError,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters:
- SomethingNonexistent
''',
'expected_output': None,
}),
('percent-index',
{'template': index_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Param with %index% as its default
# Type: string
FooParam: '%index%'
''',
}),
('nested',
{'template': multiline_template,
'exception': None,
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
children:
- name: nested
title: Nested Environment
description: Nested description
sample_values:
FooParam: bar
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Parameter with
# multi-line description
# Type: string
FooParam: ''
''',
'nested_output': '''# title: Nested Environment
# description: |
# Nested description
parameter_defaults:
# Parameter with
# multi-line description
# Type: string
FooParam: bar
''',
}),
('multi-line-desc',
{'template': multiline_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
files:
foo.yaml:
parameters: all
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
parameter_defaults:
# Parameter with
# multi-line description
# Type: string
FooParam: ''
''',
}),
('basic_role_param',
{'template': basic_role_param_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic_role_param
title: Basic Role Parameters Environment
description: Basic description
files:
foo.yaml:
RoleParameters:
- RoleParam
''',
'expected_output': '''# title: Basic Role Parameters Environment
# description: |
# Basic description
parameter_defaults:
RoleParameters:
# Role param description
# Type: string
RoleParam: ''
''',
}),
('multiline_role_param',
{'template': multiline_role_param_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: multiline_role_param
title: Multiline Role Parameters Environment
description: Multiline description
files:
foo.yaml:
RoleParameters:
- RoleParam
''',
'expected_output': '''# title: Multiline Role Parameters Environment
# description: |
# Multiline description
parameter_defaults:
RoleParameters:
# Role Parameter with
# multi-line description
# Type: string
RoleParam: ''
''',
}),
('Basic mix params',
{'template': basic_role_param_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic_mix_params
title: Basic Mix Parameters Environment
description: Basic description
files:
foo.yaml:
parameters:
- FooParam
RoleParameters:
- RoleParam
''',
'expected_output': '''# title: Basic Mix Parameters Environment
# description: |
# Basic description
parameter_defaults:
# Foo description
# Type: string
FooParam: foo
RoleParameters:
# Role param description
# Type: string
RoleParam: ''
''',
}),
('Multiline mix params',
{'template': multiline_role_param_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: multiline_mix_params
title: Multiline mix params Environment
description: Multiline description
files:
foo.yaml:
parameters:
- FooParam
RoleParameters:
- RoleParam
''',
'expected_output': '''# title: Multiline mix params Environment
# description: |
# Multiline description
parameter_defaults:
# Parameter with
# multi-line description
# Type: string
FooParam: ''
RoleParameters:
# Role Parameter with
# multi-line description
# Type: string
RoleParam: ''
''',
}),
('Basic role static param',
{'template': basic_role_param_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic_role_static_param
    title: Basic Role Static Params Environment
    description: Basic Role Static Params description
files:
foo.yaml:
parameters:
- FooParam
RoleParameters:
- RoleParam
static:
- FooParam
- RoleParam
''',
      'expected_output': '''# title: Basic Role Static Params Environment
# description: |
#   Basic Role Static Params description
parameter_defaults:
# ******************************************************
# Static parameters - these are values that must be
# included in the environment but should not be changed.
# ******************************************************
# Foo description
# Type: string
FooParam: foo
# *********************
# End static parameters
# *********************
RoleParameters:
# ******************************************************
# Static parameters - these are values that must be
# included in the environment but should not be changed.
# ******************************************************
# Role param description
# Type: string
RoleParam: ''
# *********************
# End static parameters
# *********************
''',
}),
('Multiline role static param',
{'template': multiline_role_param_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
    name: multiline_role_static_param
    title: Multiline Role Static Params Environment
    description: Multiline Role Static Params description
files:
foo.yaml:
parameters:
- FooParam
RoleParameters:
- RoleParam
static:
- FooParam
- RoleParam
''',
      'expected_output': '''# title: Multiline Role Static Params Environment
# description: |
#   Multiline Role Static Params description
parameter_defaults:
# ******************************************************
# Static parameters - these are values that must be
# included in the environment but should not be changed.
# ******************************************************
# Parameter with
# multi-line description
# Type: string
FooParam: ''
# *********************
# End static parameters
# *********************
RoleParameters:
# ******************************************************
# Static parameters - these are values that must be
# included in the environment but should not be changed.
# ******************************************************
# Role Parameter with
# multi-line description
# Type: string
RoleParam: ''
# *********************
# End static parameters
# *********************
''',
}),
('no-files',
{'template': basic_template,
'exception': None,
'nested_output': '',
'input_file': '''environments:
-
name: basic
title: Basic Environment
description: Basic description
resource_registry:
foo: bar
''',
'expected_output': '''# title: Basic Environment
# description: |
# Basic description
resource_registry:
foo: bar
''',
}),
]
@classmethod
def generate_scenarios(cls):
cls.scenarios = testscenarios.multiply_scenarios(
cls.content_scenarios)
def test_generator(self):
fake_input = io.StringIO(six.text_type(self.input_file))
fake_template = io.StringIO(six.text_type(self.template))
_, fake_output_path = tempfile.mkstemp()
fake_output = open(fake_output_path, 'w')
with mock.patch('tripleo_heat_templates.environment_generator.open',
create=True) as mock_open:
mock_se = [fake_input, fake_template, fake_output]
if 'files:' not in self.input_file:
# No files were specified so that open call won't happen
mock_se.remove(fake_template)
if self.nested_output:
_, fake_nested_output_path = tempfile.mkstemp()
fake_nested_output = open(fake_nested_output_path, 'w')
fake_template2 = io.StringIO(six.text_type(self.template))
mock_se = [fake_input, fake_template, fake_output,
fake_template2, fake_nested_output]
mock_open.side_effect = mock_se
if not self.exception:
environment_generator.generate_environments('ignored.yaml',
'environments')
else:
self.assertRaises(self.exception,
environment_generator.generate_environments,
'ignored.yaml',
'environments')
return
expected = environment_generator._FILE_HEADER + self.expected_output
with open(fake_output_path) as f:
self.assertEqual(expected, f.read())
if self.nested_output:
with open(fake_nested_output_path) as f:
expected = (environment_generator._FILE_HEADER +
self.nested_output)
self.assertEqual(expected, f.read())
GeneratorTestCase.generate_scenarios()
|
apache-2.0
| 992,682,378,127,931,500 | 23.654047 | 80 | 0.562775 | false |
t11e/django
|
django/core/cache/backends/filebased.py
|
1
|
4639
|
"File-based cache backend"
import os
import time
import shutil
try:
import cPickle as pickle
except ImportError:
import pickle
from django.core.cache.backends.base import BaseCache
from django.utils.hashcompat import md5_constructor
class CacheClass(BaseCache):
def __init__(self, dir, params):
BaseCache.__init__(self, params)
max_entries = params.get('max_entries', 300)
try:
self._max_entries = int(max_entries)
except (ValueError, TypeError):
self._max_entries = 300
cull_frequency = params.get('cull_frequency', 3)
try:
self._cull_frequency = int(cull_frequency)
except (ValueError, TypeError):
self._cull_frequency = 3
self._dir = dir
if not os.path.exists(self._dir):
self._createdir()
def add(self, key, value, timeout=None):
if self.has_key(key):
return False
self.set(key, value, timeout)
return True
def get(self, key, default=None):
fname = self._key_to_file(key)
try:
f = open(fname, 'rb')
exp = pickle.load(f)
now = time.time()
if exp < now:
f.close()
self._delete(fname)
else:
return pickle.load(f)
except (IOError, OSError, EOFError, pickle.PickleError):
pass
return default
def set(self, key, value, timeout=None):
fname = self._key_to_file(key)
dirname = os.path.dirname(fname)
if timeout is None:
timeout = self.default_timeout
self._cull()
try:
if not os.path.exists(dirname):
os.makedirs(dirname)
f = open(fname, 'wb')
now = time.time()
pickle.dump(now + timeout, f, pickle.HIGHEST_PROTOCOL)
pickle.dump(value, f, pickle.HIGHEST_PROTOCOL)
except (IOError, OSError):
pass
def delete(self, key):
try:
self._delete(self._key_to_file(key))
except (IOError, OSError):
pass
def _delete(self, fname):
os.remove(fname)
try:
# Remove the 2 subdirs if they're empty
dirname = os.path.dirname(fname)
os.rmdir(dirname)
os.rmdir(os.path.dirname(dirname))
except (IOError, OSError):
pass
def has_key(self, key):
fname = self._key_to_file(key)
try:
f = open(fname, 'rb')
exp = pickle.load(f)
now = time.time()
if exp < now:
f.close()
self._delete(fname)
return False
else:
return True
except (IOError, OSError, EOFError, pickle.PickleError):
return False
def _cull(self):
if int(self._num_entries) < self._max_entries:
return
try:
filelist = os.listdir(self._dir)
except (IOError, OSError):
return
if self._cull_frequency == 0:
doomed = filelist
else:
doomed = [os.path.join(self._dir, k) for (i, k) in enumerate(filelist) if i % self._cull_frequency == 0]
for topdir in doomed:
try:
for root, _, files in os.walk(topdir):
for f in files:
self._delete(os.path.join(root, f))
except (IOError, OSError):
pass
def _createdir(self):
try:
os.makedirs(self._dir)
except OSError:
            raise EnvironmentError("Cache directory '%s' does not exist and could not be created" % self._dir)
def _key_to_file(self, key):
"""
Convert the filename into an md5 string. We'll turn the first couple
bits of the path into directory prefixes to be nice to filesystems
that have problems with large numbers of files in a directory.
Thus, a cache key of "foo" gets turnned into a file named
``{cache-dir}ac/bd/18db4cc2f85cedef654fccc4a4d8``.
"""
path = md5_constructor(key.encode('utf-8')).hexdigest()
path = os.path.join(path[:2], path[2:4], path[4:])
return os.path.join(self._dir, path)
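    # Illustrative sketch (not part of the original backend): following the
    # scheme described above, the key "foo" hashes to
    # "acbd18db4cc2f85cedef654fccc4a4d8", so its entry is stored at
    # "<cache-dir>/ac/bd/18db4cc2f85cedef654fccc4a4d8"; set() pickles the
    # expiry timestamp followed by the value into that file, and get() reads
    # them back in the same order.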
def _get_num_entries(self):
count = 0
for _,_,files in os.walk(self._dir):
count += len(files)
return count
_num_entries = property(_get_num_entries)
def clear(self):
try:
shutil.rmtree(self._dir)
except (IOError, OSError):
pass
|
bsd-3-clause
| -217,061,094,118,470,720 | 28.176101 | 116 | 0.533951 | false |
Wikidata/StrepHit
|
doc/update_doc.py
|
1
|
3071
|
import requests
import click
import subprocess
import os
class WikimediaApi:
logged_in = False
def __init__(self, endpoint='https://www.mediawiki.org/w/api.php'):
self.session = requests.Session()
self.api_endpoint = endpoint
def call_api(self, action, **kwargs):
r = self.session.post(self.api_endpoint + '?format=json&action=' + action,
data=kwargs)
r.raise_for_status()
return r.json()
def get_token(self, token_type):
resp = self.call_api('query', meta='tokens', type=token_type)
self.logged_in = True
return resp['query']['tokens'].values()[0]
def login(self, user, password):
if not self.logged_in:
token = self.get_token('login')
resp = self.call_api('login', lgname=user, lgpassword=password,
lgtoken=token)
assert resp['login']['result'] == 'Success', \
'could not login: ' + repr(resp)
self.logged_in = True
def logout(self):
if self.logged_in:
self.call_api('logout')
self.logged_in = False
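# Usage sketch for WikimediaApi (credentials and page title are hypothetical,
# not part of the original script):
#     wiki = WikimediaApi()
#     wiki.login('ExampleBot', 'example-password')
#     token = wiki.get_token('csrf')
#     wiki.call_api('edit', title='Project:Sandbox', text='hello world',
#                   contentformat='text/x-wiki', token=token)
#     wiki.logout()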
@click.command()
@click.argument('username')
@click.argument('password')
@click.argument('page')
@click.option('--upload/--build-only', default=True)
def main(username, password, page, upload):
""" Builds the documentation and uploads it to the given mediawiki page
"""
base_dir = 'build/wikisyntax'
revision = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip()
summary = 'doc updated to revision ' + revision
print 'Current revision is', revision
print 'Building the documentation ...'
subprocess.check_output(['make', 'clean', 'apidoc', 'wikisyntax', 'APIDOCOPTS=-f -M -T'],
stderr=subprocess.STDOUT)
page_titles = set([x for x in os.listdir(base_dir) if x not in {'modules.wiki', 'strephit.wiki', 'index.wiki'}])
pages = ['index.wiki'] + sorted(page_titles)
content = ''
for each in pages:
with open(os.path.join(base_dir, each)) as f:
content += f.read() + '\n'
print 'Uploading ...'
if upload:
wiki = WikimediaApi()
try:
wiki.login(username, password)
token = wiki.get_token('csrf')
resp = wiki.call_api('edit',
title=page,
text=content,
contentformat='text/x-wiki',
bot=True, token=token,
summary=summary)
assert resp.get('edit', {}).get('result') == 'Success', \
'could not edit: ' + repr(resp)
print summary
finally:
wiki.logout()
else:
try:
with open(page, 'w') as f:
f.write(content)
except (OSError, IOError):
pass
print summary
print 'Test run - documentation was NOT updated'
if __name__ == '__main__':
main()
|
gpl-3.0
| -4,140,018,537,711,046,700 | 30.659794 | 116 | 0.537284 | false |
TeamWorkQualityReport/TeamWorkQualityReport
|
Backend/Analise/Analisador.py
|
1
|
9696
|
import numpy as np
from Analise.IAnalisador import IAnalisador
from Analise.entidades.formulario import Formulario
from Analise.entidades.variavel import Variavel
from Analise.entidades.questao import Questao
from Analise.entidades.relatorioResultado import RelatorioResultado
from Analise.entidades.constructo import Constructo
from Analise.entidades.variavelResultado import VariavelResultado
from Analise.entidades.correlacaoVariaveis import CorrelacaoVariaveis
from scipy.stats.stats import pearsonr
class Analisador(IAnalisador):
respostaInvertido = [0,5,4,3,2,1]
def cronbachAlpha(self, itemscores):
        variance_sum = float(itemscores.var(axis=0, ddof=1).sum()) ## TODO check that the axes being used are correct
        total_var = float(itemscores.sum(axis=1).var(ddof=1)) ## axis=0 and axis=1 are inverted relative to the original implementation,
        nitems = itemscores.shape[1] ## which was generating negative values for the alpha
alfa = (nitems/float(nitems - 1)*(1 - (variance_sum / total_var)))
return alfa
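    # Worked check (illustrative, not part of the original class): for two
    # perfectly correlated items,
    #     Analisador().cronbachAlpha(np.array([[1, 1], [2, 2], [3, 3]]))
    # the item variances sum to 2.0, the variance of the row totals is 4.0,
    # and the formula gives 2/1 * (1 - 2.0/4.0) = 1.0, the expected alpha.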
def calcularAlfaVariavies(self, questoes):
alfas = []
x,y = questoes.shape
        for i in range(0,y): # COLUMNS = FOR EACH VARIABLE
coluna = questoes[0:x, i]
variavelRespostas = np.empty(shape=(0,len(coluna[0].questoes)))
for v in coluna:
respostas = []
for q in v.questoes:
respostaQuestao = None
if q.invertido:
respostaQuestao = self.respostaInvertido[q.resposta]
else:
respostaQuestao = q.resposta
respostas.append(respostaQuestao)
variavelRespostas = np.vstack([variavelRespostas, respostas])
alfa = self.cronbachAlpha(variavelRespostas)
alfas.append(alfa)
return alfas
def avaliarVariaveis(self, variaveis, mapeamentoVariaveis, constructos, alfas):
media = np.mean(variaveis, axis=0)
desvio = np.std(variaveis, axis=0)
correlacao = np.corrcoef(variaveis, rowvar=False)
for c in constructos:
c.variaveis = []
for v in range(0,variaveis.shape[1]):
v_resultado = VariavelResultado(mapeamentoVariaveis[v][0], alfas[v], media[v], desvio[v], self.criarCorrelacoes(v, correlacao[v], mapeamentoVariaveis))
constructos[mapeamentoVariaveis[v][1]].variaveis.append(v_resultado)
resultado = RelatorioResultado(None, None, None, constructos)
return resultado
def mapearVariaveis(self, formulario):
mapeamentoVariaveis = []
qtdVariavies = 0;
for i in range(0, len(formulario.constructos)):
for v in formulario.constructos[i].variaveis:
mapeamentoVariaveis.append((v.nome, i))
qtdVariavies += 1
return mapeamentoVariaveis, qtdVariavies
def avaliarFormulario(self, formularios):
mapeamentoVariaveis, qtdVariavies = self.mapearVariaveis(formularios[0])
        resultado = np.empty(shape=(0,qtdVariavies)) ## matrix with the variable means for each form
        variaveisForm = np.empty(shape=(0,qtdVariavies)) ## matrix with the variables for each form; used to compute the alpha
for f in formularios:
variaveisMedias = []
for c in f.constructos:
variaveis = []
for v in c.variaveis:
respostas = []
variaveis.append(v)
for q in v.questoes:
respostaQuestao = None
if q.invertido:
respostaQuestao = self.respostaInvertido[q.resposta]
else:
respostaQuestao = q.resposta
respostas.append(respostaQuestao)
mediaVariavel = np.mean(np.array(respostas))
variaveisMedias.append(mediaVariavel)
resultado = np.vstack([resultado, variaveisMedias])
variaveisForm = np.vstack([variaveisForm, variaveis])
constructos = formularios[0].constructos
alfas = self.calcularAlfaVariavies(variaveisForm)
resultado = self.avaliarVariaveis(resultado, mapeamentoVariaveis, constructos, alfas)
        resultado.nome = formularios[0].nome
print ("Resultado")
print(resultado)
return resultado
def gerarRelatorio(self, formularios):
return self.avaliarFormulario(formularios)
def criarCorrelacoes(self, variavelCorrente, correlacoes, mapeamentoVariaveis):
correlacoesVariaveis = []
for i in range(0, len(correlacoes)):
correlacoesVariaveis.append(CorrelacaoVariaveis("",mapeamentoVariaveis[i][0], correlacoes[i]))
return correlacoesVariaveis
def gerarRelatorioTeste(self):
relatorio = RelatorioResultado(None, None, None, [])
relatorio.nome = "TWQ - Teste"
relatorio.dataInicial = "10/10/2017"
relatorio.dataFinal = "16/10/2017"
constructo1 = Constructo("TWQ", [])
correlacoesV1 = [CorrelacaoVariaveis(1, "Comunicacao", 0.0), CorrelacaoVariaveis(2, "Coordenacao", 0.5), CorrelacaoVariaveis(3, "Balanco da contribuicao", 0.5), CorrelacaoVariaveis(4, "Suporte mutuo", 0.5), CorrelacaoVariaveis(5, "Esforco", 0.5), CorrelacaoVariaveis(6, "Coesao", 0.5)]
correlacoesV2 = [CorrelacaoVariaveis(1, "Comunicacao", 0.5), CorrelacaoVariaveis(2, "Coordenacao", 0.0), CorrelacaoVariaveis(3, "Balanco da contribuicao", 0.5), CorrelacaoVariaveis(4, "Suporte mutuo", 0.5), CorrelacaoVariaveis(5, "Esforco", 0.5), CorrelacaoVariaveis(6, "Coesao", 0.5)]
correlacoesV3 = [CorrelacaoVariaveis(1, "Comunicacao", 0.5), CorrelacaoVariaveis(2, "Coordenacao", 0.5), CorrelacaoVariaveis(3, "Balanco da contribuicao", 0.0), CorrelacaoVariaveis(4, "Suporte mutuo", 0.5), CorrelacaoVariaveis(5, "Esforco", 0.5), CorrelacaoVariaveis(6, "Coesao", 0.5)]
correlacoesV4 = [CorrelacaoVariaveis(1, "Comunicacao", 0.5), CorrelacaoVariaveis(2, "Coordenacao", 0.5), CorrelacaoVariaveis(3, "Balanco da contribuicao", 0.5), CorrelacaoVariaveis(4, "Suporte mutuo", 0.0), CorrelacaoVariaveis(5, "Esforco", 0.5), CorrelacaoVariaveis(6, "Coesao", 0.5)]
correlacoesV5 = [CorrelacaoVariaveis(1, "Comunicacao", 0.5), CorrelacaoVariaveis(2, "Coordenacao", 0.5), CorrelacaoVariaveis(3, "Balanco da contribuicao", 0.5), CorrelacaoVariaveis(4, "Suporte mutuo", 0.5), CorrelacaoVariaveis(5, "Esforco", 0.0), CorrelacaoVariaveis(6, "Coesao", 0.5)]
correlacoesV6 = [CorrelacaoVariaveis(1, "Comunicacao", 0.5), CorrelacaoVariaveis(2, "Coordenacao", 0.5), CorrelacaoVariaveis(3, "Balanco da contribuicao", 0.5), CorrelacaoVariaveis(4, "Suporte mutuo", 0.5), CorrelacaoVariaveis(5, "Esforco", 0.5), CorrelacaoVariaveis(6, "Coesao", 0.0)]
variavel1 = VariavelResultado("Comunicacao", 0.73, 3.98, 0.26, correlacoesV1)
variavel2 = VariavelResultado("Coordenacao", 0.72, 3.78, 0.29, correlacoesV2)
variavel3 = VariavelResultado("B. contribuicao", 0.58, 3.96, 0.30, correlacoesV3)
variavel4 = VariavelResultado("Suporte mutuo", 0.85, 4.06, 0.29, correlacoesV4)
variavel5 = VariavelResultado("Esforco \t", 0.76, 3.98, 0.34, correlacoesV5)
variavel6 = VariavelResultado("Coesao \t", 0.86, 3.92, 0.28, correlacoesV6)
constructo1.variaveis = [variavel1, variavel2, variavel3, variavel4, variavel5, variavel6]
relatorio.constructos.append(constructo1)
print ("Resultado: ")
print (relatorio)
print ("Correlacoes entre as variaveis: ")
print ("\t\t\t 1 \t 2 \t 3 \t 4 \t 5 \t 6")
for v in constructo1.variaveis:
result = ""
for c in v.correlacoes:
result += "\t" + c.valor.__str__()
print (" - ", v.nome, result)
return relatorio
def formulariosMockup(self):
formularios = []
formulario = Formulario("TWQ", "Form mock up TWQ", 1, None)
constructo1 = Constructo("TWQ", [])
variavel1 = Variavel("Comunicacao", [Questao("Pergunta 1", False, 4), Questao("Pergunta 2", False, 3)])
variavel2 = Variavel("Coordenacao", [Questao("Pergunta 1", False, 1), Questao("Pergunta 2", False, 5)])
constructo1.variaveis = [variavel1, variavel2]
formulario.constructos = [constructo1]
formularios.append(formulario)
formulario = Formulario("TWQ", "Form mock up TWQ", 2, None)
constructo1 = Constructo("TWQ", [])
variavel1 = Variavel("Comunicacao", [Questao("Pergunta 1", False, 2), Questao("Pergunta 2", False, 2)])
variavel2 = Variavel("Coordenacao", [Questao("Pergunta 1", False, 3), Questao("Pergunta 2", False, 4)])
constructo1.variaveis = [variavel1, variavel2]
formulario.constructos = [constructo1]
formularios.append(formulario)
formulario = Formulario("TWQ", "Form mock up TWQ", 3, None)
constructo1 = Constructo("TWQ", [])
variavel1 = Variavel("Comunicacao", [Questao("Pergunta 1", False, 4), Questao("Pergunta 2", False, 5)])
variavel2 = Variavel("Coordenacao", [Questao("Pergunta 1", False, 4), Questao("Pergunta 2", False, 4)])
constructo1.variaveis = [variavel1, variavel2]
formulario.constructos = [constructo1]
formularios.append(formulario)
return formularios
#analisador = Analisador()
#formularios = analisador.formulariosMockup()
#analisador.gerarRelatorio(formularios)
#analisador.gerarRelatorioTeste()
|
gpl-3.0
| -2,382,247,268,170,386,400 | 49.5 | 293 | 0.649031 | false |
cronuspaas/cronusagent
|
agent/agent/lib/agent_thread/download_thread.py
|
1
|
13166
|
#pylint: disable=W0703,W0511,W0402,R0911,R0915,R0912,W0331,W0612,R0904,W0105
""" Thread to download a package """
from agent.lib import utils, contextutils, configutil
from agent.lib.agent_thread.agent_thread import AgentThread
from agent.lib.errors import Errors, FileNotFoundError, AgentException
from agent.lib.package import PackageUtil
from agent.lib.utils import loadPropFile
from random import randint
import json
import logging
import os
import pylons
import time
import traceback
import urlgrabber
LOG = logging.getLogger(__name__)
class DownloadThread(AgentThread):
""" Separate thread to start download """
def __init__(self, threadMgr, packageUri, packageloc, path = None, category = None, parentId = None):
AgentThread.__init__(self, threadMgr, cat = category, name = 'download_thread', parentId = parentId)
self._mergeOnFound = True
self.__path = pylons.config['repo_root']
if path is not None:
self.__path = path
# check to see if the package path exists
if (not os.path.isdir(self.__path)):
msg = 'Package path(%s) does not exist' % self.__path
LOG.error(msg)
raise AgentException(Errors.PACKAGE_PATH_ERROR, msg)
self.__uriDict = PackageUtil.parseUri(packageUri, self.__path, packageloc)
self.__prop = {}
self.__error = None
self.__progress = 0.0
self.__timeouts = None
self.__skipProp = configutil.getConfigAsBool('download_skip_prop')
def getUriDict(self):
''' Get the package info dictionary '''
return self.__uriDict
def beforeRun(self):
""" set external timeout values if any """
# set timeout
if contextutils.existcontext(self, 'thread_timeout'):
self._timeout = contextutils.getcontext(self, 'thread_timeout', self._timeout)
if contextutils.existcontext(self, 'thread_progress_timeout'):
self._progressTimeout = contextutils.getcontext(self, 'thread_progress_timeout', self._progressTimeout)
def doRun(self):
""" Progress Info:
0 : start
1 - 2 : prop file download
3 - 5 : checking existing data (hash verification)
6 - 8 : allocating disk space (in case of pre_allocate
9 - 99 : downloading data
100 : download complete.
"""
try:
self.__startDownload()
except AgentException as exc:
msg = 'Error for package (%s) - %s' % (self.__uriDict['package'], str(exc))
LOG.error(msg)
self._updateStatus(httpStatus = 500, error = exc.getCode(), errorMsg = msg)
except Exception as exc:
msg = 'Unknown error for package (%s) - %s' % (self.__uriDict['package'], str(exc))
LOG.error(msg)
self._updateStatus(httpStatus = 500, error = Errors.UNKNOWN_ERROR, errorMsg = msg)
def __startDownload(self):
""" actual download logic """
try:
LOG.info("Starting package download for package %s" % self.__uriDict['package'])
# check to see if there's an in progress file,
# since PackageMgr guarantees that duplicate threads will not be spawned
# for same pkg, assume an existing thread was killed.
# attempt to clean up package n move
if (os.path.exists(self.__uriDict['inProgressPackagePath'])):
LOG.debug('In progress file (%s) already exists. Cleanup and reattempt download'
% self.__uriDict['inProgressPackagePath'])
if os.path.exists(self.__uriDict['packagePath']):
if self.__skipProp or ((os.path.exists(self.__uriDict['propPath']) and
PackageUtil.validateProp(self.__uriDict['propPath']) and
PackageUtil.validatePackage(self.__uriDict['packagePath'],
self.__uriDict['propPath']))):
msg = 'The package already exists. Will NOT download duplicate package' + self.__uriDict['packagePath']
LOG.info(msg)
os.utime(self.__uriDict['packagePath'], None)
if os.path.exists(self.__uriDict['propPath']):
os.utime(self.__uriDict['propPath'], None)
self._updateStatus(progress = 100)
# NOTE: this is a normal exit not an error!
return
else:
LOG.warning('The package already exists. However package prop (%s) failed validation.'
% self.__uriDict['propPath'])
# Delete all traces of package before beginning download
LOG.debug('Cleaning up all packages for %s ' % self.__uriDict['packagePath'])
PackageUtil.cleanUpPackage(self.__uriDict['inProgressPackagePath'],
self.__uriDict['packagePath'],
self.__uriDict['propPath'])
AgentThread._updateProgress(self, 0)
if self.__skipProp:
LOG.info('Skip download of prop file')
else:
# First, download .prop file
LOG.info('Starting download of prop file %s - %s' % (self.__uriDict['propUri'], self.__uriDict['propPath']))
self.__download_prop_file()
try:
self.__prop = loadPropFile(self.__uriDict['propPath'])
except FileNotFoundError:
raise AgentException(Errors.DC_MISSING_PROP_FILE,
'Prop file (%s) unable to read or did not parse' % (self.__uriDict['propPath']))
AgentThread._updateProgress(self, 2)
self.__setProgressTimeouts()
if self.__uriDict['scheme'] == 'http':
# try download 3 times, with random sleep
attempt = configutil.getConfigAsInt('download_thread_attempt')
for _ in range(attempt):
try:
sotimeout = float(pylons.config['download_thread_sotimeout'])
proxies = json.loads(pylons.config['urlgrabber_proxies'])
urlgrabber.urlgrab(self.__uriDict['uri'],
self.__uriDict['inProgressPackagePath'],
# checkfunc = None if self.__skipProp else (PackageUtil.validateDownload, (), {}),
progress_obj = DownloadProgress(self),
throttle = float(pylons.config['package_throttle']),
bandwidth = int(pylons.config['package_bandwidth']),
keepalive = 0,
timeout = sotimeout,
proxies = proxies)
break
except Exception as exc:
msg = 'Download error %s - %s' % (str(exc), traceback.format_exc(3))
LOG.warning(msg)
if _ == attempt-1:
raise exc
randsleep = randint(5, 10)
time.sleep(randsleep)
else:
# oops! only http supported now
raise AgentException(Errors.DC_UNSUPPORTED_PROTOCOL, 'Only http protocols is supported at the moment')
self._checkStop()
if self.__skipProp:
LOG.info('Skip validating against prop file')
else:
if (not PackageUtil.validatePackage(self.__uriDict['inProgressPackagePath'],
self.__uriDict['propPath'])):
raise AgentException(Errors.DC_FAILED_VALIDATE, 'Package ' +
self.__uriDict['packagePath'] + ' failed validation')
os.utime(self.__uriDict['propPath'], None)
utils.rchmod(self.__uriDict['propPath'], "777", 'no')
LOG.info('Download complete, rename this file %s' % self.__uriDict['packagePath'])
os.rename(self.__uriDict['inProgressPackagePath'], self.__uriDict['packagePath'])
os.utime(self.__uriDict['packagePath'], None)
utils.rchmod(self.__uriDict['packagePath'], "777", 'no')
LOG.info("Download complete, updating progress to 100")
self._updateStatus(progress = 100)
        except AgentException as exc:
self._updateStatus(httpStatus = 500, progress = 0, error = exc.getCode(), errorMsg = exc.getMsg())
msg = 'Download error %s - %s' % (str(exc), traceback.format_exc(3))
LOG.error(msg)
raise exc
        except Exception as exc:
self._updateStatus(httpStatus = 500, progress = 0, error = Errors.UNKNOWN_ERROR, errorMsg = str(exc))
msg = 'Unknown download error %s - %s' % (str(exc), traceback.format_exc(3))
LOG.error(msg)
raise exc
finally:
LOG.info("Completed package download for package %s" % self.__uriDict['package'])
def __setProgressTimeouts(self):
""" Setting timeout for download thread. The timeouts uses the size of the package."""
if not self.__skipProp:
timeout = float(self.__prop['size']) / float(pylons.config['download_thread_rate_per_sec'])
timeout = max(timeout, float(pylons.config['download_thread_min_time']))
timeout = min(timeout, float(pylons.config['download_thread_max_time']))
progressTimeout = timeout * float(pylons.config['download_thread_progress_ratio'])
progressTimeout = max(progressTimeout, float(pylons.config['download_thread_min_progress_time']))
self.extendTimeout(timeout)
self.setProgressTimeout(progressTimeout)
self.__timeouts = (timeout, progressTimeout)
else:
self.__timeouts = (self._timeout, self._progressTimeout)
LOG.debug('Using timeout=%s and progress timeout=%s' % self.__timeouts)
def getProgressTimeouts(self):
"""
Getting timeout from the download thread. The timeout is either None or consists
provides (total timeout, progress timeout)
"""
return self.__timeouts
def stop(self):
""" stopping client before calling the super method """
LOG.info('STOP download thread is called stopping')
AgentThread.stop(self)
def _updateHttpProgress(self, amount_read):
""" custom progress computation """
if not self.__skipProp:
progress = 2 + ((float(amount_read) / float(self.__prop['size'])) * (97))
AgentThread._updateProgress(self, progress)
else:
progress = min(self.getProgress(), 97) + 1
AgentThread._updateProgress(self, progress)
self._checkStop()
def __download_prop_file(self):
""" download prop file and validate """
# retry 3 times download prop file
for _ in range(3):
try:
sotimeout = float(pylons.config['download_thread_sotimeout'])
proxies = json.loads(pylons.config['urlgrabber_proxies'])
urlgrabber.urlgrab(
self.__uriDict['propUri'],
self.__uriDict['propPath'],
keepalive = 0,
timeout = sotimeout,
proxies = proxies)
break
except Exception:
randsleep = randint(30, 60)
time.sleep(randsleep)
if (not os.path.exists(self.__uriDict['propPath'])):
raise AgentException(Errors.DC_MISSING_PROP_FILE,
'Prop file (%s) does not exist' % (self.__uriDict['propPath']))
if not PackageUtil.validateProp(self.__uriDict['propPath']):
raise AgentException(Errors.DC_MISSING_PROP_FILE,
'Prop file (%s) failed validation' % (self.__uriDict['propPath']))
#pylint: disable=W0212
class DownloadProgress(object):
""" object to track the progress of a package """
def __init__(self, thread):
""" constructor """
object.__init__(self)
self.__thread = thread
def start(self, filename = None, url = None, basename = None,
size = None, now = None, text = None):
""" called during the start of the progress """
pass
def update(self, amount_read, now = None):
""" update the progress """
self.__thread._updateHttpProgress(amount_read)
def end(self, amount_read, now = None):
""" end the progress """
pass
|
apache-2.0
| 156,922,315,942,817,570 | 45.687943 | 125 | 0.544205 | false |
algorhythms/LeetCode
|
283 Move Zeroes.py
|
1
|
1204
|
"""
Given an array nums, write a function to move all 0's to the end of it while maintaining the relative order of the non-
zero elements.
For example, given nums = [0, 1, 0, 3, 12], after calling your function, nums should be [1, 3, 12, 0, 0].
Note:
You must do this in-place without making a copy of the array.
Minimize the total number of operations.
"""
__author__ = 'Daniel'
class Solution(object):
def moveZeroes(self, nums):
"""
Two pointers at the left side
Pivot
"""
left = -1
for i in xrange(len(nums)):
if nums[i] != 0:
left += 1
nums[left], nums[i] = nums[i], nums[left]
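# Trace of the pivot approach above (illustrative): for [0, 1, 0, 3, 12], `left`
# advances only when a non-zero value is found and that value is swapped forward,
# giving [1, 0, 0, 3, 12] -> [1, 3, 0, 0, 12] -> [1, 3, 12, 0, 0], so the
# relative order of the non-zero elements is preserved.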
class SolutionCount(object):
def moveZeroes(self, nums):
"""
In-place
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
cnt = 0
for elt in nums:
if elt != 0:
nums[cnt] = elt
cnt += 1
for j in xrange(cnt, len(nums)):
nums[j] = 0
if __name__ == "__main__":
lst = [0, 1, 0, 3, 12]
Solution().moveZeroes(lst)
assert lst == [1, 3, 12, 0, 0]
|
mit
| -4,577,519,818,686,035,500 | 24.083333 | 119 | 0.531561 | false |
almlab/SmileTrain
|
tools/get_taxonomies.py
|
1
|
2955
|
#!/usr/bin/env python
'''
Get greengenes taxonomies. Given an otu table with otu ids in the first column, search through
the greengenes taxonomy list. Output the taxonomies in order.
If the input database is a pickle, just load that dictionary.
'''
import sys, argparse, re, cPickle as pickle
def table_ids(fn):
'''get the otu ids from the otu table with filename fn'''
with open(fn) as f:
ids = [line.split()[0] for line in f]
# remove the first item, which is "OTU_ID"
ids.pop(0)
return ids
def uc_ids(fn):
with open(fn) as f:
ids = [line.split()[-1] for line in f]
return ids
def list_ids(fn):
with open(fn) as f:
ids = [line.strip() for line in f]
return ids
def taxa_dictionary(fn, ids):
'''get the second field in lines whose first fields match ids'''
# populate a hash otu_id => taxonomy
d = {}
with open(fn) as f:
for line in f:
fields = line.split()
otu = fields[0]
tax = " ".join(fields[1:])
if otu in ids:
d[otu] = tax
return d
if __name__ == '__main__':
# parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('db', help='input database')
input_type = parser.add_mutually_exclusive_group(required=True)
input_type.add_argument('-t', '--table', default=None, help='input otu table')
input_type.add_argument('-u', '--uc', default=None, help='input uc file')
input_type.add_argument('-l', '--list', default=None, help='input plain text list')
input_type.add_argument('-w', '--word', default=None, help='input a single ID')
parser.add_argument('-i', '--no_match_id', default=None, help='OTU ID for no match (default "no_match" for table/list; "*" for uc)')
parser.add_argument('-x', '--no_match_tax', default='k__; p__; c__; o__; f__; g__; s__', help='taxonomy for unmatched OTU ID (default is QIIME taxonomy format)')
args = parser.parse_args()
# depending on the input type, adjust the no match id and parsing function
if args.table is not None:
ids = table_ids(args.table)
if args.no_match_id is None: args.no_match_id = 'no_match'
elif args.uc is not None:
ids = uc_ids(args.uc)
if args.no_match_id is None: args.no_match_id = '*'
elif args.list is not None:
ids = list_ids(args.list)
if args.no_match_id is None: args.no_match_id = 'no_match'
elif args.word is not None:
ids = [args.word]
# check if the database file ends in .pkl or .pickle
    # if it is, use a pickled dictionary
# otherwise, just search line by line
if re.search("\.(pkl|pickle)$", args.db):
with open(args.db, 'rb') as f:
d = pickle.load(f)
else:
d = taxa_dictionary(args.db, ids)
d[args.no_match_id] = args.no_match_tax
print "\n".join([d[i] for i in ids])
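# Sketch of building a compatible pickled database for the fast path above
# (illustrative; the file names are hypothetical):
#     import cPickle as pickle
#     d = taxa_dictionary('gg_taxonomy.txt', set(list_ids('otu_ids.txt')))
#     with open('gg_taxonomy.pkl', 'wb') as f:
#         pickle.dump(d, f)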
|
mit
| -2,087,470,422,215,640,300 | 32.977011 | 165 | 0.601015 | false |
jimsize/PySolFC
|
pysollib/tk/tkhtml.py
|
1
|
4931
|
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
# imports
import os
import sys
from six.moves import tkinter
# PySol imports
from pysollib.mygettext import _
from pysollib.mfxutil import Struct
# Toolkit imports
from .tkwidget import MfxMessageDialog
from .statusbar import HtmlStatusbar
from pysollib.ui.tktile.tkhtml import Base_HTMLViewer
if __name__ == '__main__':
d = os.path.abspath(os.path.join(sys.path[0], '..', '..'))
sys.path.append(d)
import gettext
gettext.install('pysol', d, unicode=True)
# ************************************************************************
# *
# ************************************************************************
class HTMLViewer(Base_HTMLViewer):
symbols_fn = {} # filenames, loaded in Application.loadImages3
symbols_img = {}
def _calc_MfxMessageDialog(self):
return MfxMessageDialog
def __init__(self, parent, app=None, home=None):
self.parent = parent
self.app = app
self.home = home
self.url = None
self.history = Struct(
list=[],
index=0,
)
self.visited_urls = []
# need to keep a reference because of garbage collection
self.images = {}
self.defcursor = parent["cursor"]
# self.defcursor = 'xterm'
self.handcursor = "hand2"
# create buttons
button_width = 8
self.homeButton = tkinter.Button(parent, text=_("Index"),
width=button_width,
command=self.goHome)
self.homeButton.grid(row=0, column=0, sticky='w')
self.backButton = tkinter.Button(parent, text=_("Back"),
width=button_width,
command=self.goBack)
self.backButton.grid(row=0, column=1, sticky='w')
self.forwardButton = tkinter.Button(parent, text=_("Forward"),
width=button_width,
command=self.goForward)
self.forwardButton.grid(row=0, column=2, sticky='w')
self.closeButton = tkinter.Button(parent, text=_("Close"),
width=button_width,
command=self.destroy)
self.closeButton.grid(row=0, column=3, sticky='e')
# create text widget
text_frame = tkinter.Frame(parent)
text_frame.grid(row=1, column=0, columnspan=4, sticky='nsew')
text_frame.grid_propagate(False)
vbar = tkinter.Scrollbar(text_frame)
vbar.pack(side='right', fill='y')
self.text = tkinter.Text(text_frame,
fg='black', bg='white',
bd=1, relief='sunken',
cursor=self.defcursor,
wrap='word', padx=10)
self.text.pack(side='left', fill='both', expand=True)
self.text["yscrollcommand"] = vbar.set
vbar["command"] = self.text.yview
# statusbar
self.statusbar = HtmlStatusbar(parent, row=2, column=0, columnspan=4)
parent.columnconfigure(2, weight=1)
parent.rowconfigure(1, weight=1)
# load images
for name, fn in self.symbols_fn.items():
self.symbols_img[name] = self.getImage(fn)
self.initBindings()
# ************************************************************************
# *
# ************************************************************************
def tkhtml_main(args):
try:
url = args[1]
except Exception:
url = os.path.join(os.pardir, os.pardir, "data", "html", "index.html")
top = tkinter.Tk()
top.wm_minsize(400, 200)
viewer = HTMLViewer(top)
viewer.app = None
viewer.display(url)
top.mainloop()
return 0
if __name__ == "__main__":
sys.exit(tkhtml_main(sys.argv))
|
gpl-3.0
| -2,449,040,235,664,294,000 | 34.221429 | 78 | 0.528493 | false |
jeremiahmarks/dangerzone
|
scripts/python/remoteAccessScripts/dirTree.py
|
1
|
2490
|
#this script will mount a remote file system using fuse and then generate a
#directory tree using os.walk
#TODO: for some reason the files that are on the top level all have a double slash
# I know that it is due to the line:
# k.write('{}{}/{}'.format(subindent,root.replace(startpath, 'http://jlmarks.org/'),f)+'\n')
# but it seems like ALL lines should have a double slash after the .org.
# I will have to consult with someone about that.
#TODO: format the results into a nice folding HTML page, either accordion style,
# or using <details><summary></summary></details> (This does not seem to
# render well in Chrome, though.
# sshfs jeremiahmarks@jlmarks.org:/home/content/06/6816406/html/ /home/jlmarks/remote/
import pw
import os
host=pw.hn
username=pw.un
remote_folder=pw.rf
local_folder=pw.lf
output=pw.fi
connectionString='sshfs '+username+'@'+host+':'+remote_folder+' '+local_folder
def list_files(startpath):
"""
    list_files is the workhorse of the dirTree application. It connects to my
    hosting account by fusing the remote directory to my local file system.
    It then crawls the file system and creates a txt file with the contents of
    the file system.
    It is similar to a site-map-creating spider, but the pages do not need to
    be linked anywhere since it can crawl the entire server via ssh.
"""
os.system(connectionString) #This creates the actual connection.
k=open(output, 'w') #This opens the file for adding contents of the directory
for root, dirs, files in os.walk(startpath):
#The next line replaces the part of the path that takes the user to the
#folder, and then counts the slashes to determine how deep to indent
#the results.
level = root.replace(startpath, '').count(os.sep)
#This creates four spaces for each level found above.
indent = ' ' * 4 * (level)
#the k.write line basically replaces the brackets with the results from
#first the indent and then by replacing the path to the folder with the
#first part of my domain.
k.write('{}{}'.format(indent,root.replace(startpath, 'http://jlmarks.org/')+'\n'))
subindent = ' ' * 4 * (level + 1)
for f in files:
k.write('{}{}/{}'.format(subindent,root.replace(startpath, 'http://jlmarks.org/'),f)+'\n')
k.close()
print""" Suggested use:\n\tdirTree.list_files(dirTree.local_folder) """
|
mit
| -4,775,350,889,011,808,000 | 40.5 | 102 | 0.675502 | false |
eLvErDe/nicotine-plus
|
test/unit/test_login.py
|
1
|
2247
|
__author__ = 'Lene Preuss <lene.preuss@gmail.com>'
from queue import Queue
from time import sleep
from unittest.mock import Mock, MagicMock
import pytest
from pynicotine.slskproto import SlskProtoThread
from pynicotine.slskmessages import ServerConn, Login, SetWaitPort
from pynicotine.utils import ApplyTranslation
from test.unit.mock_socket import monkeypatch_socket, monkeypatch_select
# Time (in s) needed for SlskProtoThread main loop to run at least once
SLSKPROTO_RUN_TIME = 0.5
LOGIN_DATAFILE = 'data/login/socket_localhost:22420.log'
@pytest.fixture(scope="module", autouse=True)
def apply_translations():
ApplyTranslation()
@pytest.fixture
def config():
config = MagicMock()
config.sections = {'server': {'portrange': (1, 2)}, 'transfers': {'downloadlimit': 10}}
return config
def test_instantiate_proto(config) -> None:
proto = SlskProtoThread(
ui_callback=Mock(), queue=Mock(), bindip='',
port=None, config=config, eventprocessor=Mock()
)
proto.abort()
def test_server_conn(config, monkeypatch) -> None:
mock_socket = monkeypatch_socket(monkeypatch, LOGIN_DATAFILE)
monkeypatch_select(monkeypatch)
proto = SlskProtoThread(
ui_callback=Mock(), queue=Queue(0), bindip='',
port=None, config=config, eventprocessor=Mock()
)
proto._queue.put(ServerConn())
sleep(SLSKPROTO_RUN_TIME)
proto.abort()
assert mock_socket.setsockopt.call_count == 1
assert mock_socket.setblocking.call_count == 2
assert mock_socket.bind.call_count == 1
assert mock_socket.connect_ex.call_count == 1
assert mock_socket.listen.call_count == 1
assert mock_socket.close.call_count == 1
def test_login(config, monkeypatch) -> None:
mock_socket = monkeypatch_socket(monkeypatch, LOGIN_DATAFILE)
monkeypatch_select(monkeypatch)
proto = SlskProtoThread(
ui_callback=Mock(), queue=Queue(0), bindip='',
port=None, config=config, eventprocessor=Mock()
)
proto._queue.put(ServerConn())
sleep(SLSKPROTO_RUN_TIME / 2)
proto._queue.put(Login('username', 'password', 157))
proto._queue.put(SetWaitPort(1))
sleep(SLSKPROTO_RUN_TIME)
proto.abort()
pytest.skip('Login succeeded, actual test TBD')
|
gpl-3.0
| -8,034,834,033,154,774,000 | 31.1 | 91 | 0.705385 | false |
spoonysonny/SAKS-tutorials
|
chengying/sakspins.py
|
1
|
1183
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 NXEZ.COM.
# http://www.nxez.com
#
# Licensed under the GNU General Public License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.gnu.org/licenses/gpl-2.0.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Spoony'
__version__ = 'version 0.0.1'
__license__ = 'Copyright (c) 2015 NXEZ.COM'
class SAKSPins(object):
'''
SAKS Pins Code With BCM for Raspberry Pi.
'''
IC_74HC595_DS = 6
IC_74HC595_SHCP = 19
IC_74HC595_STCP = 13
IC_TM1637_DI = 25
IC_TM1637_CLK = 5
BUZZER = 12
TACT_RIGHT = 20
TACT_LEFT = 16
DIP_SWITCH_1 = 21
DIP_SWITCH_2 = 26
IR_SENDER = 17
IR_RECEIVER = 9
DS18B20 = 4
UART_TXD = 14
UART_RXD = 15
I2C_SDA = 2
I2C_SLC = 3
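# Usage sketch (assumes the RPi.GPIO library with BCM numbering, as noted in the
# class docstring; not part of the original module):
#     import RPi.GPIO as GPIO
#     GPIO.setmode(GPIO.BCM)
#     GPIO.setup(SAKSPins.BUZZER, GPIO.OUT)
#     GPIO.output(SAKSPins.BUZZER, GPIO.HIGH)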
|
gpl-2.0
| 2,732,638,948,741,312,000 | 23.666667 | 77 | 0.652578 | false |
leppa/home-assistant
|
homeassistant/components/volumio/media_player.py
|
1
|
9924
|
"""
Volumio Platform.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.volumio/
Volumio rest API: https://volumio.github.io/docs/API/REST_API.html
"""
import asyncio
from datetime import timedelta
import logging
import socket
import aiohttp
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerDevice
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
STATE_IDLE,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOST = "localhost"
DEFAULT_NAME = "Volumio"
DEFAULT_PORT = 3000
DATA_VOLUMIO = "volumio"
TIMEOUT = 10
SUPPORT_VOLUMIO = (
SUPPORT_PAUSE
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_MUTE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_SEEK
| SUPPORT_STOP
| SUPPORT_PLAY
| SUPPORT_VOLUME_STEP
| SUPPORT_SELECT_SOURCE
| SUPPORT_SHUFFLE_SET
| SUPPORT_CLEAR_PLAYLIST
)
PLAYLIST_UPDATE_INTERVAL = timedelta(seconds=15)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Volumio platform."""
if DATA_VOLUMIO not in hass.data:
hass.data[DATA_VOLUMIO] = dict()
# This is a manual configuration?
if discovery_info is None:
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
else:
name = "{} ({})".format(DEFAULT_NAME, discovery_info.get("hostname"))
host = discovery_info.get("host")
port = discovery_info.get("port")
# Only add a device once, so discovered devices do not override manual
# config.
ip_addr = socket.gethostbyname(host)
if ip_addr in hass.data[DATA_VOLUMIO]:
return
entity = Volumio(name, host, port, hass)
hass.data[DATA_VOLUMIO][ip_addr] = entity
async_add_entities([entity])
class Volumio(MediaPlayerDevice):
"""Volumio Player Object."""
def __init__(self, name, host, port, hass):
"""Initialize the media player."""
self.host = host
self.port = port
self.hass = hass
self._url = "{}:{}".format(host, str(port))
self._name = name
self._state = {}
self._lastvol = self._state.get("volume", 0)
self._playlists = []
self._currentplaylist = None
async def send_volumio_msg(self, method, params=None):
"""Send message."""
url = f"http://{self.host}:{self.port}/api/v1/{method}/"
_LOGGER.debug("URL: %s params: %s", url, params)
try:
websession = async_get_clientsession(self.hass)
response = await websession.get(url, params=params)
if response.status == 200:
data = await response.json()
else:
_LOGGER.error(
"Query failed, response code: %s Full message: %s",
response.status,
response,
)
return False
except (asyncio.TimeoutError, aiohttp.ClientError) as error:
_LOGGER.error(
"Failed communicating with Volumio '%s': %s", self._name, type(error)
)
return False
try:
return data
except AttributeError:
_LOGGER.error("Received invalid response: %s", data)
return False
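    # Illustrative note (not part of the original integration): a call such as
    #     await self.send_volumio_msg("commands", params={"cmd": "play"})
    # performs GET http://<host>:<port>/api/v1/commands/?cmd=play against the
    # Volumio REST API and returns the decoded JSON body, or False on failure.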
async def async_update(self):
"""Update state."""
resp = await self.send_volumio_msg("getState")
await self._async_update_playlists()
if resp is False:
return
self._state = resp.copy()
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def state(self):
"""Return the state of the device."""
status = self._state.get("status", None)
if status == "pause":
return STATE_PAUSED
if status == "play":
return STATE_PLAYING
return STATE_IDLE
@property
def media_title(self):
"""Title of current playing media."""
return self._state.get("title", None)
@property
def media_artist(self):
"""Artist of current playing media (Music track only)."""
return self._state.get("artist", None)
@property
def media_album_name(self):
"""Artist of current playing media (Music track only)."""
return self._state.get("album", None)
@property
def media_image_url(self):
"""Image url of current playing media."""
url = self._state.get("albumart", None)
if url is None:
return
if str(url[0:2]).lower() == "ht":
mediaurl = url
else:
mediaurl = f"http://{self.host}:{self.port}{url}"
return mediaurl
@property
def media_seek_position(self):
"""Time in seconds of current seek position."""
return self._state.get("seek", None)
@property
def media_duration(self):
"""Time in seconds of current song duration."""
return self._state.get("duration", None)
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
volume = self._state.get("volume", None)
if volume is not None and volume != "":
volume = int(volume) / 100
return volume
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._state.get("mute", None)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def shuffle(self):
"""Boolean if shuffle is enabled."""
return self._state.get("random", False)
@property
def source_list(self):
"""Return the list of available input sources."""
return self._playlists
@property
def source(self):
"""Name of the current input source."""
return self._currentplaylist
@property
def supported_features(self):
"""Flag of media commands that are supported."""
return SUPPORT_VOLUMIO
def async_media_next_track(self):
"""Send media_next command to media player."""
return self.send_volumio_msg("commands", params={"cmd": "next"})
def async_media_previous_track(self):
"""Send media_previous command to media player."""
return self.send_volumio_msg("commands", params={"cmd": "prev"})
def async_media_play(self):
"""Send media_play command to media player."""
return self.send_volumio_msg("commands", params={"cmd": "play"})
def async_media_pause(self):
"""Send media_pause command to media player."""
if self._state["trackType"] == "webradio":
return self.send_volumio_msg("commands", params={"cmd": "stop"})
return self.send_volumio_msg("commands", params={"cmd": "pause"})
def async_set_volume_level(self, volume):
"""Send volume_up command to media player."""
return self.send_volumio_msg(
"commands", params={"cmd": "volume", "volume": int(volume * 100)}
)
def async_volume_up(self):
"""Service to send the Volumio the command for volume up."""
return self.send_volumio_msg(
"commands", params={"cmd": "volume", "volume": "plus"}
)
def async_volume_down(self):
"""Service to send the Volumio the command for volume down."""
return self.send_volumio_msg(
"commands", params={"cmd": "volume", "volume": "minus"}
)
def async_mute_volume(self, mute):
"""Send mute command to media player."""
mutecmd = "mute" if mute else "unmute"
if mute:
# mute is implemented as 0 volume, do save last volume level
self._lastvol = self._state["volume"]
return self.send_volumio_msg(
"commands", params={"cmd": "volume", "volume": mutecmd}
)
return self.send_volumio_msg(
"commands", params={"cmd": "volume", "volume": self._lastvol}
)
def async_set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
return self.send_volumio_msg(
"commands", params={"cmd": "random", "value": str(shuffle).lower()}
)
def async_select_source(self, source):
"""Choose a different available playlist and play it."""
self._currentplaylist = source
return self.send_volumio_msg(
"commands", params={"cmd": "playplaylist", "name": source}
)
def async_clear_playlist(self):
"""Clear players playlist."""
self._currentplaylist = None
return self.send_volumio_msg("commands", params={"cmd": "clearQueue"})
@Throttle(PLAYLIST_UPDATE_INTERVAL)
async def _async_update_playlists(self, **kwargs):
"""Update available Volumio playlists."""
self._playlists = await self.send_volumio_msg("listplaylists")
|
apache-2.0
| -1,982,843,421,541,624,000 | 29.535385 | 86 | 0.602479 | false |
dgu-dna/DNA-Bot
|
apps/quiz.py
|
1
|
7875
|
from apps.decorators import on_command
from apps.slackutils import cat_token, isNumber, get_nickname
from time import localtime, strftime
import os
import re
import json
import time
import urllib
import random
CACHE_DEFAULT_URL = './apps/quiz_cache/'
CACHE_CATEGORY_URL = './apps/quiz_cache/category/'
def get_random_question(channel):
info_file = CACHE_DEFAULT_URL + channel + '.json'
cdat = json.loads(open(info_file).read())
quizRaw = open(CACHE_CATEGORY_URL + cdat['category'] + '.json').read()
qdat = json.loads(quizRaw)
cdat['solved'].append(cdat['q_num'])
if len(cdat['solved']) >= cdat['q_max']:
return False
rand_num = random.randrange(0, qdat['q_num'])
while rand_num + 1 in cdat['solved']:
rand_num = random.randrange(0, qdat['q_num'])
question = qdat['question'][rand_num]
answer = qdat['answer'][rand_num]
cdat['last_solved'] = int(round(time.time() * 1000))
cdat['q_num'] = rand_num + 1
cdat['question'] = question
cdat['answer'] = answer
cdat['skip_count'] = []
cdat['give_up'] = []
with open(info_file, 'w') as fp:
json.dump(cdat, fp, indent=4)
return True
def get_answer(channel):
cdat = json.loads(open(CACHE_DEFAULT_URL + channel + '.json').read())
answer = re.sub(r'\s*\(.*\)', '', cdat['answer'])
hint = re.sub(r'.*\(', '(', cdat['answer'])
if hint == cdat['answer']:
hint = ''
return answer, hint
def get_message(channel):
msg = ''
if channel[0] == 'C':
msg += '> `채널 전체 문제`\n'
if os.path.isfile(CACHE_DEFAULT_URL + channel + '.json'):
channelRaw = open(CACHE_DEFAULT_URL + channel + '.json').read()
cdat = json.loads(channelRaw)
else:
return '진행 중인 퀴즈가 없음. `!도움 퀴즈`'
msg += ('> *['+cdat['category']+']---- ' +
str(cdat['q_num']) + '번 문제 || 총 ' +
str(cdat['q_max']) + '문제 중 ' +
str(len(cdat['solved']) + 1) +
'개 째... || 답안 제출법:* `!정답 <답안>`\n```' +
cdat['question'] + '```')
return msg
@on_command(['!퀴즈'])
def run(robot, channel, tokens, user, command):
'''문제 내드림'''
info_file = CACHE_DEFAULT_URL + channel + '.json'
nickname = get_nickname(user)
msg = ''
if len(tokens) < 1:
return channel, get_message(channel)
if tokens[0] in ['등록', '추가']:
if len(tokens) != 4:
return channel, '자세한 사용법은...(`!도움 퀴즈`)'
quizFile = CACHE_CATEGORY_URL + tokens[1] + '.json'
qdat = {}
if os.path.isfile(quizFile):
qdat = json.loads(open(quizFile).read())
qdat['q_num'] += 1
qdat['question'].append(tokens[2])
qdat['answer'].append(tokens[3])
qdat['user'].append(nickname)
qdat['time'].append(strftime('%Y-%m-%d %H:%M:%S', localtime()))
else:
qdat['q_num'] = 1
qdat['question'] = [tokens[2]]
qdat['answer'] = [tokens[3]]
qdat['user'] = [nickname]
qdat['time'] = [strftime('%Y-%m-%d %H:%M:%S', localtime())]
with open(quizFile, 'w') as fp:
json.dump(qdat, fp, indent=4)
msg = tokens[1] + '에 관한 문제가 추가됨'
elif tokens[0] == '수정':
if len(tokens) != 5:
return channel, '자세한 사용법은...(`!도움 퀴즈`)'
quizFile = CACHE_CATEGORY_URL + tokens[1] + '.json'
qdat = json.loads(open(quizFile).read())
if not isNumber(tokens[2]):
return channel, '자세한 사용법은...(`!도움 퀴즈`)'
idx = int(tokens[2]) - 1
        qdat['question'][idx] = tokens[3]
        qdat['answer'][idx] = tokens[4]
        qdat['user'][idx] = nickname
        qdat['time'][idx] = strftime('%Y-%m-%d %H:%M:%S', localtime())
with open(quizFile, 'w') as fp:
json.dump(qdat, fp, indent=4)
msg = tokens[1] + '에 관한 ' + tokens[2] + '번 문제가 수정됨'
elif tokens[0] == '포기':
if channel[0] == 'D':
os.remove(info_file)
return channel, '진행중인 퀴즈를 포기함'
cdat = json.loads(open(info_file).read())
if user in cdat['give_up']:
return channel, '이미 포기에 투표함'
if len(cdat['give_up']) < 2:
cdat['give_up'].append(user)
with open(info_file, 'w') as fp:
json.dump(cdat, fp, indent=4)
return channel, str(3 - len(cdat['give_up']))+'명 더 필요함'
os.remove(info_file)
msg = '진행중인 퀴즈를 포기함'
elif tokens[0] == '조회':
if len(tokens) < 2:
return channel, '자세한 사용법은...(`!도움 퀴즈`)'
if channel[0] == 'C':
return channel, '채널에선 사용할 수 없음'
quizFile = CACHE_CATEGORY_URL + tokens[1] + '.json'
qdat = json.loads(open(quizFile).read())
msg = tokens[1] + '에는 총 ' + str(qdat['q_num']) + '개의 문제가 있음'
for idx, question in enumerate(qdat['question']):
msg += '\n*' + str(idx + 1) + '.* ' + question
elif tokens[0] == '문제집':
all_file = os.listdir(CACHE_CATEGORY_URL)
quiz_list = list(map(lambda x: os.path.splitext(x)[0], all_file))
msg = '>*여태 등록된 문제집들*\n' + ' || '.join(quiz_list)
elif tokens[0] == '시작':
if len(tokens) < 2:
return channel, '자세한 사용법은...(`!도움 퀴즈`)'
if os.path.isfile(info_file):
return channel, '이미 진행중인 문제집이 있음. `!퀴즈`'
if not os.path.isfile(CACHE_CATEGORY_URL + tokens[1] + '.json'):
return channel, '그런 문제집은 없음.'
quizRaw = open(CACHE_CATEGORY_URL + tokens[1] + '.json').read()
qdat = json.loads(quizRaw)
rand_num = random.randrange(0, qdat['q_num'])
question = qdat['question'][rand_num]
answer = qdat['answer'][rand_num]
cdat = {}
cdat['name'] = nickname
cdat['start_time'] = strftime('%Y %m %d %H %M %S', localtime())
cdat['last_solved'] = int(round(time.time() * 1000))
cdat['solved'] = []
cdat['correct'] = 0
cdat['give_up'] = []
cdat['skip_count'] = []
cdat['correct_user'] = []
cdat['correct_cnt'] = []
cdat['q_num'] = rand_num + 1
if len(tokens) == 3:
cdat['q_max'] = int(tokens[2])
else:
cdat['q_max'] = qdat['q_num']
cdat['question'] = question
cdat['answer'] = answer
cdat['category'] = tokens[1]
with open(info_file, 'w') as fp:
json.dump(cdat, fp, indent=4)
msg = get_message(channel)
elif tokens[0] == '패스':
if not os.path.isfile(info_file):
return channel, '자세한 사용법은... `!퀴즈`'
cdat = json.loads(open(info_file).read())
if user in cdat['skip_count']:
return channel, '이미 패스에 투표함'
if len(cdat['skip_count']) < 1:
cdat['skip_count'].append(user)
with open(info_file, 'w') as fp:
json.dump(cdat, fp, indent=4)
return channel, str(2 - len(cdat['skip_count'])) + '명 더 필요함'
quizRaw = open(CACHE_CATEGORY_URL + cdat['category'] + '.json').read()
qdat = json.loads(quizRaw)
answer, hint = get_answer(channel)
msg = '정답은 `'+answer+'` '+hint+' (출제:'+qdat['user'][cdat['q_num']-1][:1]+'·'+qdat['user'][cdat['q_num']-1][1:]+')\n'
get_random_question(channel)
msg += get_message(channel)
else:
msg = '자세한 사용법은...(`!도움 퀴즈`)'
return channel, msg
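# Illustrative sketch of the JSON files this plugin reads and writes (the key
# names come from the code above; paths and values are hypothetical):
#
#   CACHE_CATEGORY_URL + '<category>.json'   -- one question set per category
#       {
#           "q_num": 2,
#           "question": ["question 1", "question 2"],
#           "answer": ["answer 1", "answer 2"],
#           "user": ["nickname", "nickname"],
#           "time": ["2016-01-01 12:00:00", "2016-01-02 09:30:00"]
#       }
#
#   CACHE_DEFAULT_URL + '<channel>.json'     -- per-channel progress, created by
#       the '시작' subcommand and removed by '포기' or when the quiz finishes;
#       it tracks the current 'question'/'answer', 'q_num'/'q_max', the voting
#       lists ('give_up', 'skip_count') and the score bookkeeping fields.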
|
mit
| -6,365,272,749,459,497,000 | 36.55102 | 124 | 0.519429 | false |
WillisXChen/django-oscar
|
oscar/lib/python2.7/site-packages/debug_toolbar/settings.py
|
1
|
8768
|
from __future__ import absolute_import, unicode_literals
import warnings
from django.conf import settings
from django.utils import six
from debug_toolbar.compat import import_module
# Always import this module as follows:
# from debug_toolbar import settings [as dt_settings]
# Don't import CONFIG or PANELS directly, or you will miss changes performed
# with override_settings in tests.
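# For illustration, the intended pattern is (sketch):
#     from debug_toolbar import settings as dt_settings
#     cache_size = dt_settings.CONFIG['RESULTS_CACHE_SIZE']
# whereas "from debug_toolbar.settings import CONFIG" binds a stale reference
# and misses any later reassignment of CONFIG.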
CONFIG_DEFAULTS = {
# Toolbar options
'DISABLE_PANELS': set(['debug_toolbar.panels.redirects.RedirectsPanel']),
'INSERT_BEFORE': '</body>',
'JQUERY_URL': '//ajax.googleapis.com/ajax/libs/jquery/2.1.0/jquery.min.js',
'RENDER_PANELS': None,
'RESULTS_CACHE_SIZE': 10,
'ROOT_TAG_EXTRA_ATTRS': '',
'SHOW_COLLAPSED': False,
'SHOW_TOOLBAR_CALLBACK': 'debug_toolbar.middleware.show_toolbar',
# Panel options
'EXTRA_SIGNALS': [],
'ENABLE_STACKTRACES': True,
'HIDE_IN_STACKTRACES': (
'socketserver' if six.PY3 else 'SocketServer',
'threading',
'wsgiref',
'debug_toolbar',
'django',
),
'PROFILER_MAX_DEPTH': 10,
'SHOW_TEMPLATE_CONTEXT': True,
'SQL_WARNING_THRESHOLD': 500, # milliseconds
}
USER_CONFIG = getattr(settings, 'DEBUG_TOOLBAR_CONFIG', {})
# Backward-compatibility for 1.0, remove in 2.0.
_RENAMED_CONFIG = {
'RESULTS_STORE_SIZE': 'RESULTS_CACHE_SIZE',
'ROOT_TAG_ATTRS': 'ROOT_TAG_EXTRA_ATTRS',
'HIDDEN_STACKTRACE_MODULES': 'HIDE_IN_STACKTRACES'
}
for old_name, new_name in _RENAMED_CONFIG.items():
if old_name in USER_CONFIG:
warnings.warn(
"%r was renamed to %r. Update your DEBUG_TOOLBAR_CONFIG "
"setting." % (old_name, new_name), DeprecationWarning)
USER_CONFIG[new_name] = USER_CONFIG.pop(old_name)
if 'HIDE_DJANGO_SQL' in USER_CONFIG:
warnings.warn(
"HIDE_DJANGO_SQL was removed. Update your "
"DEBUG_TOOLBAR_CONFIG setting.", DeprecationWarning)
USER_CONFIG.pop('HIDE_DJANGO_SQL')
if 'TAG' in USER_CONFIG:
warnings.warn(
"TAG was replaced by INSERT_BEFORE. Update your "
"DEBUG_TOOLBAR_CONFIG setting.", DeprecationWarning)
USER_CONFIG['INSERT_BEFORE'] = '</%s>' % USER_CONFIG.pop('TAG')
CONFIG = CONFIG_DEFAULTS.copy()
CONFIG.update(USER_CONFIG)
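# A hypothetical override in a project's Django settings, using keys from
# CONFIG_DEFAULTS above (values are purely illustrative):
#     DEBUG_TOOLBAR_CONFIG = {
#         'SHOW_COLLAPSED': True,
#         'RESULTS_CACHE_SIZE': 25,
#         'SQL_WARNING_THRESHOLD': 200,  # milliseconds
#     }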
PANELS_DEFAULTS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
]
try:
PANELS = list(settings.DEBUG_TOOLBAR_PANELS)
except AttributeError:
PANELS = PANELS_DEFAULTS
else:
# Backward-compatibility for 1.0, remove in 2.0.
_RENAMED_PANELS = {
'debug_toolbar.panels.version.VersionDebugPanel':
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerDebugPanel':
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings_vars.SettingsDebugPanel':
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel':
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel':
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLDebugPanel':
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.template.TemplateDebugPanel':
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CacheDebugPanel':
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalDebugPanel':
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logger.LoggingDebugPanel':
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.InterceptRedirectsDebugPanel':
'debug_toolbar.panels.redirects.RedirectsPanel',
'debug_toolbar.panels.profiling.ProfilingDebugPanel':
'debug_toolbar.panels.profiling.ProfilingPanel',
}
for index, old_panel in enumerate(PANELS):
new_panel = _RENAMED_PANELS.get(old_panel)
if new_panel is not None:
warnings.warn(
"%r was renamed to %r. Update your DEBUG_TOOLBAR_PANELS "
"setting." % (old_panel, new_panel), DeprecationWarning)
PANELS[index] = new_panel
if 'INTERCEPT_REDIRECTS' in USER_CONFIG:
warnings.warn(
"INTERCEPT_REDIRECTS is deprecated. Please use the "
"DISABLE_PANELS config in the "
"DEBUG_TOOLBAR_CONFIG setting.", DeprecationWarning)
if USER_CONFIG['INTERCEPT_REDIRECTS']:
if 'debug_toolbar.panels.redirects.RedirectsPanel' \
in CONFIG['DISABLE_PANELS']:
# RedirectsPanel should be enabled
try:
CONFIG['DISABLE_PANELS'].remove(
'debug_toolbar.panels.redirects.RedirectsPanel'
)
except KeyError:
# We wanted to remove it, but it didn't exist. This is fine
pass
elif 'debug_toolbar.panels.redirects.RedirectsPanel' \
not in CONFIG['DISABLE_PANELS']:
# RedirectsPanel should be disabled
CONFIG['DISABLE_PANELS'].add(
'debug_toolbar.panels.redirects.RedirectsPanel'
)
PATCH_SETTINGS = getattr(settings, 'DEBUG_TOOLBAR_PATCH_SETTINGS', settings.DEBUG)
# The following functions can monkey-patch settings automatically. Several
# imports are placed inside functions to make it safe to import this module.
def check_middleware():
from django.middleware.gzip import GZipMiddleware
from debug_toolbar.middleware import DebugToolbarMiddleware
gzip_index = None
debug_toolbar_index = None
# Determine the indexes which gzip and/or the toolbar are installed at
for i, middleware in enumerate(settings.MIDDLEWARE_CLASSES):
if is_middleware_class(GZipMiddleware, middleware):
gzip_index = i
elif is_middleware_class(DebugToolbarMiddleware, middleware):
debug_toolbar_index = i
# If the toolbar appears before the gzip index, raise a warning
if gzip_index is not None and debug_toolbar_index < gzip_index:
warnings.warn(
"Please use an explicit setup with the "
"debug_toolbar.middleware.DebugToolbarMiddleware "
"after django.middleware.gzip.GZipMiddlware "
"in MIDDLEWARE_CLASSES.", Warning)
def is_middleware_class(middleware_class, middleware_path):
# This could be replaced by import_by_path in Django >= 1.6.
try:
mod_path, cls_name = middleware_path.rsplit('.', 1)
mod = import_module(mod_path)
middleware_cls = getattr(mod, cls_name)
except (AttributeError, ImportError, ValueError):
return
return issubclass(middleware_cls, middleware_class)
def is_toolbar_middleware_installed():
from debug_toolbar.middleware import DebugToolbarMiddleware
return any(is_middleware_class(DebugToolbarMiddleware, middleware)
for middleware in settings.MIDDLEWARE_CLASSES)
def prepend_to_setting(setting_name, value):
"""Insert value at the beginning of a list or tuple setting."""
values = getattr(settings, setting_name)
# Make a list [value] or tuple (value,)
value = type(values)((value,))
setattr(settings, setting_name, value + values)
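# Sketch with hypothetical values: because the new value is wrapped in the same
# type as the existing setting, tuples stay tuples and lists stay lists:
#     settings.INTERNAL_IPS == ('10.0.0.1',)
#     prepend_to_setting('INTERNAL_IPS', '127.0.0.1')
#     settings.INTERNAL_IPS == ('127.0.0.1', '10.0.0.1')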
def patch_internal_ips():
if not settings.INTERNAL_IPS:
prepend_to_setting('INTERNAL_IPS', '127.0.0.1')
prepend_to_setting('INTERNAL_IPS', '::1')
def patch_middleware_classes():
if not is_toolbar_middleware_installed():
prepend_to_setting('MIDDLEWARE_CLASSES',
'debug_toolbar.middleware.DebugToolbarMiddleware')
def patch_root_urlconf():
from django.conf.urls import include, url
from django.core.urlresolvers import clear_url_caches, reverse, NoReverseMatch
import debug_toolbar
try:
reverse('djdt:render_panel')
except NoReverseMatch:
urlconf_module = import_module(settings.ROOT_URLCONF)
urlconf_module.urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlconf_module.urlpatterns
clear_url_caches()
def patch_all():
patch_internal_ips()
patch_middleware_classes()
patch_root_urlconf()
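# For reference, the explicit setup that patch_all() automates is roughly the
# following (sketch only; the project's settings and URLconf will differ):
#     # settings.py
#     INTERNAL_IPS = ('127.0.0.1', '::1')
#     MIDDLEWARE_CLASSES = (
#         'debug_toolbar.middleware.DebugToolbarMiddleware',
#     ) + MIDDLEWARE_CLASSES
#     # urls.py
#     import debug_toolbar
#     urlpatterns += [url(r'^__debug__/', include(debug_toolbar.urls))]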
|
bsd-3-clause
| -511,903,050,244,851,650 | 36.793103 | 82 | 0.676551 | false |
Molecular-Image-Recognition/Molecular-Image-Recognition
|
code/rmgpy/molecule/draw.py
|
1
|
72901
|
#!/usr/bin/env python
# encoding: utf-8
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2017 Prof. William H. Green (whgreen@mit.edu),
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module provides functionality for automatic two-dimensional drawing of the
`skeletal formulae <http://en.wikipedia.org/wiki/Skeletal_formula>`_ of a wide
variety of organic and inorganic molecules. The general method for creating
these drawings is to utilize the :meth:`draw()` method of the :class:`Molecule`
you wish to draw; this wraps a call to :meth:`MoleculeDrawer.draw()`, where the
molecule drawing algorithm begins. Advanced use may require use of the
:class:`MoleculeDrawer` class directly.
The `Cairo <http://cairographics.org/>`_ 2D graphics library is used to create
the drawings. The :class:`MoleculeDrawer` class will fail gracefully if
Cairo is not installed.
The implementation uses the 2D coordinate generation of rdKit to find coordinates,
then uses Cairo to render the atoms and bonds.
"""
import math
import numpy
import os.path
import re
import logging
from rmgpy.qm.molecule import Geometry
from rdkit.Chem import AllChem
from numpy.linalg import LinAlgError
################################################################################
def createNewSurface(format, target=None, width=1024, height=768):
"""
Create a new surface of the specified `format`:
"png" for :class:`ImageSurface`
"svg" for :class:`SVGSurface`
"pdf" for :class:`PDFSurface`
"ps" for :class:`PSSurface`
    The surface will be written to the `target` parameter, which can be a
path to save the surface to, or file-like object with a `write()` method.
You can also optionally specify the `width` and `height` of the generated
surface if you know what it is; otherwise a default size of 1024 by 768 is
used.
"""
try:
import cairocffi as cairo
except ImportError:
import cairo
format = format.lower()
if format == 'png':
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, int(width), int(height))
elif format == 'svg':
surface = cairo.SVGSurface(target, width, height)
elif format == 'pdf':
surface = cairo.PDFSurface(target, width, height)
elif format == 'ps':
surface = cairo.PSSurface(target, width, height)
else:
raise ValueError('Invalid value "{0}" for type parameter; valid values are "png", "svg", "pdf", and "ps".'.format(type))
return surface
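# Illustrative stand-alone use of createNewSurface(); inside this module it is
# normally only called by the drawer classes below. The path is hypothetical
# and cairo/cairocffi is assumed to be importable:
#     surface = createNewSurface('svg', target='out.svg', width=300, height=200)
#     cr = cairo.Context(surface)
#     # ... draw with cr ...
#     surface.finish()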
################################################################################
class MoleculeDrawer:
"""
This class provides functionality for drawing the skeletal formula of
molecules using the Cairo 2D graphics engine. The most common use case is
simply::
MoleculeDrawer().draw(molecule, format='png', path='molecule.png')
where ``molecule`` is the :class:`Molecule` object to draw. You can also
pass a dict of options to the constructor to affect how the molecules are
drawn.
"""
def __init__(self, options=None):
self.options = {
'fontFamily': 'sans',
'fontSizeNormal': 12,
'fontSizeSubscript': 8,
'bondLength': 24,
'padding': 2,
}
if options: self.options.update(options)
self.clear()
def clear(self):
self.molecule = None
self.cycles = None
self.ringSystems = None
self.coordinates = None
self.symbols = None
self.implicitHydrogens = None
self.left = 0.0
self.top = 0.0
self.right = 0.0
self.bottom = 0.0
self.surface = None
self.cr = None
def draw(self, molecule, format, target=None):
"""
Draw the given `molecule` using the given image `format` - pdf, svg, ps, or
        png. If `target` is given, the drawing is saved to that location on disk. The
`options` dict is an optional set of key-value pairs that can be used to
control the generated drawing.
This function returns the Cairo surface and context used to create the
drawing, as well as a bounding box for the molecule being drawn as the
tuple (`left`, `top`, `width`, `height`).
"""
# The Cairo 2D graphics library (and its Python wrapper) is required for
# the molecule drawing algorithm
try:
import cairocffi as cairo
except ImportError:
try:
import cairo
except ImportError:
logging.error('Cairo not found; molecule will not be drawn.')
return
# Make a copy of the molecule so we don't modify the original
self.molecule = molecule.copy(deep=True)
# Remove all unlabeled hydrogen atoms from the copied atoms and bonds, as
# they are not drawn
# However, if this would remove all atoms, then don't remove any
atomsToRemove = []
self.implicitHydrogens = {}
for atom in self.molecule.atoms:
if atom.isHydrogen() and atom.label == '': atomsToRemove.append(atom)
if len(atomsToRemove) < len(self.molecule.atoms):
for atom in atomsToRemove:
for atom2 in atom.bonds:
try:
self.implicitHydrogens[atom2] += 1
except KeyError:
self.implicitHydrogens[atom2] = 1
self.molecule.removeAtom(atom)
# Generate information about any cycles present in the molecule, as
# they will need special attention
self.__findRingGroups()
# Handle carbon monoxide special case
if self.molecule.getFormula() == 'CO' and len(atomsToRemove) == 0:
# RDKit does not accept atom type Ot
self.molecule.removeAtom(self.molecule.atoms[-1])
self.symbols = ['CO']
self.molecule.atoms[0].charge = 0 # don't label the C as - if you're not drawing the O with a +
self.coordinates = numpy.array([[0,0]], numpy.float64)
else:
# Generate the coordinates to use to draw the molecule
try:
# before getting coordinates, make all bonds single and then
# replace the bonds after generating coordinates. This avoids
# bugs with RDKit
old_bond_dictionary = self.__make_single_bonds()
self.__generateCoordinates()
self.__replace_bonds(old_bond_dictionary)
# Generate labels to use
self.__generateAtomLabels()
except (ValueError, numpy.linalg.LinAlgError), e:
logging.error('Error while drawing molecule {0}: {1}'.format(molecule.toSMILES(), e))
import sys, traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exc()
return None, None, None
except KeyError:
                logging.error('KeyError occurred when drawing molecule, likely because' +\
' the molecule contained non-standard bond orders in the' +\
' getResonanceHybrid method. These cannot be drawn since' +\
' they cannot be sent to RDKit for coordinate placing.')
raise
self.coordinates[:,1] *= -1
self.coordinates *= self.options['bondLength']
# Handle some special cases
if self.symbols == ['H','H']:
# Render as H2 instead of H-H
self.molecule.removeAtom(self.molecule.atoms[-1])
self.symbols = ['H2']
self.coordinates = numpy.array([[0,0]], numpy.float64)
elif self.symbols == ['O', 'O']:
# Render as O2 instead of O-O
self.molecule.removeAtom(self.molecule.atoms[-1])
self.molecule.atoms[0].radicalElectrons = 0
self.symbols = ['O2']
self.coordinates = numpy.array([[0,0]], numpy.float64)
elif self.symbols == ['OH', 'O'] or self.symbols == ['O', 'OH']:
# Render as HO2 instead of HO-O or O-OH
self.molecule.removeAtom(self.molecule.atoms[-1])
self.symbols = ['O2H']
self.coordinates = numpy.array([[0,0]], numpy.float64)
elif self.symbols == ['OH', 'OH']:
# Render as H2O2 instead of HO-OH or O-OH
self.molecule.removeAtom(self.molecule.atoms[-1])
self.symbols = ['O2H2']
self.coordinates = numpy.array([[0,0]], numpy.float64)
elif self.symbols == ['O', 'C', 'O']:
# Render as CO2 instead of O=C=O
self.molecule.removeAtom(self.molecule.atoms[0])
self.molecule.removeAtom(self.molecule.atoms[-1])
self.symbols = ['CO2']
self.coordinates = numpy.array([[0,0]], numpy.float64)
# Create a dummy surface to draw to, since we don't know the bounding rect
# We will copy this to another surface with the correct bounding rect
surface0 = createNewSurface(format=format, target=None)
cr0 = cairo.Context(surface0)
# Render using Cairo
self.render(cr0)
# Create the real surface with the appropriate size
xoff = self.left
yoff = self.top
width = self.right - self.left
height = self.bottom - self.top
self.surface = createNewSurface(format=format, target=target, width=width, height=height)
self.cr = cairo.Context(self.surface)
# Draw white background
self.cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
self.cr.paint()
self.render(self.cr, offset=(-xoff,-yoff))
if target is not None:
# Finish Cairo drawing
# Save PNG of drawing if appropriate
if isinstance(target, str):
ext = os.path.splitext(target)[1].lower()
if ext == '.png':
self.surface.write_to_png(target)
else:
self.surface.finish()
else:
self.surface.finish()
return self.surface, self.cr, (xoff, yoff, width, height)
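    # Illustrative call, mirroring the class docstring above ('molecule' and the
    # output path are hypothetical):
    #     drawer = MoleculeDrawer({'bondLength': 30})
    #     surface, cr, (left, top, width, height) = drawer.draw(
    #         molecule, format='png', target='molecule.png')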
def __findRingGroups(self):
"""
Find all of the cycles in the current molecule, and group them into
sets of adjacent cycles.
"""
# Find all of the cycles in the molecule
self.cycles = self.molecule.getSmallestSetOfSmallestRings()
self.ringSystems = []
# If the molecule contains cycles, find them and group them
if len(self.cycles) > 0:
# Split the list of cycles into groups
# Each atom in the molecule should belong to exactly zero or one such groups
for cycle in self.cycles:
found = False
for ringSystem in self.ringSystems:
for ring in ringSystem:
if any([atom in ring for atom in cycle]) and not found:
ringSystem.append(cycle)
found = True
if not found:
self.ringSystems.append([cycle])
def __generateCoordinates(self):
"""
Generate the 2D coordinates to be used when drawing the current
molecule. The function uses rdKits 2D coordinate generation.
"""
atoms = self.molecule.atoms
Natoms = len(atoms)
flag_charge = 0
for atom in self.molecule.atoms:
if atom.charge != 0: #atomType.label in ['N5s','N5d','N5dd','N5t','N5b']:
flag_charge = 1
break
# Initialize array of coordinates
self.coordinates = coordinates = numpy.zeros((Natoms, 2))
if flag_charge == 1:
# If there are only one or two atoms to draw, then determining the
# coordinates is trivial
if Natoms == 1:
self.coordinates[0,:] = [0.0, 0.0]
return self.coordinates
elif Natoms == 2:
self.coordinates[0,:] = [-0.5, 0.0]
self.coordinates[1,:] = [0.5, 0.0]
return self.coordinates
if len(self.cycles) > 0:
# Cyclic molecule
backbone = self.__findCyclicBackbone()
self.__generateRingSystemCoordinates(backbone)
# Flatten backbone so that it contains a list of the atoms in the
# backbone, rather than a list of the cycles in the backbone
backbone = list(set([atom for cycle in backbone for atom in cycle]))
else:
# Straight chain molecule
backbone = self.__findStraightChainBackbone()
self.__generateStraightChainCoordinates(backbone)
# If backbone is linear, then rotate so that the bond is parallel to the
# horizontal axis
vector0 = coordinates[atoms.index(backbone[1]),:] - coordinates[atoms.index(backbone[0]),:]
for i in range(2, len(backbone)):
vector = coordinates[atoms.index(backbone[i]),:] - coordinates[atoms.index(backbone[i-1]),:]
if numpy.linalg.norm(vector - vector0) > 1e-4:
break
else:
angle = math.atan2(vector0[0], vector0[1]) - math.pi / 2
rot = numpy.array([[math.cos(angle), math.sin(angle)], [-math.sin(angle), math.cos(angle)]], numpy.float64)
coordinates = numpy.dot(coordinates, rot)
# Center backbone at origin
xmin = numpy.min(coordinates[:,0])
xmax = numpy.max(coordinates[:,0])
ymin = numpy.min(coordinates[:,1])
ymax = numpy.max(coordinates[:,1])
xmid = 0.5 * (xmax + xmin)
ymid = 0.5 * (ymax + ymin)
for atom in backbone:
index = atoms.index(atom)
coordinates[index,0] -= xmid
coordinates[index,1] -= ymid
# We now proceed by calculating the coordinates of the functional groups
# attached to the backbone
# Each functional group is independent, although they may contain further
# branching and cycles
# In general substituents should try to grow away from the origin to
# minimize likelihood of overlap
self.__generateNeighborCoordinates(backbone)
return coordinates
else:
# Use rdkit 2D coordinate generation:
# Generate the RDkit molecule from the RDkit molecule, use geometry
# in order to match the atoms in the rdmol with the atoms in the
# RMG molecule (which is required to extract coordinates).
self.geometry = Geometry(None, None, self.molecule, None)
rdmol, rdAtomIdx = self.geometry.rd_build()
AllChem.Compute2DCoords(rdmol)
# Extract the coordinates from each atom.
for atom in atoms:
index = rdAtomIdx[atom]
point = rdmol.GetConformer(0).GetAtomPosition(index)
coordinates[index,:]= [point.x*0.6, point.y*0.6]
# RDKit generates some molecules more vertically than horizontally,
# Especially linear ones. This will reflect any molecule taller than
# it is wide across the line y=x
ranges = numpy.ptp(coordinates, axis = 0)
if ranges[1] > ranges[0]:
temp = numpy.copy(coordinates)
coordinates[:,0] = temp[:,1]
coordinates[:,1] = temp[:,0]
return coordinates
def __findCyclicBackbone(self):
"""
Return a set of atoms to use as the "backbone" of the molecule. For
cyclics this is simply the largest ring system.
"""
count = [len(set([atom for ring in ringSystem for atom in ring])) for ringSystem in self.ringSystems]
index = 0
for i in range(1, len(self.ringSystems)):
if count[i] > count[index]:
index = i
return self.ringSystems[index]
def __findStraightChainBackbone(self):
"""
Return a set of atoms to use as the "backbone" of the molecule. For
non-cyclics this is the largest straight chain between atoms. If carbon
atoms are present, then we define the backbone only in terms of them.
"""
# Find the terminal atoms - those that only have one explicit bond
terminalAtoms = [atom for atom in self.molecule.atoms if len(atom.bonds) == 1]
assert len(terminalAtoms) >= 2
# Starting from each terminal atom, find the longest straight path to
# another terminal
# The longest found is the backbone
backbone = []
paths = []
for atom in terminalAtoms:
paths.extend(self.__findStraightChainPaths([atom]))
# Remove any paths that don't end in a terminal atom
# (I don't think this should remove any!)
paths = [path for path in paths if path[-1] in terminalAtoms]
# Remove all paths shorter than the maximum
length = max([len(path) for path in paths])
paths = [path for path in paths if len(path) == length]
# Prefer the paths with the most carbon atoms
carbons = [sum([1 for atom in path if atom.isCarbon()]) for path in paths]
maxCarbons = max(carbons)
paths = [path for path, carbon in zip(paths, carbons) if carbon == maxCarbons]
# At this point we could choose any remaining path, so simply choose the first
backbone = paths[0]
assert len(backbone) > 1
assert backbone[0] in terminalAtoms
assert backbone[-1] in terminalAtoms
return backbone
def __findStraightChainPaths(self, atoms0):
"""
Finds the paths containing the list of atoms `atoms0` in the
current molecule. The atoms are assumed to already be in a path, with
``atoms0[0]`` being a terminal atom.
"""
atom1 = atoms0[-1]
paths = []
for atom2 in atom1.bonds:
if atom2 not in atoms0:
atoms = atoms0[:]
atoms.append(atom2)
if not self.molecule.isAtomInCycle(atom2):
paths.extend(self.__findStraightChainPaths(atoms))
if len(paths) == 0:
paths.append(atoms0[:])
return paths
def __generateRingSystemCoordinates(self, atoms):
"""
For a ring system composed of the given cycles of `atoms`, update the
coordinates of each atom in the system.
"""
coordinates = self.coordinates
atoms = atoms[:]
processed = []
# Lay out largest cycle in ring system first
cycle = atoms[0]
for cycle0 in atoms[1:]:
if len(cycle0) > len(cycle):
cycle = cycle0
angle = - 2 * math.pi / len(cycle)
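        # For a regular n-gon with unit side length the circumradius is
        # R = 1 / (2 sin(pi / n)), which keeps adjacent ring atoms one bond apart.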
radius = 1.0 / (2 * math.sin(math.pi / len(cycle)))
for i, atom in enumerate(cycle):
index = self.molecule.atoms.index(atom)
coordinates[index,:] = [math.cos(math.pi / 2 + i * angle), math.sin(math.pi / 2 + i * angle)]
coordinates[index,:] *= radius
atoms.remove(cycle)
processed.append(cycle)
# If there are other cycles, then try to lay them out as well
while len(atoms) > 0:
# Find the largest cycle that shares one or two atoms with a ring that's
# already been processed
cycle = None
for cycle0 in atoms:
for cycle1 in processed:
count = sum([1 for atom in cycle0 if atom in cycle1])
if (count == 1 or count == 2):
if cycle is None or len(cycle0) > len(cycle): cycle = cycle0
cycle0 = cycle1
atoms.remove(cycle)
# Shuffle atoms in cycle such that the common atoms come first
# Also find the average center of the processed cycles that touch the
# current cycles
found = False
commonAtoms = []
count = 0
center0 = numpy.zeros(2, numpy.float64)
for cycle1 in processed:
found = False
for atom in cycle1:
if atom in cycle and atom not in commonAtoms:
commonAtoms.append(atom)
found = True
if found:
center1 = numpy.zeros(2, numpy.float64)
for atom in cycle1:
center1 += coordinates[cycle1.index(atom),:]
center1 /= len(cycle1)
center0 += center1
count += 1
center0 /= count
if len(commonAtoms) > 1:
index0 = cycle.index(commonAtoms[0])
index1 = cycle.index(commonAtoms[1])
if (index0 == 0 and index1 == len(cycle) - 1) or (index1 == 0 and index0 == len(cycle) - 1):
cycle = cycle[-1:] + cycle[0:-1]
if cycle.index(commonAtoms[1]) < cycle.index(commonAtoms[0]):
cycle.reverse()
index = cycle.index(commonAtoms[0])
cycle = cycle[index:] + cycle[0:index]
# Determine center of cycle based on already-assigned positions of
# common atoms (which won't be changed)
if len(commonAtoms) == 1 or len(commonAtoms) == 2:
# Center of new cycle is reflection of center of adjacent cycle
# across common atom or bond
center = numpy.zeros(2, numpy.float64)
for atom in commonAtoms:
center += coordinates[self.molecule.atoms.index(atom),:]
center /= len(commonAtoms)
vector = center - center0
center += vector
radius = 1.0 / (2 * math.sin(math.pi / len(cycle)))
else:
# Use any three points to determine the point equidistant from these
# three; this is the center
index0 = self.molecule.atoms.index(commonAtoms[0])
index1 = self.molecule.atoms.index(commonAtoms[1])
index2 = self.molecule.atoms.index(commonAtoms[2])
A = numpy.zeros((2,2), numpy.float64)
b = numpy.zeros((2), numpy.float64)
A[0,:] = 2 * (coordinates[index1,:] - coordinates[index0,:])
A[1,:] = 2 * (coordinates[index2,:] - coordinates[index0,:])
b[0] = coordinates[index1,0]**2 + coordinates[index1,1]**2 - coordinates[index0,0]**2 - coordinates[index0,1]**2
b[1] = coordinates[index2,0]**2 + coordinates[index2,1]**2 - coordinates[index0,0]**2 - coordinates[index0,1]**2
center = numpy.linalg.solve(A, b)
radius = numpy.linalg.norm(center - coordinates[index0,:])
startAngle = 0.0; endAngle = 0.0
if len(commonAtoms) == 1:
# We will use the full 360 degrees to place the other atoms in the cycle
startAngle = math.atan2(-vector[1], vector[0])
endAngle = startAngle + 2 * math.pi
elif len(commonAtoms) >= 2:
# Divide other atoms in cycle equally among unused angle
vector = coordinates[cycle.index(commonAtoms[-1]),:] - center
startAngle = math.atan2(vector[1], vector[0])
vector = coordinates[cycle.index(commonAtoms[0]),:] - center
endAngle = math.atan2(vector[1], vector[0])
# Place remaining atoms in cycle
if endAngle < startAngle:
endAngle += 2 * math.pi
dAngle = (endAngle - startAngle) / (len(cycle) - len(commonAtoms) + 1)
else:
endAngle -= 2 * math.pi
dAngle = (endAngle - startAngle) / (len(cycle) - len(commonAtoms) + 1)
count = 1
for i in range(len(commonAtoms), len(cycle)):
angle = startAngle + count * dAngle
index = self.molecule.atoms.index(cycle[i])
# Check that we aren't reassigning any atom positions
# This version assumes that no atoms belong at the origin, which is
# usually fine because the first ring is centered at the origin
if numpy.linalg.norm(coordinates[index,:]) < 1e-4:
vector = numpy.array([math.cos(angle), math.sin(angle)], numpy.float64)
coordinates[index,:] = center + radius * vector
count += 1
# We're done assigning coordinates for this cycle, so mark it as processed
processed.append(cycle)
def __generateStraightChainCoordinates(self, atoms):
"""
Update the coordinates for the linear straight chain of `atoms` in
the current molecule.
"""
coordinates = self.coordinates
# First atom goes at origin
index0 = self.molecule.atoms.index(atoms[0])
coordinates[index0,:] = [0.0, 0.0]
# Second atom goes on x-axis (for now; this could be improved!)
index1 = self.molecule.atoms.index(atoms[1])
vector = numpy.array([1.0, 0.0], numpy.float64)
if atoms[0].bonds[atoms[1]].isTriple():
rotatePositive = False
else:
rotatePositive = True
rot = numpy.array([[math.cos(-math.pi / 6), math.sin(-math.pi / 6)], [-math.sin(-math.pi / 6), math.cos(-math.pi / 6)]], numpy.float64)
vector = numpy.array([1.0, 0.0], numpy.float64)
vector = numpy.dot(rot, vector)
coordinates[index1,:] = coordinates[index0,:] + vector
# Other atoms
for i in range(2, len(atoms)):
atom0 = atoms[i-2]
atom1 = atoms[i-1]
atom2 = atoms[i]
index1 = self.molecule.atoms.index(atom1)
index2 = self.molecule.atoms.index(atom2)
bond0 = atom0.bonds[atom1]
bond = atom1.bonds[atom2]
# Angle of next bond depends on the number of bonds to the start atom
numBonds = len(atom1.bonds)
if numBonds == 2:
if (bond0.isTriple() or bond.isTriple()) or (bond0.isDouble() and bond.isDouble()):
# Rotate by 0 degrees towards horizontal axis (to get angle of 180)
angle = 0.0
else:
# Rotate by 60 degrees towards horizontal axis (to get angle of 120)
angle = math.pi / 3
elif numBonds == 3:
# Rotate by 60 degrees towards horizontal axis (to get angle of 120)
angle = math.pi / 3
elif numBonds == 4:
# Rotate by 0 degrees towards horizontal axis (to get angle of 90)
angle = 0.0
elif numBonds == 5:
# Rotate by 36 degrees towards horizontal axis (to get angle of 144)
angle = math.pi / 5
elif numBonds == 6:
# Rotate by 0 degrees towards horizontal axis (to get angle of 180)
angle = 0.0
# Determine coordinates for atom
if angle != 0:
if not rotatePositive: angle = -angle
rot = numpy.array([[math.cos(angle), math.sin(angle)], [-math.sin(angle), math.cos(angle)]], numpy.float64)
vector = numpy.dot(rot, vector)
rotatePositive = not rotatePositive
coordinates[index2,:] = coordinates[index1,:] + vector
def __generateNeighborCoordinates(self, backbone):
"""
Recursively update the coordinates for the atoms immediately adjacent
to the atoms in the molecular `backbone`.
"""
atoms = self.molecule.atoms
coordinates = self.coordinates
for i in range(len(backbone)):
atom0 = backbone[i]
index0 = atoms.index(atom0)
# Determine bond angles of all previously-determined bond locations for
# this atom
bondAngles = []
for atom1 in atom0.bonds:
index1 = atoms.index(atom1)
if atom1 in backbone:
vector = coordinates[index1,:] - coordinates[index0,:]
angle = math.atan2(vector[1], vector[0])
bondAngles.append(angle)
bondAngles.sort()
bestAngle = 2 * math.pi / len(atom0.bonds)
regular = True
for angle1, angle2 in zip(bondAngles[0:-1], bondAngles[1:]):
if all([abs(angle2 - angle1 - (i+1) * bestAngle) > 1e-4 for i in range(len(atom0.bonds))]):
regular = False
if regular:
# All the bonds around each atom are equally spaced
# We just need to fill in the missing bond locations
# Determine rotation angle and matrix
rot = numpy.array([[math.cos(bestAngle), -math.sin(bestAngle)], [math.sin(bestAngle), math.cos(bestAngle)]], numpy.float64)
# Determine the vector of any currently-existing bond from this atom
vector = None
for atom1 in atom0.bonds:
index1 = atoms.index(atom1)
if atom1 in backbone or numpy.linalg.norm(coordinates[index1,:]) > 1e-4:
vector = coordinates[index1,:] - coordinates[index0,:]
# Iterate through each neighboring atom to this backbone atom
# If the neighbor is not in the backbone and does not yet have
# coordinates, then we need to determine coordinates for it
for atom1 in atom0.bonds:
if atom1 not in backbone and numpy.linalg.norm(coordinates[atoms.index(atom1),:]) < 1e-4:
occupied = True; count = 0
# Rotate vector until we find an unoccupied location
while occupied and count < len(atom0.bonds):
count += 1; occupied = False
vector = numpy.dot(rot, vector)
for atom2 in atom0.bonds:
index2 = atoms.index(atom2)
if numpy.linalg.norm(coordinates[index2,:] - coordinates[index0,:] - vector) < 1e-4:
occupied = True
coordinates[atoms.index(atom1),:] = coordinates[index0,:] + vector
self.__generateFunctionalGroupCoordinates(atom0, atom1)
else:
# The bonds are not evenly spaced (e.g. due to a ring)
# We place all of the remaining bonds evenly over the reflex angle
startAngle = max(bondAngles)
endAngle = min(bondAngles)
if 0.0 < endAngle - startAngle < math.pi: endAngle += 2 * math.pi
elif 0.0 > endAngle - startAngle > -math.pi: startAngle -= 2 * math.pi
dAngle = (endAngle - startAngle) / (len(atom0.bonds) - len(bondAngles) + 1)
index = 1
for atom1 in atom0.bonds:
if atom1 not in backbone and numpy.linalg.norm(coordinates[atoms.index(atom1),:]) < 1e-4:
angle = startAngle + index * dAngle
index += 1
vector = numpy.array([math.cos(angle), math.sin(angle)], numpy.float64)
vector /= numpy.linalg.norm(vector)
coordinates[atoms.index(atom1),:] = coordinates[index0,:] + vector
self.__generateFunctionalGroupCoordinates(atom0, atom1)
def __generateFunctionalGroupCoordinates(self, atom0, atom1):
"""
For the functional group starting with the bond from `atom0` to `atom1`,
generate the coordinates of the rest of the functional group. `atom0` is
treated as if a terminal atom. `atom0` and `atom1` must already have their
coordinates determined. `atoms` is a list of the atoms to be drawn, `bonds`
is a dictionary of the bonds to draw, and `coordinates` is an array of the
coordinates for each atom to be drawn. This function is designed to be
recursive.
"""
atoms = self.molecule.atoms
coordinates = self.coordinates
index0 = atoms.index(atom0)
index1 = atoms.index(atom1)
# Determine the vector of any currently-existing bond from this atom
# (We use the bond to the previous atom here)
vector = coordinates[index0,:] - coordinates[index1,:]
bondAngle = math.atan2(vector[1], vector[0])
# Check to see if atom1 is in any cycles in the molecule
ringSystem = None
for ringSys in self.ringSystems:
if any([atom1 in ring for ring in ringSys]):
ringSystem = ringSys
if ringSystem is not None:
# atom1 is part of a ring system, so we need to process the entire
# ring system at once
# Generate coordinates for all atoms in the ring system
self.__generateRingSystemCoordinates(ringSystem)
cycleAtoms = list(set([atom for ring in ringSystem for atom in ring]))
coordinates_cycle = numpy.zeros_like(self.coordinates)
for atom in cycleAtoms:
coordinates_cycle[atoms.index(atom),:] = coordinates[atoms.index(atom),:]
# Rotate the ring system coordinates so that the line connecting atom1
# and the center of mass of the ring is parallel to that between
# atom0 and atom1
center = numpy.zeros(2, numpy.float64)
for atom in cycleAtoms:
center += coordinates_cycle[atoms.index(atom),:]
center /= len(cycleAtoms)
vector0 = center - coordinates_cycle[atoms.index(atom1),:]
angle = math.atan2(vector[1] - vector0[1], vector[0] - vector0[0])
rot = numpy.array([[math.cos(angle), -math.sin(angle)], [math.sin(angle), math.cos(angle)]], numpy.float64)
coordinates_cycle = numpy.dot(coordinates_cycle, rot)
# Translate the ring system coordinates to the position of atom1
coordinates_cycle += coordinates[atoms.index(atom1),:] - coordinates_cycle[atoms.index(atom1),:]
for atom in cycleAtoms:
coordinates[atoms.index(atom),:] = coordinates_cycle[atoms.index(atom),:]
# Generate coordinates for remaining neighbors of ring system,
# continuing to recurse as needed
self.__generateNeighborCoordinates(cycleAtoms)
else:
# atom1 is not in any rings, so we can continue as normal
# Determine rotation angle and matrix
numBonds = len(atom1.bonds)
angle = 0.0
if numBonds == 2:
bond0, bond = atom1.bonds.values()
if (bond0.isTriple() or bond.isTriple()) or (bond0.isDouble() and bond.isDouble()):
angle = math.pi
else:
angle = 2 * math.pi / 3
# Make sure we're rotating such that we move away from the origin,
# to discourage overlap of functional groups
rot1 = numpy.array([[math.cos(angle), -math.sin(angle)], [math.sin(angle), math.cos(angle)]], numpy.float64)
rot2 = numpy.array([[math.cos(angle), math.sin(angle)], [-math.sin(angle), math.cos(angle)]], numpy.float64)
vector1 = coordinates[index1,:] + numpy.dot(rot1, vector)
vector2 = coordinates[index1,:] + numpy.dot(rot2, vector)
if bondAngle < -0.5 * math.pi or bondAngle > 0.5 * math.pi:
angle = abs(angle)
else:
angle = -abs(angle)
else:
angle = 2 * math.pi / numBonds
rot = numpy.array([[math.cos(angle), -math.sin(angle)], [math.sin(angle), math.cos(angle)]], numpy.float64)
# Iterate through each neighboring atom to this backbone atom
# If the neighbor is not in the backbone, then we need to determine
# coordinates for it
for atom, bond in atom1.bonds.iteritems():
if atom is not atom0:
occupied = True; count = 0
# Rotate vector until we find an unoccupied location
while occupied and count < len(atom1.bonds):
count += 1; occupied = False
vector = numpy.dot(rot, vector)
for atom2 in atom1.bonds:
index2 = atoms.index(atom2)
if numpy.linalg.norm(coordinates[index2,:] - coordinates[index1,:] - vector) < 1e-4:
occupied = True
coordinates[atoms.index(atom),:] = coordinates[index1,:] + vector
# Recursively continue with functional group
self.__generateFunctionalGroupCoordinates(atom1, atom)
def __generateAtomLabels(self):
"""
Generate the labels to use for each atom in the drawing. In general,
all atoms are labeled with their symbols except carbon. Some carbon
atoms are also labeled in certain circumstances. The labels also
contain any implicit hydrogen atoms (i.e. those hydrogen atoms not
explicitly drawn in the skeletal formula).
"""
atoms = self.molecule.atoms
self.symbols = symbols = [atom.symbol for atom in atoms]
for i in range(len(symbols)):
# Don't label carbon atoms, unless there are only one or two heavy atoms
if symbols[i] == 'C' and len(symbols) > 2:
if len(atoms[i].bonds) > 1 or (atoms[i].radicalElectrons == 0 and atoms[i].charge == 0):
symbols[i] = ''
# Do label atoms that have only double bonds to one or more labeled atoms
changed = True
while changed:
changed = False
for i in range(len(symbols)):
if symbols[i] == '' and all([(bond.isDouble() or bond.isTriple()) for bond in atoms[i].bonds.values()]) and any([symbols[atoms.index(atom)] != '' for atom in atoms[i].bonds]):
symbols[i] = atoms[i].symbol
changed = True
# Add implicit hydrogens
for i in range(len(symbols)):
if symbols[i] != '':
try:
Hcount = self.implicitHydrogens[atoms[i]]
except KeyError:
continue
if Hcount == 1: symbols[i] = symbols[i] + 'H'
elif Hcount > 1: symbols[i] = symbols[i] + 'H{0:d}'.format(Hcount)
return symbols
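    # Worked sketch (hypothetical molecule): ethanol drawn as C-C-O leaves both
    # carbons unlabeled and keeps the oxygen together with its implicit
    # hydrogen, so this method would return ['', '', 'OH'].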
def render(self, cr, offset=None):
"""
Uses the Cairo graphics library to create a skeletal formula drawing of a
molecule containing the list of `atoms` and dict of `bonds` to be drawn.
The 2D position of each atom in `atoms` is given in the `coordinates` array.
The symbols to use at each atomic position are given by the list `symbols`.
You must specify the Cairo context `cr` to render to.
"""
try:
import cairocffi as cairo
except ImportError:
import cairo
coordinates = self.coordinates
atoms = self.molecule.atoms
symbols = self.symbols
drawLonePairs = False
for atom in atoms:
if atom.isNitrogen():
drawLonePairs = True
left = 0.0
top = 0.0
right = 0.0
bottom = 0.0
# Shift coordinates by offset value
if offset is not None:
coordinates[:,0] += offset[0]
coordinates[:,1] += offset[1]
# Draw bonds
for atom1 in atoms:
for atom2, bond in atom1.bonds.items():
index1 = atoms.index(atom1)
index2 = atoms.index(atom2)
if index1 < index2: # So we only draw each bond once
self.__renderBond(index1, index2, bond, cr)
# Draw aromatic bonds
for cycle in self.cycles:
cycleBonds = []
for atom1, atom2 in zip(cycle[0:-1], cycle[1:]):
cycleBonds.append(atom1.bonds[atom2])
cycleBonds.append(cycle[0].bonds[cycle[-1]])
if all([bond.isBenzene() for bond in cycleBonds]):
# We've found an aromatic ring, so draw a circle in the center to represent the benzene bonds
center = numpy.zeros(2, numpy.float64)
for atom in cycle:
index = atoms.index(atom)
center += coordinates[index,:]
center /= len(cycle)
index1 = atoms.index(cycle[0])
index2 = atoms.index(cycle[1])
radius = math.sqrt(
(center[0] - (coordinates[index1,0] + coordinates[index2,0]) / 2)**2 +
(center[1] - (coordinates[index1,1] + coordinates[index2,1]) / 2)**2
) - 4
cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
cr.set_line_width(1.0)
cr.set_line_cap(cairo.LINE_CAP_ROUND)
cr.arc(center[0], center[1], radius, 0.0, 2 * math.pi)
cr.stroke()
# Draw atoms
for i, atom in enumerate(atoms):
symbol = symbols[i]
index = atoms.index(atom)
x0, y0 = coordinates[index,:]
vector = numpy.zeros(2, numpy.float64)
for atom2 in atom.bonds:
vector += coordinates[atoms.index(atom2),:] - coordinates[index,:]
heavyFirst = vector[0] <= 0
if len(atoms) == 1 and atoms[0].symbol not in ['C', 'N'] and atoms[0].charge == 0 and atoms[0].radicalElectrons == 0:
# This is so e.g. water is rendered as H2O rather than OH2
heavyFirst = False
                cr.set_font_size(self.options['fontSizeNormal'])
x0 += cr.text_extents(symbols[0])[2] / 2.0
atomBoundingRect = self.__renderAtom(symbol, atom, x0, y0, cr, heavyFirst, drawLonePairs)
# Add a small amount of whitespace on all sides
padding = self.options['padding']
self.left -= padding; self.top -= padding; self.right += padding; self.bottom += padding
def __drawLine(self, cr, x1, y1, x2, y2, dashed = False):
"""
Draw a line on the given Cairo context `cr` from (`x1`, `y1`) to
(`x2`,`y2`), and update the bounding rectangle if necessary.
"""
try:
import cairocffi as cairo
except ImportError:
import cairo
cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
cr.set_line_width(1.0)
if dashed:
cr.set_dash([3.5])
cr.set_line_cap(cairo.LINE_CAP_ROUND)
cr.move_to(x1, y1); cr.line_to(x2, y2)
cr.stroke()
# remove dashes for next method call
if dashed:
cr.set_dash([])
if x1 < self.left: self.left = x1
if x1 > self.right: self.right = x1
if y1 < self.top: self.top = y1
if y1 > self.bottom: self.bottom = y1
if x2 < self.left: self.left = x2
if x2 > self.right: self.right = x2
if y2 < self.top: self.top = y2
if y2 > self.bottom: self.bottom = y2
def __renderBond(self, atom1, atom2, bond, cr):
"""
Render an individual `bond` between atoms with indices `atom1` and `atom2`
on the Cairo context `cr`.
"""
try:
import cairocffi as cairo
except ImportError:
import cairo
bondLength = self.options['bondLength']
# determine if aromatic
isAromatic = False
for cycle in self.cycles:
if self.molecule.atoms[atom1] in cycle and \
self.molecule.atoms[atom2] in cycle:
allBenzenes = True
for index in range(len(cycle)):
if not cycle[index -1].bonds[cycle[index]].isBenzene():
allBenzenes = False
break
if allBenzenes:
isAromatic = True
break
x1, y1 = self.coordinates[atom1,:]
x2, y2 = self.coordinates[atom2,:]
angle = math.atan2(y2 - y1, x2 - x1)
dx = x2 - x1; dy = y2 - y1
du = math.cos(angle + math.pi / 2)
dv = math.sin(angle + math.pi / 2)
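        # (du, dv) is a unit vector perpendicular to the bond axis; it is scaled
        # below to offset the parallel strokes of double and triple bonds.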
if (self.symbols[atom1] != '' or \
self.symbols[atom2] != ''):
if bond.isTriple():
# Draw triple bond centered on bond axis
du *= 3; dv *= 3
self.__drawLine(cr, x1 - du, y1 - dv, x2 - du, y2 - dv)
self.__drawLine(cr, x1 , y1 , x2 , y2 )
self.__drawLine(cr, x1 + du, y1 + dv, x2 + du, y2 + dv)
elif bond.getOrderNum() > 2 and bond.getOrderNum() < 3:
du *= 3; dv *= 3
self.__drawLine(cr, x1 - du, y1 - dv, x2 - du, y2 - dv)
self.__drawLine(cr, x1 , y1 , x2 , y2 )
self.__drawLine(cr, x1 + du, y1 + dv, x2 + du, y2 + dv, dashed = True)
elif bond.isDouble():
# Draw double bond centered on bond axis
du *= 1.6; dv *= 1.6
self.__drawLine(cr, x1 - du, y1 - dv, x2 - du, y2 - dv)
self.__drawLine(cr, x1 + du, y1 + dv, x2 + du, y2 + dv)
elif bond.getOrderNum() > 1 and bond.getOrderNum() < 2 and not isAromatic:
# Draw dashed double bond centered on bond axis
du *= 1.6; dv *= 1.6
self.__drawLine(cr, x1 - du, y1 - dv, x2 - du, y2 - dv)
self.__drawLine(cr, x1 + du, y1 + dv, x2 + du, y2 + dv, dashed=True)
else:
self.__drawLine(cr, x1, y1, x2, y2)
else:
# Draw bond on skeleton
self.__drawLine(cr, x1, y1, x2, y2)
# Draw other bonds
if bond.isDouble():
du *= 3.2; dv *= 3.2; dx = 2 * dx / bondLength; dy = 2 * dy / bondLength
self.__drawLine(cr, x1 + du + dx, y1 + dv + dy, x2 + du - dx, y2 + dv - dy)
elif bond.isTriple():
du *= 3; dv *= 3; dx = 2 * dx / bondLength; dy = 2 * dy / bondLength
self.__drawLine(cr, x1 - du + dx, y1 - dv + dy, x2 - du - dx, y2 - dv - dy)
self.__drawLine(cr, x1 + du + dx, y1 + dv + dy, x2 + du - dx, y2 + dv - dy)
elif bond.getOrderNum() > 1 and bond.getOrderNum() < 2 and not isAromatic:
du *= 3.2; dv *= 3.2; dx = 2 * dx / bondLength; dy = 2 * dy / bondLength
self.__drawLine(cr, x1 + du + dx, y1 + dv + dy, x2 + du - dx, y2 + dv - dy, dashed=True)
elif bond.getOrderNum() > 2 and bond.getOrderNum() < 3:
du *= 3; dv *= 3; dx = 2 * dx / bondLength; dy = 2 * dy / bondLength
self.__drawLine(cr, x1 - du + dx, y1 - dv + dy, x2 - du - dx, y2 - dv - dy)
self.__drawLine(cr, x1 + du + dx, y1 + dv + dy, x2 + du - dx, y2 + dv - dy, dashed=True)
def __renderAtom(self, symbol, atom, x0, y0, cr, heavyFirst=True, drawLonePairs=False):
"""
Render the `label` for an atom centered around the coordinates (`x0`, `y0`)
onto the Cairo context `cr`. If `heavyFirst` is ``False``, then the order
of the atoms will be reversed in the symbol. This method also causes
radical electrons and charges to be drawn adjacent to the rendered symbol.
"""
try:
import cairocffi as cairo
except ImportError:
import cairo
atoms = self.molecule.atoms
if symbol != '':
heavyAtom = symbol[0]
# Split label by atoms
labels = re.findall('[A-Z][a-z]*[0-9]*', symbol)
if not heavyFirst: labels.reverse()
if 'C' not in symbol and 'O' not in symbol and len(atoms) == 1: labels.sort()
symbol = ''.join(labels)
# Determine positions of each character in the symbol
coordinates = []
cr.set_font_size(self.options['fontSizeNormal'])
y0 += max([cr.text_extents(char)[3] for char in symbol if char.isalpha()]) / 2
for i, label in enumerate(labels):
for j, char in enumerate(label):
cr.set_font_size(self.options['fontSizeSubscript' if char.isdigit() else 'fontSizeNormal'])
xbearing, ybearing, width, height, xadvance, yadvance = cr.text_extents(char)
if i == 0 and j == 0:
# Center heavy atom at (x0, y0)
x = x0 - width / 2.0 - xbearing
y = y0
else:
# Left-justify other atoms (for now)
x = x0
y = y0
if char.isdigit(): y += height / 2.0
coordinates.append((x,y))
x0 = x + xadvance
x = 1000000; y = 1000000; width = 0; height = 0
startWidth = 0; endWidth = 0
for i, char in enumerate(symbol):
cr.set_font_size(self.options['fontSizeSubscript' if char.isdigit() else 'fontSizeNormal'])
extents = cr.text_extents(char)
if coordinates[i][0] + extents[0] < x: x = coordinates[i][0] + extents[0]
if coordinates[i][1] + extents[1] < y: y = coordinates[i][1] + extents[1]
width += extents[4] if i < len(symbol) - 1 else extents[2]
if extents[3] > height: height = extents[3]
if i == 0: startWidth = extents[2]
if i == len(symbol) - 1: endWidth = extents[2]
if not heavyFirst:
for i in range(len(coordinates)):
coordinates[i] = (coordinates[i][0] - (width - startWidth / 2 - endWidth / 2), coordinates[i][1])
x -= width - startWidth / 2 - endWidth / 2
# Background
x1 = x - 2; y1 = y - 2; x2 = x + width + 2; y2 = y + height + 2; r = 4
cr.move_to(x1 + r, y1)
cr.line_to(x2 - r, y1)
cr.curve_to(x2 - r/2, y1, x2, y1 + r/2, x2, y1 + r)
cr.line_to(x2, y2 - r)
cr.curve_to(x2, y2 - r/2, x2 - r/2, y2, x2 - r, y2)
cr.line_to(x1 + r, y2)
cr.curve_to(x1 + r/2, y2, x1, y2 - r/2, x1, y2 - r)
cr.line_to(x1, y1 + r)
cr.curve_to(x1, y1 + r/2, x1 + r/2, y1, x1 + r, y1)
cr.close_path()
cr.save()
cr.set_operator(cairo.OPERATOR_SOURCE)
cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
cr.fill()
cr.restore()
boundingRect = [x1, y1, x2, y2]
# Set color for text
if heavyAtom == 'C': cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
elif heavyAtom == 'N': cr.set_source_rgba(0.0, 0.0, 1.0, 1.0)
elif heavyAtom == 'O': cr.set_source_rgba(1.0, 0.0, 0.0, 1.0)
elif heavyAtom == 'F': cr.set_source_rgba(0.5, 0.75, 1.0, 1.0)
elif heavyAtom == 'Si': cr.set_source_rgba(0.5, 0.5, 0.75, 1.0)
elif heavyAtom == 'Al': cr.set_source_rgba(0.75, 0.5, 0.5, 1.0)
elif heavyAtom == 'P': cr.set_source_rgba(1.0, 0.5, 0.0, 1.0)
elif heavyAtom == 'S': cr.set_source_rgba(1.0, 0.75, 0.5, 1.0)
elif heavyAtom == 'Cl': cr.set_source_rgba(0.0, 1.0, 0.0, 1.0)
elif heavyAtom == 'Br': cr.set_source_rgba(0.6, 0.2, 0.2, 1.0)
elif heavyAtom == 'I': cr.set_source_rgba(0.5, 0.0, 0.5, 1.0)
else: cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
# Text itself
for i, char in enumerate(symbol):
cr.set_font_size(self.options['fontSizeSubscript' if char.isdigit() else 'fontSizeNormal'])
xbearing, ybearing, width, height, xadvance, yadvance = cr.text_extents(char)
xi, yi = coordinates[i]
cr.move_to(xi, yi)
cr.show_text(char)
x, y = coordinates[0] if heavyFirst else coordinates[-1]
else:
x = x0; y = y0; width = 0; height = 0
boundingRect = [x0 - 0.5, y0 - 0.5, x0 + 0.5, y0 + 0.5]
heavyAtom = ''
# Draw radical electrons and charges
# These will be placed either horizontally along the top or bottom of the
# atom or vertically along the left or right of the atom
orientation = ' '
if len(atom.bonds) == 0:
if len(symbol) == 1: orientation = 'r'
else: orientation = 'l'
elif len(atom.bonds) == 1:
# Terminal atom - we require a horizontal arrangement if there are
# more than just the heavy atom
atom1 = atom.bonds.keys()[0]
vector = self.coordinates[atoms.index(atom),:] - self.coordinates[atoms.index(atom1),:]
if len(symbol) <= 1:
angle = math.atan2(vector[1], vector[0])
if 3 * math.pi / 4 <= angle or angle < -3 * math.pi / 4: orientation = 'l'
elif -3 * math.pi / 4 <= angle < -1 * math.pi / 4: orientation = 'b'
elif -1 * math.pi / 4 <= angle < 1 * math.pi / 4: orientation = 'r'
else: orientation = 't'
else:
if vector[1] <= 0:
orientation = 'b'
else:
orientation = 't'
else:
# Internal atom
# First try to see if there is a "preferred" side on which to place the
# radical/charge data, i.e. if the bonds are unbalanced
vector = numpy.zeros(2, numpy.float64)
for atom1 in atom.bonds:
vector += self.coordinates[atoms.index(atom),:] - self.coordinates[atoms.index(atom1),:]
if numpy.linalg.norm(vector) < 1e-4:
# All of the bonds are balanced, so we'll need to be more shrewd
angles = []
for atom1 in atom.bonds:
vector = self.coordinates[atoms.index(atom1),:] - self.coordinates[atoms.index(atom),:]
angles.append(math.atan2(vector[1], vector[0]))
# Try one more time to see if we can use one of the four sides
# (due to there being no bonds in that quadrant)
# We don't even need a full 90 degrees open (using 60 degrees instead)
if all([ 1 * math.pi / 3 >= angle or angle >= 2 * math.pi / 3 for angle in angles]): orientation = 't'
elif all([-2 * math.pi / 3 >= angle or angle >= -1 * math.pi / 3 for angle in angles]): orientation = 'b'
elif all([-1 * math.pi / 6 >= angle or angle >= 1 * math.pi / 6 for angle in angles]): orientation = 'r'
elif all([ 5 * math.pi / 6 >= angle or angle >= -5 * math.pi / 6 for angle in angles]): orientation = 'l'
else:
# If we still don't have it (e.g. when there are 4+ equally-
# spaced bonds), just put everything in the top right for now
orientation = 'tr'
else:
# There is an unbalanced side, so let's put the radical/charge data there
angle = math.atan2(vector[1], vector[0])
if 3 * math.pi / 4 <= angle or angle < -3 * math.pi / 4: orientation = 'l'
elif -3 * math.pi / 4 <= angle < -1 * math.pi / 4: orientation = 'b'
elif -1 * math.pi / 4 <= angle < 1 * math.pi / 4: orientation = 'r'
else: orientation = 't'
cr.set_font_size(self.options['fontSizeNormal'])
extents = cr.text_extents(heavyAtom)
# (xi, yi) mark the center of the space in which to place the radicals and charges
if orientation[0] == 'l':
xi = x - 3
yi = y - extents[3]/2
elif orientation[0] == 'b':
xi = x + extents[0] + extents[2]/2
yi = y - extents[3] - 4
elif orientation[0] == 'r':
xi = x + extents[0] + extents[2] + 4
yi = y - extents[3]/2
elif orientation[0] == 't':
xi = x + extents[0] + extents[2]/2
yi = y + 4
# If we couldn't use one of the four sides, then offset the radical/charges
# horizontally by a few pixels, in hope that this avoids overlap with an
# existing bond
if len(orientation) > 1: xi += 4
# Get width and height
cr.set_font_size(self.options['fontSizeSubscript'])
width = 0.0; height = 0.0
if orientation[0] == 'b' or orientation[0] == 't':
if atom.radicalElectrons > 0:
width += atom.radicalElectrons * 2 + (atom.radicalElectrons - 1)
height = atom.radicalElectrons * 2
text = ''
if atom.radicalElectrons > 0 and atom.charge != 0: width += 1
if atom.charge == 1: text = '+'
elif atom.charge > 1: text = '{0:d}+'.format(atom.charge)
elif atom.charge == -1: text = u'\u2013'
elif atom.charge < -1: text = u'{0:d}\u2013'.format(abs(atom.charge))
if text != '':
extents = cr.text_extents(text)
width += extents[2] + 1
height = extents[3]
elif orientation[0] == 'l' or orientation[0] == 'r':
if atom.radicalElectrons > 0:
height += atom.radicalElectrons * 2 + (atom.radicalElectrons - 1)
width = atom.radicalElectrons * 2
text = ''
if atom.radicalElectrons > 0 and atom.charge != 0: height += 1
if atom.charge == 1: text = '+'
elif atom.charge > 1: text = '{0:d}+'.format(atom.charge)
elif atom.charge == -1: text = u'\u2013'
elif atom.charge < -1: text = u'{0:d}\u2013'.format(abs(atom.charge))
if text != '':
extents = cr.text_extents(text)
height += extents[3] + 1
width = extents[2]
# Move (xi, yi) to top left corner of space in which to draw radicals and charges
xi -= width / 2.0; yi -= height / 2.0
# Update bounding rectangle if necessary
if width > 0 and height > 0:
if xi < boundingRect[0]:
boundingRect[0] = xi
if yi < boundingRect[1]:
boundingRect[1] = yi
if xi + width > boundingRect[2]:
boundingRect[2] = xi + width
if yi + height > boundingRect[3]:
boundingRect[3] = yi + height
if orientation[0] == 'b' or orientation[0] == 't':
# Draw radical electrons first
for i in range(atom.radicalElectrons):
cr.new_sub_path()
cr.arc(xi + 3 * i + 1, yi + height/2, 1, 0, 2 * math.pi)
cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
cr.fill()
if atom.radicalElectrons > 0: xi += atom.radicalElectrons * 2 + (atom.radicalElectrons - 1) + 1
# Draw charges second
text = ''
if atom.charge == 1: text = '+'
elif atom.charge > 1: text = '{0:d}+'.format(atom.charge)
elif atom.charge == -1: text = u'\u2013'
elif atom.charge < -1: text = u'{0:d}\u2013'.format(abs(atom.charge))
if text != '':
extents = cr.text_extents(text)
cr.move_to(xi, yi - extents[1])
cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
cr.show_text(text)
# Draw lone electron pairs
# Draw them for nitrogen containing molecules only
if drawLonePairs:
for i in range(atom.lonePairs):
cr.new_sub_path()
if i == 0:
x1lp = x-2
y1lp = y-8
x2lp = x+2
y2lp = y-12
elif i == 1:
x1lp = x+12
y1lp = y-8
x2lp = x+8
y2lp = y-12
elif i == 2:
x1lp = x-2
y1lp = y-1
x2lp = x+2
y2lp = y+3
self.__drawLine(cr, x1lp, y1lp, x2lp, y2lp)
elif orientation[0] == 'l' or orientation[0] == 'r':
# Draw charges first
text = ''
if atom.charge == 1: text = '+'
elif atom.charge > 1: text = '{0:d}+'.format(atom.charge)
elif atom.charge == -1: text = u'\u2013'
elif atom.charge < -1: text = u'{0:d}\u2013'.format(abs(atom.charge))
if text != '':
extents = cr.text_extents(text)
cr.move_to(xi - extents[2]/2, yi - extents[1])
cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
cr.show_text(text)
if atom.charge != 0: yi += extents[3] + 1
# Draw radical electrons second
for i in range(atom.radicalElectrons):
cr.new_sub_path()
cr.arc(xi + width/2, yi + 3 * i + 1, 1, 0, 2 * math.pi)
cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
cr.fill()
# Draw lone electron pairs
# Draw them for nitrogen species only
if drawLonePairs:
for i in range (atom.lonePairs):
cr.new_sub_path()
if i == 0:
x1lp = x-2
y1lp = y-8
x2lp = x+2
y2lp = y-12
elif i == 1:
x1lp = x+12
y1lp = y-8
x2lp = x+8
y2lp = y-12
elif i == 2:
x1lp = x-2
y1lp = y-1
x2lp = x+2
y2lp = y+3
self.__drawLine(cr, x1lp, y1lp, x2lp, y2lp)
# Update bounding rect to ensure atoms are included
if boundingRect[0] < self.left:
self.left = boundingRect[0]
if boundingRect[1] < self.top:
self.top = boundingRect[1]
if boundingRect[2] > self.right:
self.right = boundingRect[2]
if boundingRect[3] > self.bottom:
self.bottom = boundingRect[3]
def __make_single_bonds(self):
"""This method converts all bonds to single bonds and then returns
a dictionary of Bond object keys with the old bond order as a value"""
dictionary = {}
for atom1 in self.molecule.atoms:
for atom2, bond in atom1.bonds.items():
if not bond.isSingle():
dictionary[bond] = bond.getOrderNum()
bond.setOrderNum(1)
return dictionary
def __replace_bonds(self,bond_order_dictionary):
"""
Sets the bond order in self.molecule equal to the orders in bond_order_dictionary
which is obtained from __make_single_bonds().
"""
for bond, order in bond_order_dictionary.items():
bond.setOrderNum(order)
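    # Illustrative sketch (not part of the original module): the two helpers
    # above are intended to be used as a bracketing pair around code that
    # needs a temporarily single-bonded view of the molecule, for example:
    #
    #     saved_orders = self.__make_single_bonds()
    #     try:
    #         pass  # operate on the single-bonded molecule here
    #     finally:
    #         self.__replace_bonds(saved_orders)
    #
    # The try/finally wrapping is an added safety assumption; callers may also
    # simply call the two methods back to back.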
################################################################################
class ReactionDrawer:
"""
This class provides functionality for drawing chemical reactions using the
skeletal formula of each reactant and product molecule via the Cairo 2D
graphics engine. The most common use case is simply::
ReactionDrawer().draw(reaction, format='png', path='reaction.png')
where ``reaction`` is the :class:`Reaction` object to draw. You can also
pass a dict of options to the constructor to affect how the molecules are
drawn.
"""
def __init__(self, options=None):
self.options = MoleculeDrawer().options.copy()
self.options.update({
'arrowLength': 36,
})
if options: self.options.update(options)
def draw(self, reaction, format, path=None):
"""
Draw the given `reaction` using the given image `format` - pdf, svg,
ps, or png. If `path` is given, the drawing is saved to that location
on disk.
This function returns the Cairo surface and context used to create the
drawing, as well as a bounding box for the molecule being drawn as the
tuple (`left`, `top`, `width`, `height`).
"""
# The Cairo 2D graphics library (and its Python wrapper) is required for
# the reaction drawing algorithm
try:
import cairocffi as cairo
except ImportError:
try:
import cairo
except ImportError:
                logging.error('Cairo not found; reaction will not be drawn.')
return
from .molecule import Molecule
from rmgpy.species import Species
fontFamily = self.options['fontFamily']
fontSizeNormal = self.options['fontSizeNormal']
# First draw each of the reactants and products
reactants = []; products = []
for reactant in reaction.reactants:
if isinstance(reactant, Species):
molecule = reactant.molecule[0]
elif isinstance(reactant, Molecule):
molecule = reactant
reactants.append(MoleculeDrawer().draw(molecule, format))
for product in reaction.products:
if isinstance(product, Species):
molecule = product.molecule[0]
elif isinstance(product, Molecule):
molecule = product
products.append(MoleculeDrawer().draw(molecule, format))
# Next determine size required for surface
rxn_width = 0; rxn_height = 0; rxn_top = 0
for surface, cr, rect in reactants + products:
left, top, width, height = rect
rxn_width += width
if height > rxn_height: rxn_height = height
if height + top > rxn_top: rxn_top = height + top
rxn_top = 0.5 * rxn_height - rxn_top
# Also include '+' and reaction arrow in width
cr.set_font_size(fontSizeNormal)
plus_extents = cr.text_extents(' + ')
arrow_width = self.options['arrowLength']
rxn_width += (len(reactants)-1) * plus_extents[4] + arrow_width + (len(products)-1) * plus_extents[4]
# Now make the surface for the reaction and render each molecule on it
rxn_surface = createNewSurface(format, path, width=rxn_width, height=rxn_height)
rxn_cr = cairo.Context(rxn_surface)
# Draw white background
rxn_cr.set_source_rgba(1.0, 1.0, 1.0, 1.0)
rxn_cr.paint()
# Draw reactants
rxn_x = 0.0; rxn_y = 0.0
for index, reactant in enumerate(reactants):
surface, cr, rect = reactant
left, top, width, height = rect
if index > 0:
# Draw the "+" between the reactants
rxn_cr.save()
rxn_cr.set_font_size(fontSizeNormal)
rxn_y = rxn_top + 0.5 * (rxn_height - plus_extents[3])
rxn_cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
rxn_cr.move_to(rxn_x, rxn_y - plus_extents[1])
rxn_cr.show_text(' + ')
rxn_cr.restore()
rxn_x += plus_extents[4]
# Draw the reactant
rxn_y = top + rxn_top + 0.5 * rxn_height
if rxn_y < 0 : rxn_y = 0
rxn_cr.save()
rxn_cr.set_source_surface(surface, rxn_x, rxn_y)
rxn_cr.paint()
rxn_cr.restore()
rxn_x += width
# Draw reaction arrow
# Unfortunately Cairo does not have arrow drawing built-in, so we must
# draw the arrow head ourselves
rxn_cr.save()
rxn_cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
rxn_cr.set_line_width(1.0)
rxn_cr.move_to(rxn_x + 8, rxn_top + 0.5 * rxn_height)
rxn_cr.line_to(rxn_x + arrow_width - 8, rxn_top + 0.5 * rxn_height)
rxn_cr.move_to(rxn_x + arrow_width - 14, rxn_top + 0.5 * rxn_height - 3.0)
rxn_cr.line_to(rxn_x + arrow_width - 8, rxn_top + 0.5 * rxn_height)
rxn_cr.line_to(rxn_x + arrow_width - 14, rxn_top + 0.5 * rxn_height + 3.0)
rxn_cr.stroke()
rxn_cr.restore()
rxn_x += arrow_width
# Draw products
for index, product in enumerate(products):
surface, cr, rect = product
left, top, width, height = rect
if index > 0:
# Draw the "+" between the products
rxn_cr.save()
rxn_cr.set_font_size(fontSizeNormal)
rxn_y = rxn_top + 0.5 * (rxn_height - plus_extents[3])
rxn_cr.set_source_rgba(0.0, 0.0, 0.0, 1.0)
rxn_cr.move_to(rxn_x, rxn_y - plus_extents[1])
rxn_cr.show_text(' + ')
rxn_cr.restore()
rxn_x += plus_extents[4]
# Draw the product
rxn_y = top + rxn_top + 0.5 * rxn_height
if rxn_y < 0 : rxn_y = 0
rxn_cr.save()
rxn_cr.set_source_surface(surface, rxn_x, rxn_y)
rxn_cr.paint()
rxn_cr.restore()
rxn_x += width
# Finish Cairo drawing
if format == 'png':
rxn_surface.write_to_png(path)
else:
rxn_surface.finish()
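################################################################################
# Illustrative usage sketch (not part of the original module). It assumes
# `reaction` is an rmgpy Reaction whose reactants and products are Species or
# Molecule objects:
#
#     drawer = ReactionDrawer(options={'arrowLength': 54})
#     drawer.draw(reaction, format='png', path='reaction.png')
#
# A larger 'arrowLength' simply widens the reaction arrow; all other options
# are inherited from MoleculeDrawer.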
|
mit
| 4,890,292,243,272,668,000 | 45.023359 | 191 | 0.526934 | false |
Inspq/ansible
|
lib/ansible/modules/web_infrastructure/htpasswd.py
|
1
|
9184
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Nimbis Services, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: htpasswd
version_added: "1.3"
short_description: manage user files for basic authentication
description:
- Add and remove username/password entries in a password file using htpasswd.
- This is used by web servers such as Apache and Nginx for basic authentication.
options:
path:
required: true
aliases: [ dest, destfile ]
description:
- Path to the file that contains the usernames and passwords
name:
required: true
aliases: [ username ]
description:
- User name to add or remove
password:
required: false
description:
- Password associated with user.
- Must be specified if user does not exist yet.
crypt_scheme:
required: false
choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
default: "apr_md5_crypt"
description:
- Encryption scheme to be used. As well as the four choices listed
here, you can also use any other hash supported by passlib, such as
        md5_crypt and sha256_crypt, which are Linux passwd hashes. If you
        do so, the password file will not be compatible with Apache or Nginx.
state:
required: false
choices: [ present, absent ]
default: "present"
description:
- Whether the user entry should be present or not
create:
required: false
choices: [ "yes", "no" ]
default: "yes"
description:
- Used with C(state=present). If specified, the file will be created
if it does not already exist. If set to "no", will fail if the
file does not exist
notes:
- "This module depends on the I(passlib) Python library, which needs to be installed on all target systems."
- "On Debian, Ubuntu, or Fedora: install I(python-passlib)."
- "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)."
requirements: [ passlib>=1.6 ]
author: "Ansible Core Team"
"""
EXAMPLES = """
# Add a user to a password file and ensure permissions are set
- htpasswd:
path: /etc/nginx/passwdfile
name: janedoe
password: '9s36?;fyNp'
owner: root
group: www-data
mode: 0640
# Remove a user from a password file
- htpasswd:
path: /etc/apache2/passwdfile
name: foobar
state: absent
# Add a user to a password file suitable for use by libpam-pwdfile
- htpasswd:
path: /etc/mail/passwords
name: alex
password: oedu2eGh
crypt_scheme: md5_crypt
"""
import os
import tempfile
from distutils.version import LooseVersion
try:
from passlib.apache import HtpasswdFile, htpasswd_context
from passlib.context import CryptContext
import passlib
except ImportError:
passlib_installed = False
else:
passlib_installed = True
apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
def create_missing_directories(dest):
destpath = os.path.dirname(dest)
if not os.path.exists(destpath):
os.makedirs(destpath)
def present(dest, username, password, crypt_scheme, create, check_mode):
""" Ensures user is present
Returns (msg, changed) """
if crypt_scheme in apache_hashes:
context = htpasswd_context
else:
context = CryptContext(schemes = [ crypt_scheme ] + apache_hashes)
if not os.path.exists(dest):
if not create:
raise ValueError('Destination %s does not exist' % dest)
if check_mode:
return ("Create %s" % dest, True)
create_missing_directories(dest)
if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context)
else:
ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context)
if getattr(ht, 'set_password', None):
ht.set_password(username, password)
else:
ht.update(username, password)
ht.save()
return ("Created %s and added %s" % (dest, username), True)
else:
if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context)
else:
ht = HtpasswdFile(dest, default=crypt_scheme, context=context)
found = None
if getattr(ht, 'check_password', None):
found = ht.check_password(username, password)
else:
found = ht.verify(username, password)
if found:
return ("%s already present" % username, False)
else:
if not check_mode:
if getattr(ht, 'set_password', None):
ht.set_password(username, password)
else:
ht.update(username, password)
ht.save()
return ("Add/update %s" % username, True)
def absent(dest, username, check_mode):
""" Ensures user is absent
Returns (msg, changed) """
if LooseVersion(passlib.__version__) >= LooseVersion('1.6'):
ht = HtpasswdFile(dest, new=False)
else:
ht = HtpasswdFile(dest)
if username not in ht.users():
return ("%s not present" % username, False)
else:
if not check_mode:
ht.delete(username)
ht.save()
return ("Remove %s" % username, True)
def check_file_attrs(module, changed, message):
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
if changed:
message += " and "
changed = True
message += "ownership, perms or SE linux context changed"
return message, changed
def main():
arg_spec = dict(
path=dict(required=True, aliases=["dest", "destfile"]),
name=dict(required=True, aliases=["username"]),
password=dict(required=False, default=None, no_log=True),
crypt_scheme=dict(required=False, default="apr_md5_crypt"),
state=dict(required=False, default="present"),
create=dict(type='bool', default='yes'),
)
module = AnsibleModule(argument_spec=arg_spec,
add_file_common_args=True,
supports_check_mode=True)
path = module.params['path']
username = module.params['name']
password = module.params['password']
crypt_scheme = module.params['crypt_scheme']
state = module.params['state']
create = module.params['create']
check_mode = module.check_mode
if not passlib_installed:
module.fail_json(msg="This module requires the passlib Python library")
    # Check file for blank lines in an effort to avoid "need more than 1 value to unpack" error.
try:
f = open(path, "r")
except IOError:
# No preexisting file to remove blank lines from
f = None
else:
try:
lines = f.readlines()
finally:
f.close()
        # Only rewrite the file if it actually contains blank lines, so an unchanged file is left untouched
strip = False
for line in lines:
if not line.strip():
strip = True
break
if strip:
# If check mode, create a temporary file
if check_mode:
temp = tempfile.NamedTemporaryFile()
path = temp.name
f = open(path, "w")
try:
            for line in lines:
                if line.strip():
                    f.write(line)
finally:
f.close()
try:
if state == 'present':
(msg, changed) = present(path, username, password, crypt_scheme, create, check_mode)
elif state == 'absent':
if not os.path.exists(path):
module.exit_json(msg="%s not present" % username,
warnings="%s does not exist" % path, changed=False)
(msg, changed) = absent(path, username, check_mode)
else:
module.fail_json(msg="Invalid state: %s" % state)
check_file_attrs(module, changed, msg)
module.exit_json(msg=msg, changed=changed)
except Exception:
e = get_exception()
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
if __name__ == '__main__':
main()
|
gpl-3.0
| -283,431,518,286,145,500 | 31.338028 | 110 | 0.62304 | false |
avilaton/sqlalchemy-continuum
|
sqlalchemy_continuum/transaction.py
|
1
|
5496
|
from datetime import datetime
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
import six
import sqlalchemy as sa
from sqlalchemy.ext.compiler import compiles
from .dialects.postgresql import (
CreateTemporaryTransactionTableSQL,
InsertTemporaryTransactionSQL,
TransactionTriggerSQL
)
from .exc import ImproperlyConfigured
from .factory import ModelFactory
@compiles(sa.types.BigInteger, 'sqlite')
def compile_big_integer(element, compiler, **kw):
return 'INTEGER'
class TransactionBase(object):
issued_at = sa.Column(sa.DateTime, default=datetime.utcnow)
@property
def entity_names(self):
"""
Return a list of entity names that changed during this transaction.
"""
return [changes.entity_name for changes in self.changes]
@property
def changed_entities(self):
"""
Return all changed entities for this transaction log entry.
        Entities are returned as a dict where keys are entity classes and
        values are lists of entities that changed in this transaction.
"""
manager = self.__versioning_manager__
tuples = set(manager.version_class_map.items())
entities = {}
session = sa.orm.object_session(self)
for class_, version_class in tuples:
if class_.__name__ not in self.entity_names:
continue
tx_column = manager.option(class_, 'transaction_column_name')
entities[version_class] = (
session
.query(version_class)
.filter(getattr(version_class, tx_column) == self.id)
).all()
return entities
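    # Illustrative sketch (not part of the original module): given a
    # Transaction instance ``txn`` loaded from an active session, the changed
    # version rows can be inspected as follows:
    #
    #     for version_cls, versions in txn.changed_entities.items():
    #         print(version_cls.__name__, len(versions))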
procedure_sql = """
CREATE OR REPLACE FUNCTION transaction_temp_table_generator()
RETURNS TRIGGER AS $$
BEGIN
{temporary_transaction_sql}
INSERT INTO temporary_transaction (id) VALUES (NEW.id);
RETURN NEW;
END;
$$
LANGUAGE plpgsql
"""
def create_triggers(cls):
sa.event.listen(
cls.__table__,
'after_create',
sa.schema.DDL(
procedure_sql.format(
temporary_transaction_sql=CreateTemporaryTransactionTableSQL(),
insert_temporary_transaction_sql=(
InsertTemporaryTransactionSQL(
transaction_id_values='NEW.id'
)
),
)
)
)
sa.event.listen(
cls.__table__,
'after_create',
sa.schema.DDL(str(TransactionTriggerSQL(cls)))
)
sa.event.listen(
cls.__table__,
'after_drop',
sa.schema.DDL(
'DROP FUNCTION IF EXISTS transaction_temp_table_generator()'
)
)
class TransactionFactory(ModelFactory):
model_name = 'Transaction'
def __init__(self, remote_addr=True):
self.remote_addr = remote_addr
def create_class(self, manager):
"""
Create Transaction class.
"""
class Transaction(
manager.declarative_base,
TransactionBase
):
__tablename__ = 'transaction'
__versioning_manager__ = manager
id = sa.Column(
sa.types.BigInteger,
primary_key=True,
autoincrement=True
)
if self.remote_addr:
remote_addr = sa.Column(sa.String(50))
if manager.user_cls:
user_cls = manager.user_cls
registry = manager.declarative_base._decl_class_registry
if isinstance(user_cls, six.string_types):
try:
user_cls = registry[user_cls]
except KeyError:
raise ImproperlyConfigured(
'Could not build relationship between Transaction'
' and %s. %s was not found in declarative class '
'registry. Either configure VersioningManager to '
'use different user class or disable this '
'relationship ' % (user_cls, user_cls)
)
user_id = sa.Column(
sa.inspect(user_cls).primary_key[0].type,
sa.ForeignKey(
'%s.%s' % (user_cls.__tablename__, sa.inspect(user_cls).primary_key[0].name)
),
index=True
)
user = sa.orm.relationship(user_cls)
def __repr__(self):
fields = ['id', 'issued_at', 'user']
field_values = OrderedDict(
(field, getattr(self, field))
for field in fields
if hasattr(self, field)
)
return '<Transaction %s>' % ', '.join(
(
'%s=%r' % (field, value)
if not isinstance(value, six.integer_types)
# We want the following line to ensure that longs get
# shown without the ugly L suffix on python 2.x
# versions
else '%s=%d' % (field, value)
for field, value in field_values.items()
)
)
if manager.options['native_versioning']:
create_triggers(Transaction)
return Transaction
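# Illustrative sketch (not part of the original module): the factory is
# normally driven by the versioning manager during configuration, roughly:
#
#     factory = TransactionFactory(remote_addr=False)
#     Transaction = factory.create_class(manager)
#
# Setting remote_addr=False simply omits the remote_addr column from the
# generated transaction table.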
|
bsd-3-clause
| -4,239,641,394,936,761,300 | 29.876404 | 100 | 0.532751 | false |
RTHMaK/RPGOne
|
deep_qa-master/deep_qa/tensors/masked_operations.py
|
1
|
4270
|
from keras import backend as K
from .backend import switch
def masked_batch_dot(tensor_a, tensor_b, mask_a, mask_b):
'''
The simplest case where this function is applicable is the following:
tensor_a: (batch_size, a_length, embed_dim)
tensor_b: (batch_size, b_length, embed_dim)
mask_a: None or (batch_size, a_length)
mask_b: None or (batch_size, b_length)
Returns:
a_dot_b: (batch_size, a_length, b_length), with zeros for masked elements.
This function will also work for larger tensors, as long as `abs(K.ndim(tensor_a) -
K.ndim(tensor_b)) < 1` (this is due to the limitations of `K.batch_dot`). We always assume the
dimension to perform the dot is the last one, and that the masks have one fewer dimension than
the tensors.
'''
if K.ndim(tensor_a) < K.ndim(tensor_b):
# To simplify the logic below, we'll make sure that tensor_a is always the bigger one.
tensor_a, tensor_b = tensor_b, tensor_a
mask_a, mask_b = mask_b, mask_a
if K.ndim(tensor_a) > 3 and K.backend() == 'theano':
raise RuntimeError("K.batch_dot() in theano is broken for tensors with more than"
" three dimensions. Use tensorflow instead.")
a_dot_axis = K.ndim(tensor_a) - 1
b_dot_axis = K.ndim(tensor_b) - 1
if b_dot_axis < a_dot_axis:
tensor_b = K.expand_dims(tensor_b, axis=-1)
# (batch_size, a_length, b_length)
a_dot_b = K.batch_dot(tensor_a, tensor_b, axes=(a_dot_axis, b_dot_axis))
if b_dot_axis < a_dot_axis:
a_dot_b = K.squeeze(a_dot_b, axis=-1)
if mask_a is None and mask_b is None:
return a_dot_b
elif mask_a is None:
# (batch_size, a_length)
mask_a = K.sum(K.ones_like(tensor_a), axis=-1)
elif mask_b is None:
# (batch_size, b_length)
sum_axis = -1
if b_dot_axis < a_dot_axis:
sum_axis -= 1
mask_b = K.sum(K.ones_like(tensor_b), axis=sum_axis)
    # Casting masks to float since TF would complain if we multiplied bools.
float_mask_a = K.cast(mask_a, 'float32')
float_mask_b = K.cast(mask_b, 'float32')
if b_dot_axis < a_dot_axis:
float_mask_b = K.expand_dims(float_mask_b, axis=-1)
else:
float_mask_a = K.expand_dims(float_mask_a, axis=-1)
float_mask_b = K.expand_dims(float_mask_b, axis=-2)
# (batch_size, a_length, b_length)
a2b_mask = float_mask_a * float_mask_b
result = switch(a2b_mask, a_dot_b, K.zeros_like(a_dot_b))
return result
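# Illustrative sketch (assumes a working TensorFlow or Theano Keras backend):
#
#     a = K.variable([[[1.0, 0.0], [0.0, 1.0]]])   # (1, 2, 2)
#     b = K.variable([[[1.0, 1.0], [2.0, 0.0]]])   # (1, 2, 2)
#     mask_a = K.variable([[1.0, 1.0]])            # nothing masked in a
#     mask_b = K.variable([[1.0, 0.0]])            # second position of b masked
#     K.eval(masked_batch_dot(a, b, mask_a, mask_b))
#     # -> [[[1., 0.], [1., 0.]]]: dot products against the masked position
#     #    of b are zeroed out.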
def masked_softmax(vector, mask):
"""
`K.softmax(vector)` does not work if some elements of `vector` should be masked. This performs
a softmax on just the non-masked portions of `vector` (passing None in for the mask is also
acceptable; you'll just get a regular softmax).
We assume that both `vector` and `mask` (if given) have shape (batch_size, vector_dim).
In the case that the input vector is completely masked, this function returns an array
of ``0.0``. This behavior may cause ``NaN`` if this is used as the last layer of a model
    that uses categorical cross-entropy loss.
"""
# We calculate masked softmax in a numerically stable fashion, as done
# in https://github.com/rkadlec/asreader/blob/master/asreader/custombricks/softmax_mask_bricks.py
if mask is not None:
# Here we get normalized log probabilities for
# enhanced numerical stability.
mask = K.cast(mask, "float32")
input_masked = mask * vector
shifted = mask * (input_masked - K.max(input_masked, axis=1,
keepdims=True))
# We add epsilon to avoid numerical instability when
# the sum in the log yields 0.
normalization_constant = K.log(K.sum(mask * K.exp(shifted), axis=1,
keepdims=True) + K.epsilon())
normalized_log_probabilities = mask * (shifted - normalization_constant)
unmasked_probabilities = K.exp(normalized_log_probabilities)
return switch(mask, unmasked_probabilities, K.zeros_like(unmasked_probabilities))
else:
# There is no mask, so we use the provided ``K.softmax`` function.
return K.softmax(vector)
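# Illustrative sketch (assumes a working TensorFlow or Theano Keras backend):
#
#     vector = K.variable([[1.0, 2.0, 3.0]])
#     mask = K.variable([[1.0, 1.0, 0.0]])
#     K.eval(masked_softmax(vector, mask))
#     # -> approximately [[0.27, 0.73, 0.0]]; the masked position receives
#     #    zero probability and the rest renormalize over the visible entries.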
|
apache-2.0
| -8,834,312,388,142,465,000 | 42.571429 | 101 | 0.628806 | false |
OpenEneaLinux/rt-tools
|
partrt/test/test_partition.py
|
1
|
50786
|
#!/usr/bin/env python3
# Copyright (c) 2014 by Enea Software AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Enea Software AB nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Rules for adding new test cases:
# Test cases that have the prefix "PART" should be executed in a partitioned
# environment. Test cases that require a "clean" environment should have the
# prefix "NOPART".
import os
import sys
import subprocess
import getopt
import multiprocessing
import time
import re
import signal
# Test constants
PROC_LAST_CPU_ELEM = 38
PROC_RT_PRIO = 39
PROC_SCHED_POLICY = 40
SCHED_NORMAL = 0
SCHED_FIFO = 1
DEFERMENT_TICK_DISABLED = 4294967295
SUCCESS = 0
FAIL = 1
SUPPORTED_TARGETS = [
"keystone-evm",
"chiefriver",
"p2041rdb",
"crystalforest-server",
"romley-ivb"
]
class targetOptions:
def __init__(self, target):
self.rt_mask = {
"default" : (2 ** (multiprocessing.cpu_count() - 1)),
"keystone-evm" : 0xe,
"chiefriver" : 0xe,
"p2041rdb" : 0xe,
"crystalforest-server" : 0xaaaa, # NUMA node 1
"romley-ivb" : 0x820 # CPU 5 and CPU 11 (same core id in cpuinfo)
}[target]
# None, or node nr
self.numa = {
"default" : None,
"keystone-evm" : None,
"chiefriver" :None,
"p2041rdb" : None,
"crystalforest-server" : 1,
"romley-ivb" : None
}[target]
# Test globals
global verbose
global options
global ref_count_irqs
def print_msg(msg):
global verbose
if verbose == True:
print(msg)
################################################################################
# Helper functions
################################################################################
# Run shell command. Will tell the world about it if verbose is true
def run(cmd, func=None):
print_msg("Executing: '" + cmd + "'")
return subprocess.Popen(cmd, shell=True, preexec_fn=func, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# Convert a Linux CPU list (e.g. 3,4,5-7) to a bitmask. Return -1 on failure
def liststr2mask(liststr):
try:
valid_array = True
mask = 0
for elem in liststr.split(','):
range_str = elem.split('-')
if len(range_str) > 1:
for idx in range(int(range_str[0]),int(range_str[1]) + 1):
if idx >= 0:
mask |= (1 << idx)
else:
print_msg("liststr2mask: Illegal range:" + range_str)
valid_array = False
break
else:
idx = int(range_str[0])
if idx >= 0:
mask |= (1 << idx)
else:
print_msg("liststr2mask: Illegal index:" + str(idx))
valid_array = False
break
if valid_array == True:
return mask
else:
return -1
except:
print_msg ("liststr2mask: Failed because of exception: " +
str(sys.exc_info()[1]))
return -1
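# Illustrative example: liststr2mask("3,4,5-7") returns 0xf8 (bits 3-7 set),
# while malformed or negative input such as "3,-1" returns -1.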
# Function that returns cpuset_dir and cpuset prefix
def get_cpusets():
if os.path.isdir("/sys/fs/cgroup/cpuset/"):
cpuset_dir = "/sys/fs/cgroup/cpuset/"
cpuset_prefix = "cpuset."
return cpuset_dir, cpuset_prefix
else:
print_msg("get_cpusets: Kernel is lacking support for cpuset")
return "", ""
# Check if bad parameter is detected
def bad_parameter(cmd, tc_name):
try:
p = run(cmd)
(stdout, stderr) = p.communicate()
if p.returncode == 0:
print_msg(tc_name + ": Failed: " + cmd +
": incorrectly returned normal")
return FAIL
else:
return SUCCESS
except:
print_msg ("bad_parameter: Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
# Get process info. Returns task_name, CPU affinity mask, last CPU, sched policy and RT priority
def get_task_info(pid):
try:
task_name = ""
affinity = -1
last_cpu = -1
policy = -1
prio = -1
with open("/proc/" + str(pid) + "/status") as f:
for line in f.readlines():
if "Name:" in line:
task_name = line.split()[1]
if "Cpus_allowed:" in line:
affinity = int(line.split()[1], base=16)
# Check on what CPU the process was executed most recently
with open("/proc/" + str(pid) + "/stat") as f:
elements = f.readline().split()
last_cpu = int(elements[PROC_LAST_CPU_ELEM])
policy = int(elements[PROC_SCHED_POLICY])
prio = int(elements[PROC_RT_PRIO])
print_msg("get_task_info(" + str(pid) + "): Task name: " + task_name + ", PID: " + str(pid) + ", affinity: " + str(affinity) + ", last CPU: " + str(last_cpu) + ", policy: " + str(policy) + ", prio: " + str(prio))
return task_name, affinity, last_cpu, policy, prio
except:
print_msg ("get_task_info: Failed because of exception: " +
str(sys.exc_info()[1]))
return "", -1, -1, -1, -1
# Read the content of a comma-separated hex mask file and return the result as
# an array of integers.
def read_cpumask(file_str):
try:
if os.path.isfile(file_str):
with open(file_str) as f:
content = f.readline();
val = content.rstrip().split(',');
val = [int(x, base=16) for x in val];
return val
except:
print_msg ("read_cpumask: Failed because of exception: " +
str(sys.exc_info()[1]))
return 0
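# Illustrative example: a file containing "00000000,00000fff" is returned as
# [0, 0xfff]; as in the kernel cpumask format, the most significant 32-bit
# word comes first.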
# Check if file has expected value. If not return FAIL. If file does not exist
# or if file has expected value, return SUCCESS
# The file is expected to have a value in the format 00000000,00000fff.
# The parameter expected_val is an array containing only the least significant
# words; it is zero-padded on the left before the comparison.
def check_file_cpumask(file_str, expected_val):
try:
val = read_cpumask(file_str)
lendiff = len(val) - len(expected_val);
expected_val = ([0] * lendiff) + expected_val; # Pad with zero
if val != expected_val:
print_msg("check_file Failed: " + file_str +
" has value: " +
str(','.join([str(x) for x in val])) + " Expected: " +
str(','.join([str(x) for x in expected_val])))
return FAIL
return SUCCESS
except:
print_msg ("check_file: Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
# Check if file has expected value. If not return FAIL. If file does not exist
# or if file has expected value, return SUCCESS
def check_file(file_str, expected_val, val_base):
try:
if os.path.isfile(file_str):
with open(file_str) as f:
val = int(f.readline(), base=val_base)
if val != expected_val:
print_msg("check_file Failed: " + file_str +
" has value: " + str(val) + " Expected: " +
str(expected_val))
return FAIL
return SUCCESS
except:
print_msg ("check_file: Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
# Check that the undo sub-command does what it should. Takes expected
# environment as input parameters
def check_env(sched_rt_runtime_us, sched_tick_max_deferment, stat_interval,
numa_affinity, watchdog, cpumask, check_interval):
try:
# Check sched_rt_runtime_us cleanup
if (check_file("/proc/sys/kernel/sched_rt_runtime_us",
sched_rt_runtime_us, 10) == FAIL):
return FAIL
# Check sched_tick_max_deferment cleanup
# Check only if kernel configured for NO_HZ_FULL with patched tick
# deferment
if (check_file("/sys/kernel/debug/sched_tick_max_deferment",
sched_tick_max_deferment, 10) == FAIL):
return FAIL
# Check vmstat_interval cleanup
if (check_file("/proc/sys/vm/stat_interval",
stat_interval, 10) == FAIL):
return FAIL
# Get numa_affinity
if (check_file("/sys/bus/workqueue/devices/writeback/numa",
numa_affinity, 10) == FAIL):
return FAIL
# Check watchdog cleanup
if (check_file("/proc/sys/kernel/watchdog",
watchdog, 10) == FAIL):
return FAIL
# Check BWQ cleanup
if (check_file_cpumask("/sys/bus/workqueue/devices/writeback/cpumask",
[cpumask]) == FAIL):
return FAIL
# Check machine check cleanup (only CPU 0)
if (check_file(
"/sys/devices/system/machinecheck/machinecheck0/check_interval",
check_interval, 10) == FAIL):
return FAIL
return SUCCESS
except:
print_msg("check_env Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
# Get environment settings
def get_env():
try:
sched_rt_runtime_us = 0
sched_tick_max_deferment = 0
stat_interval = 0
numa_affinity = 0
watchdog = 0
check_interval = 0
# Get RT throtteling values
if os.path.isfile("/proc/sys/kernel/sched_rt_runtime_us"):
with open("/proc/sys/kernel/sched_rt_runtime_us") as f:
sched_rt_runtime_us = int(f.readline())
# Get sched_tick_max_deferment
if os.path.isfile("/sys/kernel/debug/sched_tick_max_deferment"):
with open("/sys/kernel/debug/sched_tick_max_deferment") as f:
sched_tick_max_deferment = int(f.readline())
# Get vmstat_interval
if os.path.isfile("/proc/sys/vm/stat_interval"):
with open("/proc/sys/vm/stat_interval") as f:
stat_interval = int(f.readline())
# Get numa_affinity
if os.path.isfile("/sys/bus/workqueue/devices/writeback/numa"):
with open("/sys/bus/workqueue/devices/writeback/numa") as f:
numa_affinity = int(f.readline())
# Get watchdog
if os.path.isfile("/proc/sys/kernel/watchdog"):
with open("/proc/sys/kernel/watchdog") as f:
watchdog = int(f.readline())
# Get machine check interval (only check CPU 0)
file_str = ("/sys/devices/system/machinecheck/machinecheck0/" +
"check_interval")
if os.path.isfile(file_str):
with open(file_str) as f:
check_interval = int(f.readline(), base=10)
return (sched_rt_runtime_us, sched_tick_max_deferment, stat_interval,
numa_affinity, watchdog, check_interval)
except:
print_msg("get_env Failed because of exception: " +
str(sys.exc_info()[1]))
return -1,-1,-1,-1,-1,-1
# Check that partitions have been removed.
def check_cpuset_cleanup(rt_partition, nrt_partition):
try:
# Check that default irq affinity cleanup
with open("/proc/irq/default_smp_affinity") as f:
cpumask = 2 ** multiprocessing.cpu_count() - 1
default_affinity = int(f.readline(), base=16) & cpumask
if (default_affinity != cpumask):
print_msg("check_cpuset_cleanup: Bad default IRQ affinity:" +
" Expected " + hex(cpumask) + " got " +
hex(default_affinity))
return FAIL
# Check cpuset cleanup
cpuset_dir, cpuset_prefix = get_cpusets()
if os.path.isdir(cpuset_dir + "/" + rt_partition):
print_msg("check_cpuset_cleanup: Failed: " + cpuset_dir +
"/" + rt_partition + " is still present")
return FAIL
if os.path.isdir(cpuset_dir + "/" + nrt_partition):
print_msg("check_cpuset_cleanup: Failed: " + cpuset_dir +
"/" + nrt_partition + " is still present")
return FAIL
return SUCCESS
except:
print_msg("check_cpuset_cleanup Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
# Count the number of IRQs that includes the RT CPUs in its affinity
# mask.
def count_irgs_in_rt():
global options
try:
n = 0;
rt_mask = options.rt_mask
for irqvector in os.listdir("/proc/irq/"):
if os.path.isdir("/proc/irq/" + irqvector):
with open("/proc/irq/" + irqvector + "/smp_affinity") as f:
affinity = int(f.readline(), base=16)
if (affinity & rt_mask != 0):
n += 1;
return n
except:
return -1
################################################################################
# Test cases
################################################################################
# PART_TC_0
# Preparation for test cases that must be done before partrt create
# Calculate ref_count_irqs, needed by PART_TC_2_2
def part_tc_0_1_irq_affinity():
global ref_count_irqs;
ref_count_irqs = count_irgs_in_rt()
if (ref_count_irqs == -1):
print_msg("part_tc_0_1 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
return SUCCESS
# PART_TC_1
# Run partrt create and check the return code and the CPU lists reported in its output.
# Leaves system in a partitioned state.
def part_tc_1_1_prepare():
try:
if options.numa is not None:
cmd = ("partrt create -n " + str(options.numa))
else:
cmd = ("partrt create " + hex(options.rt_mask))
p = run(cmd)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
print_msg((stdout, stderr))
print_msg("part_tc_1_1: Failed: partrt returned non-zero value: " +
str(p.returncode))
return FAIL
rt_mask = options.rt_mask
nrt_mask = ~rt_mask & (2 ** multiprocessing.cpu_count() - 1)
for line in stdout.decode('utf-8').splitlines():
if "Isolated CPUs (rt):" in line:
if rt_mask != liststr2mask(line.split(':')[1]):
print_msg("part_tc_1_1 : Failed, partrt returned bad RT CPU" +
" list:" + line.split(':')[1])
return FAIL
if "Non-isolated CPUs (nrt):" in line:
if nrt_mask != liststr2mask(line.split(':')[1]):
print_msg("part_tc_1_1 : Failed, partrt returned bad NRT" +
" CPU list:" + line.split(':')[1])
return FAIL
return SUCCESS
except:
print_msg("part_tc_1_1 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
# PART_TC_1_2 Test the list sub-command
def part_tc_1_2_prepare():
try:
cmd = "partrt list"
p = run(cmd, func=os.setsid)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
print_msg((stdout, stderr))
print_msg("part_tc_1_2 Failed: " + cmd +
" returned with abnormal code: " + str(p.returncode))
return FAIL
found_rt = False
found_nrt = False
for line in stdout.decode('utf-8').splitlines():
if "Name:rt" in line:
real_mask = liststr2mask(line.split(":")[2])
rt_mask = options.rt_mask
if real_mask != rt_mask:
print_msg("part_tc_1_2 Failed: rt partition has CPU mask " +
hex(real_mask) + " expected: " + hex (rt_mask))
return FAIL
found_rt = True
if "Name:nrt" in line:
real_mask = liststr2mask(line.split(":")[2])
nrt_mask = (~options.rt_mask &
(2 ** multiprocessing.cpu_count() - 1))
if real_mask != nrt_mask:
print_msg("part_tc_1_2 Failed: nrt partition has CPU mask "
+ hex(real_mask) + " expected: " + hex (nrt_mask))
return FAIL
found_nrt = True
if not(found_rt and found_nrt):
print_msg("part_tc_1_2 Failed: Could not find all partitions")
return FAIL
return SUCCESS
except:
print_msg("part_tc_1_2 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
# PART_TC_2_1
# Check that the default irq affinity is the NRT affinity mask
def part_tc_2_1_irq_affinity():
try:
with open("/proc/irq/default_smp_affinity") as f:
default_affinity = int(f.readline(), base=16)
rt_mask = options.rt_mask
nrt_mask = ~rt_mask & (2 ** multiprocessing.cpu_count() - 1)
if (default_affinity != nrt_mask):
print_msg("part_tc_2_1: Bad default IRQ affinity: Expected " +
hex(nrt_mask) + " got " + hex(default_affinity))
return FAIL
return SUCCESS
except:
print_msg("part_tc_2_1 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
# PART_TC_2_2
# Check that at least one fewer IRQ includes the RT CPUs in its affinity
# mask.
def part_tc_2_2_irq_affinity():
global ref_count_irqs;
n = count_irgs_in_rt()
if (n == -1):
print_msg("part_tc_2_2 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
elif (n >= ref_count_irqs):
print_msg("part_tc_2_2: No IRQ was migrated")
return FAIL
else:
return SUCCESS
# PART_TC_3_1
# Check that load balancing only is enabled for the nrt cpuset
def part_tc_3_1_proc_affinity():
try:
(cpuset_dir, cpuset_prefix) = get_cpusets()
if len(cpuset_dir) == 0:
print_msg("part_tc_3_1: Kernel is lacking support for cpuset")
return FAIL
# Check root cpuset
sched_load_balance = (cpuset_dir + cpuset_prefix +
"sched_load_balance")
with open(sched_load_balance) as f:
load_balance = int(f.readline())
if load_balance != 0:
print_msg(
"part_tc_3_1: Load balance is not disabled in root cpuset")
return FAIL
# Check rt cpuset
sched_load_balance = (cpuset_dir + "rt/" + cpuset_prefix +
"sched_load_balance")
with open(sched_load_balance) as f:
load_balance = int(f.readline())
if load_balance != 0:
print_msg("part_tc_3_1: Load balance is not disabled in RT cpuset")
return FAIL
# Check nrt cpuset
sched_load_balance = (cpuset_dir + "nrt/" + cpuset_prefix +
"sched_load_balance")
with open(sched_load_balance) as f:
load_balance = int(f.readline())
if load_balance != 1:
print_msg("part_tc_3_1: Load balance is disabled in NRT cpuset")
return FAIL
return SUCCESS
except:
print_msg("part_tc_3_1 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
return SUCCESS
# PART_TC_3_2
# Check that no tasks are in rt cpuset
def part_tc_3_2_proc_affinity():
try:
cpuset_dir, cpuset_prefix = get_cpusets()
if len(cpuset_dir) == 0:
print_msg("part_tc_3_2: Kernel is lacking support for cpuset")
return FAIL
tasks_path = cpuset_dir + "rt/tasks"
with open(tasks_path) as f:
rt_tasks = f.readline()
if len(rt_tasks) != 0:
print_msg("part_tc_3_2: There are tasks in the RT cpuset")
return FAIL
return SUCCESS
except:
print_msg("part_tc_3_2 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
# PART_TC_3_3
# Check that at least one process was migrated. I.e. there are tasks in nrt
def part_tc_3_3_proc_affinity():
try:
cpuset_dir, cpuset_prefix = get_cpusets()
if len(cpuset_dir) == 0:
print_msg("part_tc_3_3: Kernel is lacking support for cpuset")
return FAIL
tasks_path = cpuset_dir + "nrt/tasks"
with open(tasks_path) as f:
nrt_tasks = f.readline()
if len(nrt_tasks) == 0:
print_msg("part_tc_3_3: No tasks where migrated")
return FAIL
return SUCCESS
except:
print_msg("part_tc_3_3 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
# PART_TC_4_1
# Test the partrt run rt command. Check that the command is executed in the correct
# RT partition.
def part_tc_4_1_run():
try:
rt_mask = options.rt_mask
cmd = "partrt run -f 60 rt watch ls"
p = run(cmd, func=os.setsid)
time.sleep(1)
returncode = p.poll()
if returncode is not None:
print("part_tc_4_1: " + cmd + " unexpectedly returned with code: "
+ str(p.returncode))
return FAIL
(task_name, affinity, last_cpu, policy, prio) = get_task_info(p.pid)
if affinity != rt_mask:
print_msg("part_tc_4_1: RT task: " + task_name +
" has bad affinity:" + hex(affinity))
return FAIL
if rt_mask & (1 << last_cpu) == 0:
print_msg("part_tc_4_1: RT task: " + task_name +
"executes on nrt CPU: " + str(last_cpu))
return FAIL
if policy != SCHED_FIFO:
print_msg("part_tc_4_1: RT task: " + task_name +
" has sched policy: " + str(policy))
return FAIL
if prio != 60:
print_msg("part_tc_4_1: RT task: " + task_name +
" has wrong priority: " + str(prio))
return FAIL
return SUCCESS
except:
print_msg("part_tc_4_1 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
finally:
if p is not None and p.poll() is None:
os.killpg(p.pid, signal.SIGTERM)
# PART_TC_4_2
# Test the partrt run nrt command. Check that the command is executed in the correct
# NRT partition.
def part_tc_4_2_run():
p = None
try:
rt_mask = options.rt_mask
nrt_mask = ~rt_mask & (2 ** multiprocessing.cpu_count() - 1)
cmd = "partrt run nrt watch ls"
p = run(cmd, func=os.setsid)
time.sleep(1)
returncode = p.poll()
if returncode is not None:
print("part_tc_4_2: " + cmd + " unexpectedly returned with code: "
+ str(returncode))
return FAIL
(task_name, affinity, last_cpu, policy, prio) = get_task_info(p.pid)
if affinity != nrt_mask:
print_msg("part_tc_4_2: Invalid nrt task affinity:" + hex(affinity))
return FAIL
if nrt_mask & (1 << last_cpu) == 0:
print_msg("part_tc_4_2: NRT task executes on nrt CPU: " + last_cpu)
return FAIL
if policy != SCHED_NORMAL:
print_msg("part_tc_4_2: NRT task has sched policy: " +
str(policy))
return FAIL
if prio != 0:
print_msg("part_tc_4_2: NRT task has wrong priority: " +
str(prio))
return FAIL
return SUCCESS
except:
print_msg("part_tc_4_2 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
finally:
if p is not None and p.poll() is None:
os.killpg(p.pid, signal.SIGTERM)
# PART_TC_4_3
# Test the -c flag of the run subcommand
def part_tc_4_3_run():
p = None
try:
rt_mask = options.rt_mask
bit = 0
while (rt_mask & (0x1 << bit)) == 0:
bit = bit + 1
cpu = 2 ** bit
cmd = "partrt run -c " + hex(cpu) + " -f 60 rt watch ls"
p = run(cmd, func=os.setsid)
time.sleep(1)
returncode = p.poll()
if returncode is not None:
print("part_tc_4_3: " + cmd + " unexpectedly returned with code: "
+ str(p.returncode))
return FAIL
(task_name, affinity, last_cpu, policy, prio) = get_task_info(p.pid)
if affinity != cpu:
print_msg("part_tc_4_3: RT task: " + task_name +
" has bad affinity: " + hex(affinity) + " expected: " +
hex(cpu))
return FAIL
return SUCCESS
except:
print_msg("part_tc_4_3 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
finally:
if p is not None and p.poll() is None:
os.killpg(p.pid, signal.SIGTERM)
# PART_TC_5
# Check that the tick has been disabled
def part_tc_5_tick_deferment():
try:
# Check only if kernel configured for NO_HZ_FULL with patched tick
# deferment
if os.path.isfile("/sys/kernel/debug/sched_tick_max_deferment"):
with open("/sys/kernel/debug/sched_tick_max_deferment") as f:
if int(f.readline()) != DEFERMENT_TICK_DISABLED:
print_msg("part_tc_5 Failed: sched_tick_max_deferment" +
" is not equal to " +
str(DEFERMENT_TICK_DISABLED))
return FAIL
return SUCCESS
except:
print_msg("part_tc_5 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
# PART_TC_6
# Check that RT throttling has been disabled
def part_tc_6_rt_throttle():
try:
with open("/proc/sys/kernel/sched_rt_runtime_us") as f:
if int(f.readline()) != -1:
print_msg("part_tc_6 Failed: RT Throttling is not disabled")
return FAIL
return SUCCESS
except:
print_msg("part_tc_6 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
# PART_TC_7
# Test bad parameters on partitioned system
def part_tc_7_bad_parameters():
p = None
try:
part_tc_name = "part_tc_7"
cmd = "partrt"
if bad_parameter(cmd, part_tc_name):
return FAIL
cmd = "partrt asdfasdf"
if bad_parameter(cmd, part_tc_name):
return FAIL
cmd = "partrt create 8"
if bad_parameter(cmd, part_tc_name):
return FAIL
cmd = "partrt run asdfasdf"
if bad_parameter(cmd, part_tc_name):
return FAIL
cmd = "partrt run rt asdfasdf"
if bad_parameter(cmd, part_tc_name):
return FAIL
cmd = "partrt run rt asdfasdf watch ls"
if bad_parameter(cmd, part_tc_name):
return FAIL
cmd = "partrt run rt asdfasdf 50 watch ls"
if bad_parameter(cmd, part_tc_name):
return FAIL
cmd = "partrt run rt -f 60 ls"
if bad_parameter(cmd, part_tc_name):
return FAIL
cmd = "partrt run ls"
if bad_parameter(cmd, part_tc_name):
return FAIL
cmd = "partrt run -f 22 ls -l"
if bad_parameter(cmd, part_tc_name):
return FAIL
cmd = "partrt move asdf 1234"
if bad_parameter(cmd, part_tc_name):
return FAIL
#################################
# Create process that we can fiddle with
cmd = "while true; do sleep .1; done"
p = run(cmd, func=os.setsid)
cmd = "partrt move " + str(p.pid) + " asdf"
if bad_parameter(cmd, part_tc_name):
return FAIL
#################################
cmd = "partrt mov asdf rt"
if bad_parameter(cmd, part_tc_name):
return FAIL
return SUCCESS
except:
print_msg("part_tc_7 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
finally:
if p is not None and p.poll() is None:
os.killpg(p.pid, signal.SIGTERM)
# PART_TC_8
# Test that it is possible to move a process into the RT partition
# and that the process gets correct affinity
def part_tc_8_mov():
p1 = None
try:
rt_mask = options.rt_mask
nrt_mask = ~rt_mask & (2 ** multiprocessing.cpu_count() - 1)
cmd = "while true; do sleep .1; done"
p1 = run(cmd, func=os.setsid)
time.sleep(1)
returncode = p1.poll()
if returncode is not None:
print("part_tc_8: " + cmd + " unexpectedly returned with code: "
+ str(returncode))
return FAIL
# Check that the process executes within the NRT partition
(task_name, affinity, last_cpu, policy, prio) = get_task_info(p1.pid)
if affinity != nrt_mask:
print_msg("part_tc_8: NRT process has bad affinity: " +
hex(affinity) + " expected: " + hex(nrt_mask))
return FAIL
if rt_mask & (1 << last_cpu) != 0:
print_msg("part_tc_8: NRT process executes on RT CPU: " +
str(last_cpu))
return FAIL
# Move the process
bit = 0
while (rt_mask & (0x1 << bit)) == 0:
bit = bit + 1
cpu = 2 ** bit
cmd = ("partrt move -c " + hex(cpu) + " " + str(p1.pid) + " rt")
p2 = run(cmd, func=os.setsid)
p2.wait()
if p2.returncode != 0:
print("part_tc_8: " + cmd + "Returned with abnormal code: "
+ str(p2.returncode))
return FAIL
time.sleep(1)
# Check that the process executes within the RT partition
(task_name, affinity, last_cpu, policy, prio) = get_task_info(p1.pid)
if affinity != cpu:
print_msg("part_tc_8: RT process has bad affinity: " +
hex(affinity) + " expected: " + hex(cpu))
return FAIL
if rt_mask & (1 << last_cpu) == 0:
print_msg("part_tc_8: RT process executes on NRT CPU: "
+ str(last_cpu))
return FAIL
return SUCCESS
except:
print_msg("part_tc_8 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
finally:
if p1 is not None and p1.poll() is None:
os.killpg(p1.pid, signal.SIGTERM)
# PART_TC_9 Check that the environment has been changed
def part_tc_9_check_env():
try:
sched_rt_runtime_us = -1
sched_tick_max_deferment = DEFERMENT_TICK_DISABLED
stat_interval = 1000
numa_affinity = 0
watchdog = 0
cpumask = (~options.rt_mask &
(2 ** multiprocessing.cpu_count() - 1))
check_interval = 0
return check_env(sched_rt_runtime_us, sched_tick_max_deferment,
stat_interval, numa_affinity, watchdog, cpumask,
check_interval)
except:
print_msg("part_tc_9 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
# PART_TC_10 Check that partition undo restore environment to default values
def part_tc_10_cleanup():
try:
cmd = "partrt undo"
p = run(cmd, func=os.setsid)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
print_msg((stdout, stderr))
print_msg("part_tc_10 Failed: " + cmd +
" returned with abnormal code: " + str(p.returncode))
return FAIL
cpumask = 2 ** multiprocessing.cpu_count() - 1
# Check cpusets and environment
if check_cpuset_cleanup("rt", "nrt") != SUCCESS:
return FAIL
if check_env(950000, 100, 1, 1, 1, cpumask, 300) != SUCCESS:
return FAIL
return SUCCESS
except:
print_msg("part_tc_10 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
# NOPART_TC_1_1 Check that partition undo restores environment from file
# Leaves system in unpartitioned state
def nopart_tc_1_1_cleanup():
try:
cpumask = 2 ** multiprocessing.cpu_count() - 1
# Get environment
(sched_rt_runtime_us, sched_tick_max_deferment, stat_interval,
numa_affinity, watchdog, check_interval) = get_env()
# Create partitions
cmd = ("partrt create " + hex(options.rt_mask))
p = run(cmd, func=os.setsid)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
print_msg((stdout, stderr))
print_msg("nopart_tc_1_1 Failed: " + cmd +
" returned with abnormal code: " + str(p.returncode))
return FAIL
# Remove partitions
cmd = "partrt undo -s /tmp/partrt_env"
p = run(cmd, func=os.setsid)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
print_msg((stdout, stderr))
print_msg("nopart_tc_1_1 Failed: " + cmd +
" returned with abnormal code: " + str(p.returncode))
return FAIL
# Check cpusets and environment
if check_cpuset_cleanup("rt", "nrt") != SUCCESS:
return FAIL
if check_env(sched_rt_runtime_us, sched_tick_max_deferment,
stat_interval, numa_affinity, watchdog,
cpumask, check_interval) != SUCCESS:
return FAIL
return SUCCESS
except:
print_msg("nopart_tc_1_1 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
# NOPART_TC_1_2 Check that the standard options and the create cmd-options
# work. Leaves system in unpartitioned state
def nopart_tc_1_2_cleanup():
try:
cpumask = 2 ** multiprocessing.cpu_count() - 1
# Get environment
(sched_rt_runtime_us, sched_tick_max_deferment, stat_interval,
numa_affinity, watchdog, check_interval) = get_env()
# Create partitions
cmd = ("partrt -r rt1 -n nrt1 create -a -b -c -d -m -r -t -w " +
hex(options.rt_mask))
p = run(cmd, func=os.setsid)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
print_msg((stdout, stderr))
print_msg("nopart_tc_1_2 Failed: " + cmd +
" returned with abnormal code: " + str(p.returncode))
return FAIL
# Check that the environment was unmodified
if (check_env(sched_rt_runtime_us, sched_tick_max_deferment,
stat_interval, numa_affinity, watchdog, cpumask,
check_interval)
!= SUCCESS):
return FAIL
# Remove partitions
cmd = "partrt -r rt1 -n nrt1 undo"
p = run(cmd, func=os.setsid)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
print_msg((stdout, stderr))
print_msg("nopart_tc_1_2 Failed: " + cmd +
" returned with abnormal code: " + str(p.returncode))
return FAIL
# Check cpuset cleanup
if check_cpuset_cleanup("rt1", "nrt1") != SUCCESS:
return FAIL
# Check again that the environment was unmodified
if check_env(sched_rt_runtime_us, sched_tick_max_deferment,
stat_interval, numa_affinity, watchdog, cpumask,
check_interval) != SUCCESS:
return FAIL
return SUCCESS
except:
print_msg("nopart_tc_1_2 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
# NOPART_TC_2_1
# Check that help text is displayed
def nopart_tc_2_1_help_text():
try:
cmd = "partrt -h"
p = run(cmd, func=os.setsid)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
print_msg((stdout, stderr))
            print_msg(
                "nopart_tc_2: Failed: partrt returned with abnormal code: " +
                str(p.returncode))
return FAIL
found_usage = False
for line in stdout.decode('utf-8').splitlines():
if "Usage:" in line:
found_usage = True
break
if found_usage == False:
print_msg("nopart_tc_2: Failed: partrt returned corrupt help text")
return FAIL
return SUCCESS
except:
print_msg("nopart_tc_2 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
# NOPART_TC_2_2
# Check that help text is displayed for sub command
def nopart_tc_2_2_help_text():
try:
cmd = "partrt run -h"
p = run(cmd, func=os.setsid)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
print_msg((stdout, stderr))
            print_msg(
                "nopart_tc_2_2: Failed: partrt returned with abnormal code: " +
                str(p.returncode))
return FAIL
found_usage = False
for line in stdout.decode('utf-8').splitlines():
if "Usage:" in line:
found_usage = True
break
if found_usage == False:
print_msg(
"nopart_tc_2_2: Failed: partrt returned corrupt help text")
return FAIL
return SUCCESS
except:
print_msg("nopart_tc_2_2 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
# NOPART_TC_3
# Test bad parameters on unpartitioned system
def nopart_tc_3_bad_parameters():
try:
nopart_tc_name = "nopart_tc_3"
cmd = "partrt"
if bad_parameter(cmd, nopart_tc_name):
return FAIL
cmd = "partrt asdfasdf"
if bad_parameter(cmd, nopart_tc_name):
return FAIL
cmd = "partrt --asdfasdf"
if bad_parameter(cmd, nopart_tc_name):
return FAIL
cmd = "partrt -asdfasdf"
if bad_parameter(cmd, nopart_tc_name):
return FAIL
cmd = "partrt create -1"
if bad_parameter(cmd, nopart_tc_name):
return FAIL
# This has to be updated for really large systems
cmd = "partrt create fffffffffffffffffffffffffffffff"
if bad_parameter(cmd, nopart_tc_name):
return FAIL
cmd = "partrt -z run rt watch ls"
if bad_parameter(cmd, nopart_tc_name):
return FAIL
cmd = "partrt run rt watch ls"
if bad_parameter(cmd, nopart_tc_name):
return FAIL
cmd = "partrt run -c 1234 rt watch ls"
if bad_parameter(cmd, nopart_tc_name):
return FAIL
cmd = "partrt run -c asdf rt watch ls"
if bad_parameter(cmd, nopart_tc_name):
return FAIL
cmd = "partrt run -c rt watch ls"
if bad_parameter(cmd, nopart_tc_name):
return FAIL
cmd = "partrt run -f 60 - rt watch ls"
if bad_parameter(cmd, nopart_tc_name):
return FAIL
cmd = "partrt run nrt watch ls"
if bad_parameter(cmd, nopart_tc_name):
return FAIL
cmd = "partrt run -z nrt watch ls"
if bad_parameter(cmd, nopart_tc_name):
return FAIL
cmd = "partrt move 0 rt"
if bad_parameter(cmd, nopart_tc_name):
return FAIL
cmd = "partrt move -c 1234 0 rt"
if bad_parameter(cmd, nopart_tc_name):
return FAIL
cmd = "partrt move -c asdf 0 rt"
if bad_parameter(cmd, nopart_tc_name):
return FAIL
cmd = "partrt move -c 0 rt"
if bad_parameter(cmd, nopart_tc_name):
return FAIL
return SUCCESS
except:
print_msg("nopart_tc_3 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
# NOPART_TC_4
# Check that Enea copyright is present
def nopart_tc_4_copyright():
try:
found_copyright = False
cmd = "partrt -V"
p = run(cmd, func=os.setsid)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
print_msg((stdout, stderr))
print_msg("nopart_tc_4: Failed: " + cmd +
": returned with abnormal code: ", p.returncode)
return FAIL
for line in stdout.decode('utf-8').splitlines():
found = re.search("Copyright \(C\) (.*) by Enea Software AB", line)
if found:
found_copyright = True
break
if found_copyright == False:
print_msg("nopart_tc_4: Failed: " + cmd +
": Could not find copyright text")
return FAIL
return SUCCESS
except:
print_msg("nopart_tc_4 Failed because of exception: " +
str(sys.exc_info()[1]))
return FAIL
def run_tc(nopart_tc_func, nopart_tc_name, expected_result):
print_msg(nopart_tc_name + ": Executing")
result = nopart_tc_func()
if result != expected_result:
test_result = 1
print_msg(nopart_tc_name + ": Failed")
return FAIL
else:
print_msg(nopart_tc_name + ": Passed")
return SUCCESS
def cleanup():
try:
cmd = "partrt undo"
p = run(cmd, func=os.setsid)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
print_msg((stdout, stderr))
print("test_partition: Failed: " + cmd +
": returned with abnormal code: ", p.returncode)
sys.exit(1)
cmd = "partrt -r rt1 -n nrt1 undo"
p = run(cmd, func=os.setsid)
(stdout, stderr) = p.communicate()
if p.returncode != 0:
print_msg((stdout, stderr))
print("test_partition: Failed: " + cmd +
": returned with abnormal code: ", p.returncode)
return sys.exit(1)
except:
print("test_partition: Failed because of exception " +
str(sys.exc_info()[1]))
sys.exit(1)
# Print usage help text
def usage():
print('Usage:')
print('\tpartition_test <target> [-v] [-h]')
print('\tOptions:')
print('\t\t--help, -h:')
print('\t\t\tShow help text')
print('\t\t--verbose, -v:')
print('\t\t\tExtra verbose output')
print('\t\t\tSave test results')
print('')
print('If <target> is not one of the preconfigured targets:')
print(SUPPORTED_TARGETS)
print('a default configuration will be used.')
def main(argv):
global verbose
global options
verbose = False
target = ""
# Get mandatory parameter
if len(argv) == 0:
print('Missing target parameter')
print('')
usage()
exit(1)
else:
target = argv[0]
# Compare target against supported targets
if not(target in SUPPORTED_TARGETS):
msg = "Unknown target: " + target
msg += ": Trying default configuration"
target = "default"
print(msg)
options = targetOptions(target)
# Get optional parameters
try:
opts, args = getopt.getopt(argv[1:], "hs:vk", ["help",
"verbose"])
except getopt.GetoptError as err:
usage()
exit(-1)
for o, a in opts:
if o in ("-v", "verbose"):
verbose = True
elif o in ("-h", "help"):
usage()
exit(0)
else:
print('Unknown parameter: ', o)
usage()
exit(-1)
test_result = 0
# Remove any pre existing partition
cleanup()
# Run the tests
############# PART_TC_0_1 #############
test_result = (test_result | run_tc(part_tc_0_1_irq_affinity,
"PART_TC_0_1",
SUCCESS))
############# PART_TC_1_1 #############
test_result = (test_result | run_tc(part_tc_1_1_prepare,
"PART_TC_1_1",
SUCCESS))
############# PART_TC_1_2 #############
test_result = (test_result | run_tc(part_tc_1_2_prepare,
"PART_TC_1_2",
SUCCESS))
############# PART_TC_2_1 #############
test_result = (test_result | run_tc(part_tc_2_1_irq_affinity,
"PART_TC_2_1",
SUCCESS))
############# PART_TC_2_2 ##############
test_result = (test_result | run_tc(part_tc_2_2_irq_affinity,
"PART_TC_2_2",
SUCCESS))
############# PART_TC_3_1 ##############
test_result = (test_result | run_tc(part_tc_3_1_proc_affinity,
"PART_TC_3_1",
SUCCESS))
############# PART_TC_3_2 ##############
test_result = (test_result | run_tc(part_tc_3_2_proc_affinity,
"PART_TC_3_2",
SUCCESS))
############# PART_TC_3_3 ##############
test_result = (test_result | run_tc(part_tc_3_3_proc_affinity,
"PART_TC_3_3",
SUCCESS))
############# PART_TC_4_1 ##############
test_result = (test_result | run_tc(part_tc_4_1_run,
"PART_TC_4_1",
SUCCESS))
############# PART_TC_4_2 ##############
test_result = (test_result | run_tc(part_tc_4_2_run,
"PART_TC_4_2",
SUCCESS))
############# PART_TC_4_3 ##############
test_result = (test_result | run_tc(part_tc_4_3_run,
"PART_TC_4_3",
SUCCESS))
############# PART_TC_5 ##############
test_result = (test_result | run_tc(part_tc_5_tick_deferment,
"PART_TC_5",
SUCCESS))
############# PART_TC_6 ##############
test_result = (test_result | run_tc(part_tc_6_rt_throttle,
"PART_TC_6",
SUCCESS))
############# PART_TC_7 ##############
test_result = (test_result | run_tc(part_tc_7_bad_parameters,
"PART_TC_7",
SUCCESS))
############# PART_TC_8 ##############
test_result = (test_result | run_tc(part_tc_8_mov,
"PART_TC_8",
SUCCESS))
############# PART_TC_9 ##############
test_result = (test_result | run_tc(part_tc_9_check_env,
"PART_TC_9",
SUCCESS))
############# PART_TC_10 ##############
test_result = (test_result | run_tc(part_tc_10_cleanup,
"PART_TC_10",
SUCCESS))
############# NOPART_TC_1_1 ##############
test_result = (test_result | run_tc(nopart_tc_1_1_cleanup,
"NOPART_TC_1_1",
SUCCESS))
############# NOPART_TC_1_2 ##############
test_result = (test_result | run_tc(nopart_tc_1_2_cleanup,
"NOPART_TC_1_2",
SUCCESS))
############# NOPART_TC_2_1 ##############
test_result = (test_result | run_tc(nopart_tc_2_1_help_text,
"NOPART_TC_2_1",
SUCCESS))
############# NOPART_TC_2_2 ##############
test_result = (test_result | run_tc(nopart_tc_2_2_help_text,
"NOPART_TC_2_2",
SUCCESS))
############# NOPART_TC_3 ##############
test_result = (test_result | run_tc(nopart_tc_3_bad_parameters,
"NOPART_TC_3",
SUCCESS))
############# NOPART_TC_4 ##############
test_result = (test_result | run_tc(nopart_tc_4_copyright,
"NOPART_TC_4",
SUCCESS))
# Do final cleanup in case any cleanup test case failed and prevented
    # cleanup. This could of course fail if partrt undo is broken.
cleanup()
if test_result == 0:
print("SUCCESS")
else:
print("FAIL")
sys.exit(test_result)
if __name__ == "__main__":
main(sys.argv[1:])
|
bsd-3-clause
| -2,476,286,391,616,804,400 | 31.286078 | 224 | 0.514709 | false |
spiceqa/tp-spice
|
spice/client-tests/help_version.py
|
1
|
1860
|
#!/usr/bin/python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
"""Test to verify the license from the About screen of remote-viewer.
"""
import os
import sys
import logging
sys.path.append(os.path.join(os.path.dirname(__file__), "lib"))
import rv
import argparse
import subprocess
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(
    description='Check remote-viewer version in Help->About.')
group = parser.add_mutually_exclusive_group(required=True)
parser.add_argument("ver", help="Expected version, eg: 2.0-7.el7. If 'find' \
specified, currently installed version is automatically \
detected.")
group.add_argument("-a", "--accesskeys", help="Use access keys.",
action="store_const", const="access_key", dest="method")
group.add_argument("-m", "--mouse", help="Use mouse.", action="store_const",
const="mouse", dest="method")
args = parser.parse_args()
app = rv.Application(method=args.method)
# Test assumes there is only one virtual display.
assert app.dsp_count() == 1
version = app.dsp1.help_version()
logger.info('Got version: %s', version)
ver = args.ver
if ver == 'find':
ver = subprocess.check_output(["rpm", "-q", "virt-viewer", "--queryformat",
"%{VERSION}-%{RELEASE}"])
logger.info('Required version: %s', ver)
assert ver in version
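# Illustrative invocations (the version string is an example; 'find' is the
# literal keyword handled above):
#
#   ./help_version.py 2.0-7.el7 --mouse
#   ./help_version.py find --accesskeys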
|
gpl-2.0
| -7,757,124,006,179,228,000 | 34.769231 | 79 | 0.686559 | false |
j0057/ansible-1
|
lib/ansible/utils/plugins.py
|
1
|
9861
|
# (c) 2012, Daniel Hokka Zakrisson <daniel@hozac.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import os.path
import sys
import glob
import imp
from ansible import constants as C
from ansible import errors
MODULE_CACHE = {}
PATH_CACHE = {}
PLUGIN_PATH_CACHE = {}
_basedirs = []
def push_basedir(basedir):
# avoid pushing the same absolute dir more than once
basedir = os.path.realpath(basedir)
if basedir not in _basedirs:
_basedirs.insert(0, basedir)
class PluginLoader(object):
'''
PluginLoader loads plugins from the configured plugin directories.
It searches for plugins by iterating through the combined list of
play basedirs, configured paths, and the python path.
The first match is used.
'''
def __init__(self, class_name, package, config, subdir, aliases={}):
self.class_name = class_name
self.package = package
self.config = config
self.subdir = subdir
self.aliases = aliases
        if class_name not in MODULE_CACHE:
            MODULE_CACHE[class_name] = {}
        if class_name not in PATH_CACHE:
            PATH_CACHE[class_name] = None
        if class_name not in PLUGIN_PATH_CACHE:
            PLUGIN_PATH_CACHE[class_name] = {}
self._module_cache = MODULE_CACHE[class_name]
self._paths = PATH_CACHE[class_name]
self._plugin_path_cache = PLUGIN_PATH_CACHE[class_name]
self._extra_dirs = []
self._searched_paths = set()
def print_paths(self):
''' Returns a string suitable for printing of the search path '''
# Uses a list to get the order right
ret = []
for i in self._get_paths():
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
def _all_directories(self, dir):
results = []
results.append(dir)
for root, subdirs, files in os.walk(dir):
if '__init__.py' in files:
for x in subdirs:
results.append(os.path.join(root,x))
return results
def _get_package_paths(self):
''' Gets the path of a Python package '''
paths = []
if not self.package:
return []
if not hasattr(self, 'package_path'):
m = __import__(self.package)
parts = self.package.split('.')[1:]
self.package_path = os.path.join(os.path.dirname(m.__file__), *parts)
paths.extend(self._all_directories(self.package_path))
return paths
def _get_paths(self):
''' Return a list of paths to search for plugins in '''
if self._paths is not None:
return self._paths
ret = self._extra_dirs[:]
for basedir in _basedirs:
fullpath = os.path.realpath(os.path.join(basedir, self.subdir))
if os.path.isdir(fullpath):
files = glob.glob("%s/*" % fullpath)
# allow directories to be two levels deep
files2 = glob.glob("%s/*/*" % fullpath)
if files2 is not None:
files.extend(files2)
for file in files:
if os.path.isdir(file) and file not in ret:
ret.append(file)
if fullpath not in ret:
ret.append(fullpath)
# look in any configured plugin paths, allow one level deep for subcategories
if self.config is not None:
configured_paths = self.config.split(os.pathsep)
for path in configured_paths:
path = os.path.realpath(os.path.expanduser(path))
contents = glob.glob("%s/*" % path) + glob.glob("%s/*/*" % path)
for c in contents:
if os.path.isdir(c) and c not in ret:
ret.append(c)
if path not in ret:
ret.append(path)
# look for any plugins installed in the package subtree
ret.extend(self._get_package_paths())
# cache and return the result
self._paths = ret
return ret
def add_directory(self, directory, with_subdir=False):
''' Adds an additional directory to the search path '''
directory = os.path.realpath(directory)
if directory is not None:
if with_subdir:
directory = os.path.join(directory, self.subdir)
if directory not in self._extra_dirs:
# append the directory and invalidate the path cache
self._extra_dirs.append(directory)
self._paths = None
def find_plugin(self, name, suffixes=None):
''' Find a plugin named name '''
if not suffixes:
if self.class_name:
suffixes = ['.py']
else:
suffixes = ['.py', '']
potential_names = list('%s%s' % (name, s) for s in suffixes)
for full_name in potential_names:
if full_name in self._plugin_path_cache:
return self._plugin_path_cache[full_name]
found = None
for path in [p for p in self._get_paths() if p not in self._searched_paths]:
if os.path.isdir(path):
full_paths = (os.path.join(path, f) for f in os.listdir(path))
for full_path in (f for f in full_paths if os.path.isfile(f)):
for suffix in suffixes:
if full_path.endswith(suffix):
full_name = os.path.basename(full_path)
break
else: # Yes, this is a for-else: http://bit.ly/1ElPkyg
continue
if full_name not in self._plugin_path_cache:
self._plugin_path_cache[full_name] = full_path
self._searched_paths.add(path)
for full_name in potential_names:
if full_name in self._plugin_path_cache:
return self._plugin_path_cache[full_name]
# if nothing is found, try finding alias/deprecated
if not name.startswith('_'):
for alias_name in ('_%s' % n for n in potential_names):
# We've already cached all the paths at this point
if alias_name in self._plugin_path_cache:
return self._plugin_path_cache[alias_name]
return None
def has_plugin(self, name):
''' Checks if a plugin named name exists '''
return self.find_plugin(name) is not None
__contains__ = has_plugin
def get(self, name, *args, **kwargs):
''' instantiates a plugin of the given name using arguments '''
if name in self.aliases:
name = self.aliases[name]
path = self.find_plugin(name)
if path is None:
return None
if path not in self._module_cache:
self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path)
return getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
def all(self, *args, **kwargs):
''' instantiates all plugins with the same arguments '''
for i in self._get_paths():
matches = glob.glob(os.path.join(i, "*.py"))
matches.sort()
for path in matches:
name, ext = os.path.splitext(os.path.basename(path))
if name.startswith("_"):
continue
if path not in self._module_cache:
self._module_cache[path] = imp.load_source('.'.join([self.package, name]), path)
yield getattr(self._module_cache[path], self.class_name)(*args, **kwargs)
action_loader = PluginLoader(
'ActionModule',
'ansible.runner.action_plugins',
C.DEFAULT_ACTION_PLUGIN_PATH,
'action_plugins'
)
cache_loader = PluginLoader(
'CacheModule',
'ansible.cache',
C.DEFAULT_CACHE_PLUGIN_PATH,
'cache_plugins'
)
callback_loader = PluginLoader(
'CallbackModule',
'ansible.callback_plugins',
C.DEFAULT_CALLBACK_PLUGIN_PATH,
'callback_plugins'
)
connection_loader = PluginLoader(
'Connection',
'ansible.runner.connection_plugins',
C.DEFAULT_CONNECTION_PLUGIN_PATH,
'connection_plugins',
aliases={'paramiko': 'paramiko_ssh'}
)
shell_loader = PluginLoader(
'ShellModule',
'ansible.runner.shell_plugins',
'shell_plugins',
'shell_plugins',
)
module_finder = PluginLoader(
'',
'ansible.modules',
C.DEFAULT_MODULE_PATH,
'library'
)
lookup_loader = PluginLoader(
'LookupModule',
'ansible.runner.lookup_plugins',
C.DEFAULT_LOOKUP_PLUGIN_PATH,
'lookup_plugins'
)
vars_loader = PluginLoader(
'VarsModule',
'ansible.inventory.vars_plugins',
C.DEFAULT_VARS_PLUGIN_PATH,
'vars_plugins'
)
filter_loader = PluginLoader(
'FilterModule',
'ansible.runner.filter_plugins',
C.DEFAULT_FILTER_PLUGIN_PATH,
'filter_plugins'
)
fragment_loader = PluginLoader(
'ModuleDocFragment',
'ansible.utils.module_docs_fragments',
os.path.join(os.path.dirname(__file__), 'module_docs_fragments'),
'',
)
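# Minimal sketch of how the loaders above are typically used; the plugin name
# and constructor arguments here are illustrative, not taken from this file:
#
#   path = lookup_loader.find_plugin('file')        # resolve lookup_plugins/file.py on the search path
#   plugin = lookup_loader.get('file', basedir='.') # instantiate its LookupModule with these args
#   'file' in lookup_loader                         # membership test via has_plugin()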
|
gpl-3.0
| 5,597,246,298,605,420,000 | 31.4375 | 100 | 0.579353 | false |
wooga/airflow
|
tests/providers/google/cloud/operators/test_presto_to_gcs_system.py
|
1
|
6439
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from contextlib import closing, suppress
import pytest
from airflow.models import Connection
from airflow.providers.presto.hooks.presto import PrestoHook
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_BIGQUERY_KEY, GCP_GCS_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
try:
from airflow.utils.session import create_session
except ImportError:
# This is a hack to import create_session from old destination and
# fool the pre-commit check that looks for old imports...
# TODO remove this once we don't need to test this on 1.10
import importlib
db_module = importlib.import_module("airflow.utils.db")
create_session = getattr(db_module, "create_session")
GCS_BUCKET = os.environ.get("GCP_PRESTO_TO_GCS_BUCKET_NAME", "test-presto-to-gcs-bucket")
DATASET_NAME = os.environ.get("GCP_PRESTO_TO_GCS_DATASET_NAME", "test_presto_to_gcs_dataset")
CREATE_QUERY = """
CREATE TABLE memory.default.test_multiple_types (
-- Boolean
z_boolean BOOLEAN,
-- Integers
z_tinyint TINYINT,
z_smallint SMALLINT,
z_integer INTEGER,
z_bigint BIGINT,
-- Floating-Point
z_real REAL,
z_double DOUBLE,
-- Fixed-Point
z_decimal DECIMAL(10,2),
-- String
z_varchar VARCHAR(20),
z_char CHAR(20),
z_varbinary VARBINARY,
z_json JSON,
-- Date and Time
z_date DATE,
z_time TIME,
z_time_with_time_zone TIME WITH TIME ZONE,
z_timestamp TIMESTAMP,
z_timestamp_with_time_zone TIMESTAMP WITH TIME ZONE,
-- Network Address
z_ipaddress_v4 IPADDRESS,
z_ipaddress_v6 IPADDRESS,
-- UUID
z_uuid UUID
)
"""
LOAD_QUERY = """
INSERT INTO memory.default.test_multiple_types VALUES(
-- Boolean
true, -- z_boolean BOOLEAN,
-- Integers
CAST(POW(2, 7 ) - 42 AS TINYINT), -- z_tinyint TINYINT,
CAST(POW(2, 15) - 42 AS SMALLINT), -- z_smallint SMALLINT,
CAST(POW(2, 31) - 42 AS INTEGER), -- z_integer INTEGER,
CAST(POW(2, 32) - 42 AS BIGINT) * 2, -- z_bigint BIGINT,
-- Floating-Point
REAL '42', -- z_real REAL,
DOUBLE '1.03e42', -- z_double DOUBLE,
    -- Fixed-Point
DECIMAL '1.1', -- z_decimal DECIMAL(10, 2),
-- String
U&'Hello winter \2603 !', -- z_vaarchar VARCHAR(20),
'cat', -- z_char CHAR(20),
X'65683F', -- z_varbinary VARBINARY,
CAST('["A", 1, true]' AS JSON), -- z_json JSON,
-- Date and Time
DATE '2001-08-22', -- z_date DATE,
TIME '01:02:03.456', -- z_time TIME,
TIME '01:02:03.456 America/Los_Angeles', -- z_time_with_time_zone TIME WITH TIME ZONE,
TIMESTAMP '2001-08-22 03:04:05.321', -- z_timestamp TIMESTAMP,
TIMESTAMP '2001-08-22 03:04:05.321 America/Los_Angeles', -- z_timestamp_with_time_zone TIMESTAMP WITH TIME
-- ZONE,
-- Network Address
IPADDRESS '10.0.0.1', -- z_ipaddress_v4 IPADDRESS,
IPADDRESS '2001:db8::1', -- z_ipaddress_v6 IPADDRESS,
-- UUID
UUID '12151fd2-7586-11e9-8f9e-2a86e4085a59' -- z_uuid UUID
)
"""
DELETE_QUERY = "DROP TABLE memory.default.test_multiple_types"
@pytest.mark.integration("presto")
class PrestoToGCSSystemTest(GoogleSystemTest):
@staticmethod
def init_connection():
with create_session() as session:
session.query(Connection).filter(Connection.conn_id == "presto_default").delete()
session.merge(
Connection(
conn_id="presto_default", conn_type="conn_type", host="presto", port=8080, login="airflow"
)
)
@staticmethod
def init_db():
hook = PrestoHook()
with hook.get_conn() as conn:
with closing(conn.cursor()) as cur:
cur.execute(CREATE_QUERY)
# Presto does not execute queries until the result is fetched. :-(
cur.fetchone()
cur.execute(LOAD_QUERY)
cur.fetchone()
@staticmethod
def drop_db():
hook = PrestoHook()
with hook.get_conn() as conn:
with closing(conn.cursor()) as cur:
cur.execute(DELETE_QUERY)
# Presto does not execute queries until the result is fetched. :-(
cur.fetchone()
@provide_gcp_context(GCP_GCS_KEY)
def setUp(self):
super().setUp()
self.init_connection()
self.create_gcs_bucket(GCS_BUCKET)
with suppress(Exception):
self.drop_db()
self.init_db()
self.execute_with_ctx([
"bq", "rm", "--recursive", "--force", f"{self._project_id()}:{DATASET_NAME}"
], key=GCP_BIGQUERY_KEY)
@provide_gcp_context(GCP_BIGQUERY_KEY)
def test_run_example_dag(self):
self.run_dag("example_presto_to_gcs", CLOUD_DAG_FOLDER)
@provide_gcp_context(GCP_GCS_KEY)
def tearDown(self):
self.delete_gcs_bucket(GCS_BUCKET)
self.drop_db()
self.execute_with_ctx([
"bq", "rm", "--recursive", "--force", f"{self._project_id()}:{DATASET_NAME}"
], key=GCP_BIGQUERY_KEY)
super().tearDown()
|
apache-2.0
| 5,968,015,441,872,007,000 | 37.789157 | 110 | 0.588135 | false |
AnselCmy/ARPS
|
ML_model/test/report_classifier.py
|
1
|
2434
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from __future__ import print_function
import re
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import jieba
import pymongo as pm
sys.path.append('..')
from model.main import Multi_Label_Model
def check_contain_chinese(check_str):
for ch in check_str.decode('utf-8'):
if u'\u4e00' <= ch <= u'\u9fff':
return True
return False
def check_is_cs(check_str):
if check_str[-3:] == '001':
return True
else:
return False
def classify():
# Initial for model
model = Multi_Label_Model()
labels = [u'计算机网络', u'信息安全', u'云计算&大数据', u'机器学习&模式识别', u'数据科学',
u'计算机图形学&图像处理', u'计算机教学', u'数据库', u'计算机组成与结构', u'人机交互',
u'软件技术', u'计算机应用', u'信息检索', u'物联网', u'多媒体技术']
labels = list(range(len(labels)))
model.load_word2vec_model('model/word2vec_model.txt')
model.load_scaler('model/scaler.txt')
model.load_model('model')
# Initial for mongodb
conn = pm.MongoClient('localhost', 27017)
db = conn.get_database('report_db')
col = db.get_collection('reports_without_label')
new_col = db.get_collection('reports_with_label')
# For clean the content
punc1 = re.compile(r"[。,、;:()\——《》【】\n,.;:()\-<>\[\]]")
punc2 = re.compile(r"[\“\”\‘\’\"\']")
stop_words = get_stop_words('stop_words.txt')
# Deal with reports which are not labeled
for c in col.find({'is_labeled': 0}):
# Classify
if check_contain_chinese(c['title']) and check_is_cs(c['faculty']):
# Get label list by model
new_label = model.predict_class_with_string([c['title']], labels)[0]
# Set label to this report
col.update({'_id': c['_id']}, {'$set': {'label': new_label, 'click': 0, 'is_labeled': 1}})
# Get the content by id
new_doc = col.find({'_id': c['_id']})[0]
# Delete the old id
del new_doc['_id']
# Insert labeled report to new col
new_col.insert(new_doc)
# Clean to get the content
if c.has_key('abstract'):
content = c['title'] + c['abstract']
else:
content = c['title']
content = re.sub(punc1, ' ', content)
content = re.sub(punc2, '', content)
content = jieba.cut(content)
content = [s for s in content if s not in stop_words]
content = ' '.join(content)
new_col.update({'title': c['title']}, {'$set': {'content': content}})
if __name__ == '__main__':
classify()
|
mit
| 1,969,431,880,389,390,300 | 28.142857 | 93 | 0.628788 | false |
teamCarel/EyeTracker
|
src/capture/eye.py
|
1
|
20620
|
'''
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2017 Pupil Labs
Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
'''
import os
import platform
class Global_Container(object):
pass
class Is_Alive_Manager(object):
'''
A context manager to wrap the is_alive flag.
Is alive will stay true as long is the eye process is running.
'''
def __init__(self, is_alive, ipc_socket, eye_id):
self.is_alive = is_alive
self.ipc_socket = ipc_socket
self.eye_id = eye_id
def __enter__(self):
self.is_alive.value = True
self.ipc_socket.notify({'subject': 'eye_process.started',
'eye_id': self.eye_id})
def __exit__(self, type, value, traceback):
if type is not None:
pass # Exception occurred
self.is_alive.value = False
self.ipc_socket.notify({'subject': 'eye_process.stopped',
'eye_id': self.eye_id})
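# Minimal sketch of how the context manager above is used (it mirrors the call
# in eye() below): the shared flag flips to True on entry, an
# 'eye_process.started' notification is sent, and the matching
# 'eye_process.stopped' notification is guaranteed on exit.
#
#   with Is_Alive_Manager(is_alive_flag, ipc_socket, eye_id):
#       ...  # run the capture and pupil-detection loop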
def eye(timebase, is_alive_flag, ipc_pub_url, ipc_sub_url, ipc_push_url,
user_dir, eye_id, overwrite_cap_settings=None): # version after userdir
"""reads eye video and detects the pupil.
Creates a window, gl context.
Grabs images from a capture.
Streams Pupil coordinates.
Reacts to notifications:
``set_detection_mapping_mode``: Sets detection method
``eye_process.should_stop``: Stops the eye process
``recording.started``: Starts recording eye video
``recording.stopped``: Stops recording eye video
``frame_publishing.started``: Starts frame publishing
``frame_publishing.stopped``: Stops frame publishing
Emits notifications:
``eye_process.started``: Eye process started
``eye_process.stopped``: Eye process stopped
Emits data:
``pupil.<eye id>``: Pupil data for eye with id ``<eye id>``
``frame.eye.<eye id>``: Eye frames with id ``<eye id>``
"""
    # We defer the imports because of multiprocessing.
    # Otherwise each process would also load the other processes' imports.
import zmq
import zmq_tools
zmq_ctx = zmq.Context()
ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
pupil_socket = zmq_tools.Msg_Streamer(zmq_ctx, ipc_pub_url)
notify_sub = zmq_tools.Msg_Receiver(zmq_ctx, ipc_sub_url, topics=("notify",))
with Is_Alive_Manager(is_alive_flag, ipc_socket, eye_id):
# logging setup
import logging
logging.getLogger("OpenGL").setLevel(logging.ERROR)
logger = logging.getLogger()
logger.handlers = []
logger.setLevel(logging.INFO)
logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
# create logger for the context of this function
logger = logging.getLogger(__name__)
# general imports
import numpy as np
import cv2
# display
import glfw
from pyglui import ui, cygl
from pyglui.cygl.utils import draw_points, RGBA, draw_polyline
from pyglui.cygl.utils import Named_Texture
from gl_utils import basic_gl_setup, adjust_gl_view, clear_gl_screen
from gl_utils import make_coord_system_pixel_based
from gl_utils import make_coord_system_norm_based
from gl_utils import is_window_visible
from ui_roi import UIRoi
# monitoring
import psutil
# helpers/utils
from uvc import get_time_monotonic
from file_methods import Persistent_Dict
from methods import normalize, denormalize, timer
from video_capture import source_classes
from video_capture import manager_classes
# Pupil detectors
from pupil_detectors import Detector_2D, Detector_3D
pupil_detectors = {Detector_2D.__name__: Detector_2D,
Detector_3D.__name__: Detector_3D}
# UI Platform tweaks
if platform.system() == 'Linux':
scroll_factor = 10.0
window_position_default = (600, 300 * eye_id)
elif platform.system() == 'Windows':
scroll_factor = 10.0
window_position_default = (600,31+ 300 * eye_id)
else:
scroll_factor = 1.0
window_position_default = (600, 300 * eye_id)
# g_pool holds variables for this process
g_pool = Global_Container()
        # make some constants available
g_pool.user_dir = user_dir
#g_pool.version = version
g_pool.app = 'capture'
g_pool.process = 'eye{}'.format(eye_id)
g_pool.timebase = timebase
g_pool.ipc_pub = ipc_socket
def get_timestamp():
return get_time_monotonic() - g_pool.timebase.value
g_pool.get_timestamp = get_timestamp
g_pool.get_now = get_time_monotonic
# Callback functions
def on_resize(window, w, h):
if is_window_visible(window):
active_window = glfw.glfwGetCurrentContext()
glfw.glfwMakeContextCurrent(window)
g_pool.gui.update_window(w, h)
adjust_gl_view(w, h)
glfw.glfwMakeContextCurrent(active_window)
def on_key(window, key, scancode, action, mods):
g_pool.gui.update_key(key, scancode, action, mods)
def on_char(window, char):
g_pool.gui.update_char(char)
def on_iconify(window, iconified):
g_pool.iconified = iconified
def on_button(window, button, action, mods):
if g_pool.display_mode == 'roi':
if action == glfw.GLFW_RELEASE and g_pool.u_r.active_edit_pt:
g_pool.u_r.active_edit_pt = False
                    # if the ROI interacts we don't want
                    # the GUI to interact as well
return
elif action == glfw.GLFW_PRESS:
pos = glfw.glfwGetCursorPos(window)
pos = normalize(pos, glfw.glfwGetWindowSize(main_window))
if g_pool.flip:
pos = 1 - pos[0], 1 - pos[1]
# Position in img pixels
pos = denormalize(pos,g_pool.capture.frame_size) # Position in img pixels
if g_pool.u_r.mouse_over_edit_pt(pos, g_pool.u_r.handle_size + 40,g_pool.u_r.handle_size + 40):
                        # if the ROI interacts we don't want
                        # the GUI to interact as well
return
g_pool.gui.update_button(button, action, mods)
def on_pos(window, x, y):
hdpi_factor = glfw.glfwGetFramebufferSize(
window)[0] / glfw.glfwGetWindowSize(window)[0]
g_pool.gui.update_mouse(x * hdpi_factor, y * hdpi_factor)
if g_pool.u_r.active_edit_pt:
pos = normalize((x, y), glfw.glfwGetWindowSize(main_window))
if g_pool.flip:
pos = 1-pos[0],1-pos[1]
pos = denormalize(pos,g_pool.capture.frame_size )
g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx,pos)
def on_scroll(window, x, y):
g_pool.gui.update_scroll(x, y * scroll_factor)
def on_close(window):
glfw.glfwHideWindow(window)
glfw.glfwSetWindowShouldClose(window,-1)
def replace_source(source_class_name,source_settings):
g_pool.capture.cleanup()
g_pool.capture = source_class_by_name[source_class_name](g_pool,**source_settings)
g_pool.capture.init_gui()
# load session persistent settings
session_settings = Persistent_Dict(os.path.join(g_pool.user_dir, 'user_settings_eye{}'.format(eye_id)))
g_pool.iconified = False
g_pool.capture = None
g_pool.capture_manager = None
g_pool.flip = True #TODO: to flip or not to flip? #session_settings.get('flip', Tr)
g_pool.display_mode = 'camera_image'
g_pool.display_mode_info_text = {'camera_image': "Raw eye camera image. This uses the least amount of CPU power",
'roi': "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
'algorithm': "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below."}
capture_manager_settings = session_settings.get('capture_manager_settings', ('UVC_Manager',{}))
manager_class_name, manager_settings = capture_manager_settings
manager_class_by_name = {c.__name__:c for c in manager_classes}
g_pool.capture_manager = manager_class_by_name[manager_class_name](g_pool,**manager_settings)
# Initialize capture
default_settings = ('UVC_Source',{
'preferred_names' : ["Pupil Cam1 ID0"],
'frame_size': (640,480),
'frame_rate': 90
})
capture_source_settings = overwrite_cap_settings or session_settings.get('capture_settings', default_settings)
source_class_name, source_settings = capture_source_settings
source_class_by_name = {c.__name__:c for c in source_classes}
g_pool.capture = source_class_by_name[source_class_name](g_pool,**source_settings)
assert g_pool.capture
g_pool.u_r = UIRoi((g_pool.capture.frame_size[1],g_pool.capture.frame_size[0]))
roi_user_settings = session_settings.get('roi')
if roi_user_settings and roi_user_settings[-1] == g_pool.u_r.get()[-1]:
g_pool.u_r.set(roi_user_settings)
pupil_detector_settings = session_settings.get('pupil_detector_settings', None)
#TODO set manually
last_pupil_detector = pupil_detectors[session_settings.get('last_pupil_detector', Detector_3D.__name__)]
g_pool.pupil_detector = last_pupil_detector(g_pool, pupil_detector_settings)
# UI callback functions
def set_display_mode_info(val):
g_pool.display_mode = val
g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]
# Initialize glfw
glfw.glfwInit()
title = "Pupil Capture - eye {}".format(eye_id)
width, height = session_settings.get(
'window_size', g_pool.capture.frame_size)
main_window = glfw.glfwCreateWindow(width, height, title, None, None)
window_pos = session_settings.get(
'window_position', window_position_default)
glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
glfw.glfwMakeContextCurrent(main_window)
cygl.utils.init()
# gl_state settings
basic_gl_setup()
g_pool.image_tex = Named_Texture()
g_pool.image_tex.update_from_ndarray(np.ones((1,1),dtype=np.uint8)+125)
# setup GUI
g_pool.gui = ui.UI()
def replace_manager(manager_class):
g_pool.capture_manager.cleanup()
g_pool.capture_manager = manager_class(g_pool)
g_pool.capture_manager.init_gui()
# Register callbacks main_window
glfw.glfwSetFramebufferSizeCallback(main_window, on_resize)
glfw.glfwSetWindowIconifyCallback(main_window, on_iconify)
glfw.glfwSetKeyCallback(main_window, on_key)
glfw.glfwSetCharCallback(main_window, on_char)
glfw.glfwSetMouseButtonCallback(main_window, on_button)
glfw.glfwSetCursorPosCallback(main_window, on_pos)
glfw.glfwSetScrollCallback(main_window, on_scroll)
glfw.glfwSetWindowCloseCallback(main_window, on_close)
# set the last saved window size
on_resize(main_window, *glfw.glfwGetWindowSize(main_window))
# load last gui configuration
g_pool.gui.configuration = session_settings.get('ui_config', {})
should_publish_frames = False
frame_publish_format = 'jpeg'
# create a timer to control window update frequency
window_update_timer = timer(1 / 60)
def window_should_update():
return next(window_update_timer)
logger.warning('Process started.')
# TODO remove and start eye process from our GUI
glfw.glfwHideWindow(main_window)
frame = None
# Event loop
while is_alive_flag:
if notify_sub.new_data:
t, notification = notify_sub.recv()
subject = notification['subject']
if subject == 'eye_process.should_stop':
break
elif subject == 'show_eye_cam':
glfw.glfwShowWindow(main_window)
elif subject.startswith('meta.should_doc'):
ipc_socket.notify({
'subject': 'meta.doc',
'actor': 'eye{}'.format(eye_id),
'doc': eye.__doc__
})
elif subject.startswith('frame_publishing.started'):
should_publish_frames = True
frame_publish_format = notification.get('format', 'jpeg')
elif subject.startswith('frame_publishing.stopped'):
should_publish_frames = False
frame_publish_format = 'jpeg'
elif subject.startswith('start_eye_capture') and notification['target'] == g_pool.process:
replace_source(notification['name'],notification['args'])
g_pool.capture.on_notify(notification)
# Get an image from the grabber
event = {}
g_pool.capture.recent_events(event)
frame = event.get('frame')
g_pool.capture_manager.recent_events(event)
if frame:
f_width, f_height = g_pool.capture.frame_size
if (g_pool.u_r.array_shape[0], g_pool.u_r.array_shape[1]) != (f_height, f_width):
g_pool.u_r = UIRoi((f_height, f_width))
if should_publish_frames and frame.jpeg_buffer:
if frame_publish_format == "jpeg":
data = frame.jpeg_buffer
elif frame_publish_format == "yuv":
data = frame.yuv_buffer
elif frame_publish_format == "bgr":
data = frame.bgr
elif frame_publish_format == "gray":
data = frame.gray
pupil_socket.send('frame.eye.%s'%eye_id,{
'width': frame.width,
                        'height': frame.height,
'index': frame.index,
'timestamp': frame.timestamp,
'format': frame_publish_format,
'__raw_data__': [data]
})
# pupil ellipse detection
result = g_pool.pupil_detector.detect(frame, g_pool.u_r, g_pool.display_mode == 'algorithm')
result['id'] = eye_id
# stream the result
pupil_socket.send('pupil.%s'%eye_id,result)
# GL drawing
if window_should_update():
if is_window_visible(main_window):
glfw.glfwMakeContextCurrent(main_window)
clear_gl_screen()
if frame:
# switch to work in normalized coordinate space
if g_pool.display_mode == 'algorithm':
g_pool.image_tex.update_from_ndarray(frame.img)
elif g_pool.display_mode in ('camera_image', 'roi'):
g_pool.image_tex.update_from_ndarray(frame.gray)
else:
pass
make_coord_system_norm_based(g_pool.flip)
g_pool.image_tex.draw()
f_width, f_height = g_pool.capture.frame_size
make_coord_system_pixel_based((f_height, f_width, 3), g_pool.flip)
if frame:
if result['method'] == '3d c++':
eye_ball = result['projected_sphere']
try:
pts = cv2.ellipse2Poly(
(int(eye_ball['center'][0]),
int(eye_ball['center'][1])),
(int(eye_ball['axes'][0] / 2),
int(eye_ball['axes'][1] / 2)),
int(eye_ball['angle']), 0, 360, 8)
except ValueError as e:
pass
else:
draw_polyline(pts, 2, RGBA(0., .9, .1, result['model_confidence']))
if result['confidence'] > 0:
if 'ellipse' in result:
pts = cv2.ellipse2Poly(
(int(result['ellipse']['center'][0]),
int(result['ellipse']['center'][1])),
(int(result['ellipse']['axes'][0] / 2),
int(result['ellipse']['axes'][1] / 2)),
int(result['ellipse']['angle']), 0, 360, 15)
confidence = result['confidence'] * 0.7
draw_polyline(pts, 1, RGBA(1., 0, 0, confidence))
draw_points([result['ellipse']['center']],
size=20,
color=RGBA(1., 0., 0., confidence),
sharpness=1.)
# render GUI
g_pool.gui.update()
# render the ROI
g_pool.u_r.draw(g_pool.gui.scale)
if g_pool.display_mode == 'roi':
g_pool.u_r.draw_points(g_pool.gui.scale)
# update screen
glfw.glfwSwapBuffers(main_window)
glfw.glfwPollEvents()
g_pool.pupil_detector.visualize() # detector decides if we visualize or not
# END while running
glfw.glfwRestoreWindow(main_window) # need to do this for windows os
# save session persistent settings
session_settings['gui_scale'] = g_pool.gui.scale
session_settings['roi'] = g_pool.u_r.get()
session_settings['flip'] = g_pool.flip
session_settings['display_mode'] = g_pool.display_mode
session_settings['ui_config'] = g_pool.gui.configuration
session_settings['capture_settings'] = g_pool.capture.class_name, g_pool.capture.get_init_dict()
session_settings['capture_manager_settings'] = g_pool.capture_manager.class_name, g_pool.capture_manager.get_init_dict()
session_settings['window_size'] = glfw.glfwGetWindowSize(main_window)
session_settings['window_position'] = glfw.glfwGetWindowPos(main_window)
#session_settings['version'] = g_pool.version
session_settings['last_pupil_detector'] = g_pool.pupil_detector.__class__.__name__
session_settings['pupil_detector_settings'] = g_pool.pupil_detector.get_settings()
session_settings.close()
g_pool.capture.deinit_gui()
g_pool.gui.terminate()
glfw.glfwDestroyWindow(main_window)
glfw.glfwTerminate()
g_pool.capture_manager.cleanup()
g_pool.capture.cleanup()
logger.info("Process shutting down.")
|
lgpl-3.0
| 3,157,418,032,018,782,700 | 42.535637 | 220 | 0.533511 | false |
cogeorg/BlackRhino
|
networkx/drawing/nx_pylab.py
|
1
|
32861
|
# Copyright (C) 2004-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Author: Aric Hagberg (hagberg@lanl.gov)
"""
**********
Matplotlib
**********
Draw networks with matplotlib.
See Also
--------
matplotlib: http://matplotlib.org/
pygraphviz: http://pygraphviz.github.io/
"""
import networkx as nx
from networkx.drawing.layout import shell_layout,\
circular_layout,spectral_layout,spring_layout,random_layout
__all__ = ['draw',
'draw_networkx',
'draw_networkx_nodes',
'draw_networkx_edges',
'draw_networkx_labels',
'draw_networkx_edge_labels',
'draw_circular',
'draw_random',
'draw_spectral',
'draw_spring',
'draw_shell']
def draw(G, pos=None, ax=None, **kwds):
"""Draw the graph G with Matplotlib.
Draw the graph as a simple representation with no node
labels or edge labels and using the full Matplotlib figure area
and no axis labels by default. See draw_networkx() for more
full-featured drawing that allows title, axis labels etc.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See :py:mod:`networkx.drawing.layout` for functions that
compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in specified Matplotlib axes.
kwds : optional keywords
See networkx.draw_networkx() for a description of optional keywords.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
See Also
--------
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
Notes
-----
This function has the same name as pylab.draw and pyplot.draw
so beware when using
>>> from networkx import *
since you might overwrite the pylab.draw function.
With pyplot use
>>> import matplotlib.pyplot as plt
>>> import networkx as nx
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G) # networkx draw()
>>> plt.draw() # pyplot draw()
Also see the NetworkX drawing examples at
http://networkx.readthedocs.io/en/latest/gallery.html
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
cf = plt.gcf()
else:
cf = ax.get_figure()
cf.set_facecolor('w')
if ax is None:
if cf._axstack() is None:
ax = cf.add_axes((0, 0, 1, 1))
else:
ax = cf.gca()
if 'with_labels' not in kwds:
kwds['with_labels'] = 'labels' in kwds
try:
draw_networkx(G, pos=pos, ax=ax, **kwds)
ax.set_axis_off()
plt.draw_if_interactive()
except:
raise
return
def draw_networkx(G, pos=None, arrows=True, with_labels=True, **kwds):
"""Draw the graph G using Matplotlib.
Draw the graph with Matplotlib with options for node positions,
labeling, titles, and many other drawing features.
See draw() for simple drawing without labels or axes.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See :py:mod:`networkx.drawing.layout` for functions that
compute node positions.
arrows : bool, optional (default=True)
For directed graphs, if True draw arrowheads.
with_labels : bool, optional (default=True)
Set to True to draw labels on the nodes.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional (default G.nodes())
Draw only specified nodes
edgelist : list, optional (default=G.edges())
Draw only specified edges
node_size : scalar or array, optional (default=300)
Size of nodes. If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats, (default='r')
Node color. Can be a single color format string,
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string, optional (default='o')
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8'.
alpha : float, optional (default=1.0)
The node and edge transparency
cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of nodes
vmin,vmax : float, optional (default=None)
Minimum and maximum for node colormap scaling
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
width : float, optional (default=1.0)
Line width of edges
edge_color : color string, or array of floats (default='r')
Edge color. Can be a single color format string,
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
edge_cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of edges
edge_vmin,edge_vmax : floats, optional (default=None)
Minimum and maximum for edge colormap scaling
style : string, optional (default='solid')
Edge line style (solid|dashed|dotted,dashdot)
labels : dictionary, optional (default=None)
Node labels in a dictionary keyed by node of text labels
font_size : int, optional (default=12)
Font size for text labels
font_color : string, optional (default='k' black)
Font color string
font_weight : string, optional (default='normal')
Font weight
font_family : string, optional (default='sans-serif')
Font family
label : string, optional
Label for graph legend
Notes
-----
For directed graphs, "arrows" (actually just thicker stubs) are drawn
at the head end. Arrows can be turned off with keyword arrows=False.
Yes, it is ugly but drawing proper arrows with Matplotlib this
way is tricky.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
>>> import matplotlib.pyplot as plt
    >>> limits=plt.axis('off') # turn off axis
Also see the NetworkX drawing examples at
http://networkx.readthedocs.io/en/latest/gallery.html
See Also
--------
draw()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if pos is None:
pos = nx.drawing.spring_layout(G) # default to spring layout
node_collection = draw_networkx_nodes(G, pos, **kwds)
edge_collection = draw_networkx_edges(G, pos, arrows=arrows, **kwds)
if with_labels:
draw_networkx_labels(G, pos, **kwds)
plt.draw_if_interactive()
def draw_networkx_nodes(G, pos,
nodelist=None,
node_size=300,
node_color='r',
node_shape='o',
alpha=1.0,
cmap=None,
vmin=None,
vmax=None,
ax=None,
linewidths=None,
label=None,
**kwds):
"""Draw the nodes of the graph G.
This draws only the nodes of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional
Draw only specified nodes (default G.nodes())
node_size : scalar or array
Size of nodes (default=300). If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats
Node color. Can be a single color format string (default='r'),
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8' (default='o').
alpha : float
The node transparency (default=1.0)
cmap : Matplotlib colormap
Colormap for mapping intensities of nodes (default=None)
vmin,vmax : floats
Minimum and maximum for node colormap scaling (default=None)
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
label : [None| string]
Label for legend
Returns
-------
matplotlib.collections.PathCollection
`PathCollection` of the nodes.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nodes=nx.draw_networkx_nodes(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.readthedocs.io/en/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
import collections
try:
import matplotlib.pyplot as plt
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if nodelist is None:
nodelist = list(G)
if not nodelist or len(nodelist) == 0: # empty nodelist, no drawing
return None
try:
xy = numpy.asarray([pos[v] for v in nodelist])
except KeyError as e:
raise nx.NetworkXError('Node %s has no position.'%e)
except ValueError:
raise nx.NetworkXError('Bad value in node positions.')
if isinstance(alpha, collections.Iterable):
node_color = apply_alpha(node_color, alpha, nodelist, cmap, vmin, vmax)
alpha = None
node_collection = ax.scatter(xy[:, 0], xy[:, 1],
s=node_size,
c=node_color,
marker=node_shape,
cmap=cmap,
vmin=vmin,
vmax=vmax,
alpha=alpha,
linewidths=linewidths,
label=label)
node_collection.set_zorder(2)
return node_collection
def draw_networkx_edges(G, pos,
edgelist=None,
width=1.0,
edge_color='k',
style='solid',
alpha=1.0,
edge_cmap=None,
edge_vmin=None,
edge_vmax=None,
ax=None,
arrows=True,
label=None,
**kwds):
"""Draw the edges of the graph G.
This draws only the edges of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
edgelist : collection of edge tuples
Draw only specified edges(default=G.edges())
width : float, or array of floats
Line width of edges (default=1.0)
edge_color : color string, or array of floats
Edge color. Can be a single color format string (default='r'),
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
style : string
Edge line style (default='solid') (solid|dashed|dotted,dashdot)
alpha : float
The edge transparency (default=1.0)
    edge_cmap : Matplotlib colormap
Colormap for mapping intensities of edges (default=None)
edge_vmin,edge_vmax : floats
Minimum and maximum for edge colormap scaling (default=None)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
arrows : bool, optional (default=True)
For directed graphs, if True draw arrowheads.
label : [None| string]
Label for legend
Returns
-------
matplotlib.collection.LineCollection
`LineCollection` of the edges
Notes
-----
For directed graphs, "arrows" (actually just thicker stubs) are drawn
at the head end. Arrows can be turned off with keyword arrows=False.
Yes, it is ugly but drawing proper arrows with Matplotlib this
way is tricky.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edges=nx.draw_networkx_edges(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.readthedocs.io/en/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
from matplotlib.colors import colorConverter, Colormap
from matplotlib.collections import LineCollection
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if edgelist is None:
edgelist = list(G.edges())
if not edgelist or len(edgelist) == 0: # no edges!
return None
# set edge positions
edge_pos = numpy.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])
if not cb.iterable(width):
lw = (width,)
else:
lw = width
if not cb.is_string_like(edge_color) \
and cb.iterable(edge_color) \
and len(edge_color) == len(edge_pos):
if numpy.alltrue([cb.is_string_like(c)
for c in edge_color]):
# (should check ALL elements)
# list of color letters such as ['k','r','k',...]
edge_colors = tuple([colorConverter.to_rgba(c, alpha)
for c in edge_color])
elif numpy.alltrue([not cb.is_string_like(c)
for c in edge_color]):
# If color specs are given as (rgb) or (rgba) tuples, we're OK
if numpy.alltrue([cb.iterable(c) and len(c) in (3, 4)
for c in edge_color]):
edge_colors = tuple(edge_color)
else:
# numbers (which are going to be mapped with a colormap)
edge_colors = None
else:
raise ValueError('edge_color must consist of either color names or numbers')
else:
if cb.is_string_like(edge_color) or len(edge_color) == 1:
edge_colors = (colorConverter.to_rgba(edge_color, alpha), )
else:
            raise ValueError('edge_color must be a single color or list of exactly m colors where m is the number of edges')
edge_collection = LineCollection(edge_pos,
colors=edge_colors,
linewidths=lw,
antialiaseds=(1,),
linestyle=style,
transOffset = ax.transData,
)
edge_collection.set_zorder(1) # edges go behind nodes
edge_collection.set_label(label)
ax.add_collection(edge_collection)
# Note: there was a bug in mpl regarding the handling of alpha values for
# each line in a LineCollection. It was fixed in matplotlib in r7184 and
# r7189 (June 6 2009). We should then not set the alpha value globally,
# since the user can instead provide per-edge alphas now. Only set it
# globally if provided as a scalar.
if cb.is_numlike(alpha):
edge_collection.set_alpha(alpha)
if edge_colors is None:
if edge_cmap is not None:
assert(isinstance(edge_cmap, Colormap))
edge_collection.set_array(numpy.asarray(edge_color))
edge_collection.set_cmap(edge_cmap)
if edge_vmin is not None or edge_vmax is not None:
edge_collection.set_clim(edge_vmin, edge_vmax)
else:
edge_collection.autoscale()
arrow_collection = None
if G.is_directed() and arrows:
# a directed graph hack
# draw thick line segments at head end of edge
# waiting for someone else to implement arrows that will work
arrow_colors = edge_colors
a_pos = []
p = 1.0-0.25 # make head segment 25 percent of edge length
for src, dst in edge_pos:
x1, y1 = src
x2, y2 = dst
dx = x2-x1 # x offset
dy = y2-y1 # y offset
d = numpy.sqrt(float(dx**2 + dy**2)) # length of edge
if d == 0: # source and target at same position
continue
if dx == 0: # vertical edge
xa = x2
ya = dy*p+y1
if dy == 0: # horizontal edge
ya = y2
xa = dx*p+x1
else:
theta = numpy.arctan2(dy, dx)
xa = p*d*numpy.cos(theta)+x1
ya = p*d*numpy.sin(theta)+y1
a_pos.append(((xa, ya), (x2, y2)))
arrow_collection = LineCollection(a_pos,
colors=arrow_colors,
linewidths=[4*ww for ww in lw],
antialiaseds=(1,),
transOffset = ax.transData,
)
arrow_collection.set_zorder(1) # edges go behind nodes
arrow_collection.set_label(label)
ax.add_collection(arrow_collection)
# update view
minx = numpy.amin(numpy.ravel(edge_pos[:, :, 0]))
maxx = numpy.amax(numpy.ravel(edge_pos[:, :, 0]))
miny = numpy.amin(numpy.ravel(edge_pos[:, :, 1]))
maxy = numpy.amax(numpy.ravel(edge_pos[:, :, 1]))
w = maxx-minx
h = maxy-miny
padx, pady = 0.05*w, 0.05*h
corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
ax.update_datalim(corners)
ax.autoscale_view()
# if arrow_collection:
return edge_collection
def draw_networkx_labels(G, pos,
labels=None,
font_size=12,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
bbox=None,
ax=None,
**kwds):
"""Draw node labels on the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
labels : dictionary, optional (default=None)
Node labels in a dictionary keyed by node of text labels
font_size : int
Font size for text labels (default=12)
font_color : string
Font color string (default='k' black)
font_family : string
Font family (default='sans-serif')
font_weight : string
Font weight (default='normal')
alpha : float
The text transparency (default=1.0)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
Returns
-------
dict
`dict` of labels keyed on the nodes
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> labels=nx.draw_networkx_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.readthedocs.io/en/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if labels is None:
labels = dict((n, n) for n in G.nodes())
# set optional alignment
horizontalalignment = kwds.get('horizontalalignment', 'center')
verticalalignment = kwds.get('verticalalignment', 'center')
text_items = {} # there is no text collection so we'll fake one
for n, label in labels.items():
(x, y) = pos[n]
if not cb.is_string_like(label):
label = str(label) # this will cause "1" and 1 to be labeled the same
t = ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
alpha=alpha,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
transform=ax.transData,
bbox=bbox,
clip_on=True,
)
text_items[n] = t
return text_items
def draw_networkx_edge_labels(G, pos,
edge_labels=None,
label_pos=0.5,
font_size=10,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
bbox=None,
ax=None,
rotate=True,
**kwds):
"""Draw edge labels.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
alpha : float
The text transparency (default=1.0)
edge_labels : dictionary
Edge labels in a dictionary keyed by edge two-tuple of text
labels (default=None). Only labels for the keys in the dictionary
are drawn.
label_pos : float
Position of edge label along edge (0=head, 0.5=center, 1=tail)
font_size : int
Font size for text labels (default=12)
font_color : string
Font color string (default='k' black)
font_weight : string
Font weight (default='normal')
font_family : string
Font family (default='sans-serif')
bbox : Matplotlib bbox
Specify text box shape and colors.
clip_on : bool
Turn on clipping at axis boundaries (default=True)
Returns
-------
dict
`dict` of labels keyed on the edges
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edge_labels=nx.draw_networkx_edge_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.readthedocs.io/en/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
"""
try:
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if edge_labels is None:
labels = dict(((u, v), d) for u, v, d in G.edges(data=True))
else:
labels = edge_labels
text_items = {}
for (n1, n2), label in labels.items():
(x1, y1) = pos[n1]
(x2, y2) = pos[n2]
(x, y) = (x1 * label_pos + x2 * (1.0 - label_pos),
y1 * label_pos + y2 * (1.0 - label_pos))
if rotate:
angle = numpy.arctan2(y2-y1, x2-x1)/(2.0*numpy.pi)*360 # degrees
# make label orientation "right-side-up"
if angle > 90:
angle -= 180
if angle < - 90:
angle += 180
# transform data coordinate angle to screen coordinate angle
xy = numpy.array((x, y))
trans_angle = ax.transData.transform_angles(numpy.array((angle,)),
xy.reshape((1, 2)))[0]
else:
trans_angle = 0.0
# use default box of white with white border
if bbox is None:
bbox = dict(boxstyle='round',
ec=(1.0, 1.0, 1.0),
fc=(1.0, 1.0, 1.0),
)
if not cb.is_string_like(label):
label = str(label) # this will cause "1" and 1 to be labeled the same
# set optional alignment
horizontalalignment = kwds.get('horizontalalignment', 'center')
verticalalignment = kwds.get('verticalalignment', 'center')
t = ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
alpha=alpha,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
rotation=trans_angle,
transform=ax.transData,
bbox=bbox,
zorder=1,
clip_on=True,
)
text_items[(n1, n2)] = t
return text_items
def draw_circular(G, **kwargs):
"""Draw the graph G with a circular layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, circular_layout(G), **kwargs)
def draw_random(G, **kwargs):
"""Draw the graph G with a random layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, random_layout(G), **kwargs)
def draw_spectral(G, **kwargs):
"""Draw the graph G with a spectral layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, spectral_layout(G), **kwargs)
def draw_spring(G, **kwargs):
"""Draw the graph G with a spring layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, spring_layout(G), **kwargs)
def draw_shell(G, **kwargs):
"""Draw networkx graph with shell layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
nlist = kwargs.get('nlist', None)
if nlist is not None:
del(kwargs['nlist'])
draw(G, shell_layout(G, nlist=nlist), **kwargs)
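# Illustrative usage (not part of the original module): each helper above simply
# precomputes a layout and delegates to draw(); assuming a matplotlib backend:
# >>> G = nx.petersen_graph()
# >>> nx.draw_circular(G)
# >>> nx.draw_shell(G, nlist=[list(range(5)), list(range(5, 10))])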
def draw_nx(G, pos, **kwds):
"""For backward compatibility; use draw or draw_networkx."""
draw(G, pos, **kwds)
def apply_alpha(colors, alpha, elem_list, cmap=None, vmin=None, vmax=None):
"""Apply an alpha (or list of alphas) to the colors provided.
Parameters
----------
color : color string, or array of floats
Color of element. Can be a single color format string (default='r'),
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
alpha : float or array of floats
Alpha values for elements. This can be a single alpha value, in
which case it will be applied to all the elements of color. Otherwise,
if it is an array, the elements of alpha will be applied to the colors
in order (cycling through alpha multiple times if necessary).
elem_list : array of networkx objects
The list of elements which are being colored. These could be nodes, edges
or labels.
cmap : matplotlib colormap
Color map for use if colors is a list of floats corresponding to points on
a color mapping.
vmin, vmax : float
Minimum and maximum values for normalizing colors if a color mapping is used.
Returns
-------
rgba_colors : numpy ndarray
Array containing RGBA format values for each of the node colours.
"""
import numbers
import itertools
try:
import numpy
from matplotlib.colors import colorConverter
import matplotlib.cm as cm
except ImportError:
raise ImportError("Matplotlib required for draw()")
# If we have been provided with a list of numbers as long as elem_list, apply the color mapping.
if len(colors) == len(elem_list) and isinstance(colors[0], numbers.Number):
mapper = cm.ScalarMappable(cmap=cmap)
mapper.set_clim(vmin, vmax)
rgba_colors = mapper.to_rgba(colors)
# Otherwise, convert colors to matplotlib's RGB using the colorConverter object.
# These are converted to numpy ndarrays to be consistent with the to_rgba method of ScalarMappable.
else:
try:
rgba_colors = numpy.array([colorConverter.to_rgba(colors)])
except ValueError:
rgba_colors = numpy.array([colorConverter.to_rgba(color) for color in colors])
# Set the final column of the rgba_colors to have the relevant alpha values.
try:
# If alpha is longer than the number of colors, resize to the number of elements.
# Also, if rgba_colors.size (the number of elements of rgba_colors) is the same as the number of
# elements, resize the array, to avoid it being interpreted as a colormap by scatter()
if len(alpha) > len(rgba_colors) or rgba_colors.size == len(elem_list):
rgba_colors.resize((len(elem_list), 4))
rgba_colors[1:, 0] = rgba_colors[0, 0]
rgba_colors[1:, 1] = rgba_colors[0, 1]
rgba_colors[1:, 2] = rgba_colors[0, 2]
rgba_colors[:, 3] = list(itertools.islice(itertools.cycle(alpha), len(rgba_colors)))
except TypeError:
rgba_colors[:, -1] = alpha
return rgba_colors
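# Illustrative behaviour (not part of the original module): a single colour 'r'
# broadcast over three elements with per-element alphas yields the rows
# (1, 0, 0, 0.2), (1, 0, 0, 0.5) and (1, 0, 0, 1.0).
# >>> apply_alpha('r', [0.2, 0.5, 1.0], elem_list=[1, 2, 3])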
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import matplotlib as mpl
mpl.use('PS', warn=False)
import matplotlib.pyplot as plt
except:
raise SkipTest("matplotlib not available")
|
gpl-3.0
| 2,971,644,789,041,964,000 | 30.177419 | 124 | 0.581601 | false |
svidela/pyzcasp
|
pyzcasp/asp/adapters.py
|
1
|
3307
|
# Copyright (c) 2014, Santiago Videla
#
# This file is part of pyzcasp.
#
# caspo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# caspo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with caspo. If not, see <http://www.gnu.org/licenses/>.
# -*- coding: utf-8 -*-
from zope import component
import re, os
from interfaces import *
from impl import *
class TermSetAdapter(object):
interface.implements(ITermSet)
def __init__(self):
super(TermSetAdapter, self).__init__()
self._termset = TermSet()
@property
def score(self):
return self._termset.score
def add(self, term):
self._termset.add(term)
def union(self, other):
return self._termset.union(other)
def to_str(self, pprint=False):
return self._termset.to_str(pprint)
def to_file(self, filename=None, pprint=False):
return self._termset.to_file(filename, pprint)
def pprint(self):
self._termset.pprint()
def __iter__(self):
return iter(self._termset)
def __len__(self):
return len(self._termset)
class AnswerSet2TermSet(TermSetAdapter):
component.adapts(IAnswerSet)
def __init__(self, answer):
super(AnswerSet2TermSet, self).__init__()
parser = Grammar()
for atom in answer.atoms:
# raise pyparsing.ParseException if cannot parse
self._termset.add(parser.parse(atom))
self._termset.score = answer.score
class GrounderSolver(object):
interface.implements(IGrounderSolver)
component.adapts(IGrounder, ISolver)
def __init__(self, grounder, solver):
super(GrounderSolver, self).__init__()
self.grounder = grounder
self.solver = solver
def run(self, lp="", grounder_args=[], solver_args=[], adapter=None, termset_filter=None):
if lp and '-' not in grounder_args:
grounder_args.append('-')
grounding, code = self.grounder.execute(lp, *grounder_args)
self.solver.execute(grounding, *solver_args)
return IAnswerSetsProcessing(self.solver).processing(adapter, termset_filter)
class AnswerSetsProcessing(object):
component.adapts(ISolver)
interface.implements(IAnswerSetsProcessing)
def __init__(self, solver):
self.solver = solver
def processing(self, adapter=None, termset_filter=None):
ans = []
for answer in self.solver.answers():
if adapter:
ans.append(adapter(answer))
else:
ts = ITermSet(answer)
if termset_filter:
ts = TermSet(filter(termset_filter, ts), ts.score)
ans.append(ts)
return ans
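# Illustrative flow (hypothetical grounder/solver objects, not part of the
# original module): adapt a grounder/solver pair and run a small program.
# >>> gs = GrounderSolver(grounder, solver)  # objects providing IGrounder/ISolver
# >>> answer_sets = gs.run("p(1..3).")       # list of term sets, one per answer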
|
gpl-3.0
| 265,944,677,918,393,200 | 29.62037 | 94 | 0.614454 | false |
kalhartt/pycclone
|
pycclone/source.py
|
1
|
6074
|
"""
#pycclone.source
Provides a class for handling source files. The primary goal is to create a
generator yielding (docs, code) tuples from a source file.
"""
import logging
import re
import os
import pycclone.utils as utils
import pycclone.languages as languages
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
log = logging.getLogger(__name__)
class ParseError(Exception):
"""
ParseError raises when unexpected things happen while separating the
source file.
"""
pass
class Source(object):
"""
Class for source code file.
"""
def __init__(self, fname):
"""
Create Source object.
"""
log.info('Creating Source object for %s', fname)
self.fname = fname # filename including path
self.detect_language()
###Helper Methods
def detect_language(self, language=None):
"""
Detects language from extension or argument.
"""
log.info('Detecting language for %s', self.fname)
if language:
self.lang = language
else:
ext = os.path.splitext(self.fname)[1]
self.lang = languages.get_by_ext(ext)
self.ms = self.lang['multistart']
self.me = self.lang['multiend']
self.multi_re = re.compile('%s.*?%s' % (self.me, self.ms))
log.debug('Detected %s for %s', self.lang['name'], self.fname)
def read_multiline(self, line, f, indent):
"""
Reads the file until the end of a multiline section.
It may return multiple multiline sections as one if a new
multiline section begins on the same line that one ends.
"""
log.debug('Beginning multiline search at position %d in %s', f.tell(), self.fname)
result = ''
n = line.find(self.ms)
if n >= 0:
line = line[n + len(self.ms):]
while line:
if line[:indent].isspace() and len(line) > indent:
line = line[indent:]
if self.me in self.multi_re.sub('', line):
result += ''.join(line.rsplit(self.me, 1))
break
result += line
line = f.readline()
else:
raise ParseError('Unexpected EOF while parsing %s.' % self.fname)
return result
# === Access Methods ===
def read_sections(self):
"""
Iterator yielding chunks of documentation and code in a tuple.
This algorithm will never support Whitespace...
"""
log.info('Reading sections in %s', self.fname)
with open(self.fname, 'rU') as f:
docs = ''
code = ''
buff = ''
# Iterate the file
indent_re = re.compile(r'\s*')
in_docs = False
line = f.readline()
while line:
line_strip = line.strip()
# Add blank lines to the nearest code block
# Ignore blank lines between docs
if not line_strip:
if not in_docs:
code += line
else:
buff += line
line = f.readline()
continue
# Determine if the line is documentation or starts multiline
# documentation
line_docs = line_strip.startswith(self.lang['symbol'])
line_multi = line_strip.startswith(self.ms)
# If we are starting a new section, yield previous section
if not in_docs and (line_docs or line_multi) and (docs or code):
yield (docs, code)
docs = ''
code = ''
if line_multi:
# Starting multiline comment
in_docs = True
indent = len(indent_re.match(line).group())
docs += self.read_multiline(line, f, indent)
elif line_docs:
# Starting a single line comment
in_docs = True
index = line.find(self.lang['symbol']) + len(self.lang['symbol'])
docs += line[index:]
elif self.ms in line_strip:
# Multiline docs in code block
in_docs = False
indent = len(indent_re.match(line).group())
code += buff + self.read_multiline(line, f, indent)
else:
# Code block
in_docs = False
code += buff + line
# reset loop
buff = ''
line = f.readline()
# Final yield
yield (docs, code)
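    # Illustrative usage (hypothetical file name, not part of the original
    # module): iterate the parsed (docs, code) pairs of a source file.
    # >>> src = Source('example.py')
    # >>> for docs, code in src.read_sections():
    # ...     print(len(docs), len(code))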
def format_sections(self, formatter, highlighter):
for docs, code in self.read_sections():
yield (formatter.format(docs, self.lang), highlighter.highlight(code, self.lang))
# === Generating Method ===
def generate_docs(self, template, formatter, highlighter, outdir):
"""
Generates and writes the documentation for the source file.
If outdir is false, this will return the generated document.
Otherwise it returns None.
"""
log.info('Generating docs for %s', self.fname)
try:
if outdir:
outname = utils.destination(self.fname, outdir) + template.ext
destdir = os.path.dirname(outname)
if not os.path.isdir(destdir):
os.makedirs(destdir)
f = open(outname, 'w')
else:
f = StringIO()
for line in template.generate_docs(self.format_sections(formatter, highlighter)):
f.write(line)
log.info('Documentation written for %s', self.fname)
if not outdir:
result = f.getvalue()
else:
result = None
finally:
f.close()
return result
|
mit
| -2,936,048,722,182,657,500 | 28.921182 | 93 | 0.515311 | false |
googleapis/googleapis-gen
|
google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/enums/types/campaign_serving_status.py
|
1
|
1207
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.enums',
marshal='google.ads.googleads.v8',
manifest={
'CampaignServingStatusEnum',
},
)
class CampaignServingStatusEnum(proto.Message):
r"""Message describing Campaign serving statuses. """
class CampaignServingStatus(proto.Enum):
r"""Possible serving statuses of a campaign."""
UNSPECIFIED = 0
UNKNOWN = 1
SERVING = 2
NONE = 3
ENDED = 4
PENDING = 5
SUSPENDED = 6
__all__ = tuple(sorted(__protobuf__.manifest))
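# Illustrative usage (not part of the generated module): the nested enum behaves
# like a standard proto-plus enum, e.g.
# >>> CampaignServingStatusEnum.CampaignServingStatus.SERVING.value
# 2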
|
apache-2.0
| 5,789,341,695,327,524,000 | 28.439024 | 74 | 0.67937 | false |
jcassee/django-geckoboard
|
django_geckoboard/decorators.py
|
1
|
18337
|
"""
Geckoboard decorators.
"""
from __future__ import absolute_import
from collections import OrderedDict
from functools import wraps
from hashlib import md5
from xml.dom.minidom import Document
import base64
import json
from Crypto import Random
from Crypto.Cipher import AES
from django.conf import settings
from django.http import HttpResponse, HttpResponseForbidden
from django.utils.decorators import available_attrs
from django.views.decorators.csrf import csrf_exempt
import six
TEXT_NONE = 0
TEXT_INFO = 2
TEXT_WARN = 1
class WidgetDecorator(object):
"""
Geckoboard widget decorator.
The decorated view must return a data structure suitable for
serialization to XML or JSON for Geckoboard. See the Geckoboard
API docs or the source of extending classes for details.
If the ``GECKOBOARD_API_KEY`` setting is used, the request must
contain the correct API key, or a 403 Forbidden response is
returned.
If the ``encrypted` argument is set to True, then the data will be
encrypted using ``GECKOBOARD_PASSWORD`` (JSON only).
"""
def __new__(cls, *args, **kwargs):
obj = object.__new__(cls)
obj._encrypted = None
if 'encrypted' in kwargs:
obj._encrypted = kwargs.pop('encrypted')
obj._format = None
if 'format' in kwargs:
obj._format = kwargs.pop('format')
obj.data = kwargs
try:
return obj(args[0])
except IndexError:
return obj
def __call__(self, view_func):
def _wrapped_view(request, *args, **kwargs):
if not _is_api_key_correct(request):
return HttpResponseForbidden("Geckoboard API key incorrect")
view_result = view_func(request, *args, **kwargs)
data = self._convert_view_result(view_result)
try:
self.data.update(data)
except ValueError:
self.data = data
content, content_type = _render(request, self.data, self._encrypted, self._format)
return HttpResponse(content, content_type=content_type)
wrapper = wraps(view_func, assigned=available_attrs(view_func))
return csrf_exempt(wrapper(_wrapped_view))
def _convert_view_result(self, data):
# Extending classes do view result mangling here.
return data
widget = WidgetDecorator
class NumberWidgetDecorator(WidgetDecorator):
"""
Geckoboard Number widget decorator.
The decorated view must return a tuple `(current, [previous])`, where
`current` is the current value and `previous` is the previous value
    of the measured quantity.
"""
def _convert_view_result(self, result):
if not isinstance(result, (tuple, list)):
result = [result]
result = list(result)
for k, v in enumerate(result):
result[k] = v if isinstance(v, dict) else {'value': v}
return {'item': result}
number_widget = NumberWidgetDecorator
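# Illustrative view (hypothetical, not part of the original module):
# @number_widget
# def orders_widget(request):
#     return (1234, 1103)  # current value, previous value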
class RAGWidgetDecorator(WidgetDecorator):
"""
Geckoboard Red-Amber-Green (RAG) widget decorator.
The decorated view must return a tuple with three tuples `(value,
[text])`. The `value` parameters are the numbers shown in red,
amber and green (in that order). The `text` parameters are optional
and will be displayed next to the respective values in the
dashboard.
"""
def _convert_view_result(self, result):
items = []
for elem in result:
if not isinstance(elem, (tuple, list)):
elem = [elem]
item = OrderedDict()
if elem[0] is None:
item['value'] = ''
else:
item['value'] = elem[0]
if len(elem) > 1:
item['text'] = elem[1]
items.append(item)
return {'item': items}
rag_widget = RAGWidgetDecorator
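# Illustrative view (hypothetical, not part of the original module):
# @rag_widget
# def tickets_widget(request):
#     return ((3, 'overdue'), (7, 'due soon'), (12, 'on track'))  # red, amber, green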
class TextWidgetDecorator(WidgetDecorator):
"""
Geckoboard Text widget decorator.
The decorated view must return a list of tuples `(message, [type])`.
The `message` parameters are strings that will be shown in the
widget. The `type` parameters are optional and tell Geckoboard how
to annotate the messages. Use ``TEXT_INFO`` for informational
    messages, ``TEXT_WARN`` for warnings and ``TEXT_NONE`` for plain
text (the default).
"""
def _convert_view_result(self, result):
items = []
if not isinstance(result, (tuple, list)):
result = [result]
for elem in result:
if not isinstance(elem, (tuple, list)):
elem = [elem]
item = OrderedDict()
item['text'] = elem[0]
if len(elem) > 1 and elem[1] is not None:
item['type'] = elem[1]
else:
item['type'] = TEXT_NONE
items.append(item)
return {'item': items}
text_widget = TextWidgetDecorator
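# Illustrative view (hypothetical, not part of the original module):
# @text_widget
# def status_widget(request):
#     return [('All systems nominal', TEXT_INFO), ('Disk usage at 90%', TEXT_WARN)]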
class PieChartWidgetDecorator(WidgetDecorator):
"""
Geckoboard Pie chart decorator.
The decorated view must return a list of tuples `(value, label,
color)`. The color parameter is a string 'RRGGBB[TT]' representing
red, green, blue and optionally transparency.
"""
def _convert_view_result(self, result):
items = []
for elem in result:
if not isinstance(elem, (tuple, list)):
elem = [elem]
item = OrderedDict()
item['value'] = elem[0]
if len(elem) > 1:
item['label'] = elem[1]
if len(elem) > 2:
item['colour'] = elem[2]
items.append(item)
return {'item': items}
pie_chart = PieChartWidgetDecorator
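# Illustrative view (hypothetical, not part of the original module):
# @pie_chart
# def traffic_widget(request):
#     return [(60, 'Mobile', 'FF9900'), (40, 'Desktop', '3399FF')]  # value, label, colour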
class LineChartWidgetDecorator(WidgetDecorator):
"""
Geckoboard Line chart decorator.
The decorated view must return a tuple `(values, x_axis, y_axis,
[color])`. The `values` parameter is a list of data points. The
`x-axis` parameter is a label string or a list of strings, that will
be placed on the X-axis. The `y-axis` parameter works similarly for
the Y-axis. If there are more than one axis label, they are placed
evenly along the axis. The optional `color` parameter is a string
``'RRGGBB[TT]'`` representing red, green, blue and optionally
transparency.
"""
def _convert_view_result(self, result):
data = OrderedDict()
data['item'] = list(result[0])
data['settings'] = OrderedDict()
if len(result) > 1:
x_axis = result[1]
if x_axis is None:
x_axis = ''
if not isinstance(x_axis, (tuple, list)):
x_axis = [x_axis]
data['settings']['axisx'] = x_axis
if len(result) > 2:
y_axis = result[2]
if y_axis is None:
y_axis = ''
if not isinstance(y_axis, (tuple, list)):
y_axis = [y_axis]
data['settings']['axisy'] = y_axis
if len(result) > 3:
data['settings']['colour'] = result[3]
return data
line_chart = LineChartWidgetDecorator
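# Illustrative view (hypothetical, not part of the original module):
# @line_chart
# def sales_widget(request):
#     return ([3, 5, 9, 12], ['Q1', 'Q4'], 'Units sold', '0000FF')  # values, x, y, colour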
class GeckOMeterWidgetDecorator(WidgetDecorator):
"""
Geckoboard Geck-O-Meter decorator.
The decorated view must return a tuple `(value, min, max)`. The
`value` parameter represents the current value. The `min` and `max`
parameters represent the minimum and maximum value respectively.
They are either a value, or a tuple `(value, text)`. If used, the
`text` parameter will be displayed next to the minimum or maximum
value.
"""
def _convert_view_result(self, result):
value, min, max = result
data = OrderedDict()
data['item'] = value
data['max'] = OrderedDict()
data['min'] = OrderedDict()
if not isinstance(max, (tuple, list)):
max = [max]
data['max']['value'] = max[0]
if len(max) > 1:
data['max']['text'] = max[1]
if not isinstance(min, (tuple, list)):
min = [min]
data['min']['value'] = min[0]
if len(min) > 1:
data['min']['text'] = min[1]
return data
geck_o_meter = GeckOMeterWidgetDecorator
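# Illustrative view (hypothetical, not part of the original module):
# @geck_o_meter
# def capacity_widget(request):
#     return (42, 0, (100, 'max capacity'))  # value, min, max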
class FunnelWidgetDecorator(WidgetDecorator):
"""
Geckoboard Funnel decorator.
The decorated view must return a dictionary with at least an `items`
entry: `{'items': [(100, '100 %'), (50, '50 %')]}`.
Optional keys are:
type: 'standard' (default) or 'reverse'. Determines the
order of the colours.
percentage: 'show' (default) or 'hide'. Determines whether or
not the percentage value is shown.
sort: `False` (default) or `True`. Sort the entries by
value or not.
"""
def _convert_view_result(self, result):
data = OrderedDict()
items = result.get('items', [])
# sort the items in order if so desired
if result.get('sort'):
items.sort(reverse=True)
data["item"] = [{"value": k, "label": v} for k, v in items]
data["type"] = result.get('type', 'standard')
data["percentage"] = result.get('percentage', 'show')
return data
funnel = FunnelWidgetDecorator
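# Illustrative view (hypothetical, not part of the original module):
# @funnel
# def signup_widget(request):
#     return {'items': [(100, 'Visited'), (40, 'Signed up'), (12, 'Purchased')],
#             'sort': True}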
class BulletWidgetDecorator(WidgetDecorator):
"""
See http://support.geckoboard.com/entries/274940-custom-chart-widget-type-definitions
for more information.
The decorated method must return a dictionary containing these keys:
Required keys:
label: Main label, eg. "Revenue 2011 YTD".
axis_points: Points on the axis, eg. [0, 200, 400, 600, 800, 1000].
current: Current value range, eg. 500 or [100, 500]. A singleton
500 is internally converted to [0, 500].
comparative: Comparative value, eg. 600.
Optional keys:
orientation: One of 'horizontal' or 'vertical'. Defaults to horizontal.
sublabel: Appears below main label.
red: Red start and end, eg. [0,100]. Defaults are calculated
from axis_points.
amber: Amber start and end, eg. [0,100]. Defaults are calculated
from axis_points.
green: Green start and end, eg. [0,100]. Defaults are calculated
from axis_points.
projected: Projected value range, eg. 900 or [100, 900]. A singleton
900 is internally converted to [0, 900].
auto_scale: If true then values will be scaled down if they
do not fit into Geckoboard's UI, eg. a value of 1100
is represented as 1.1. If scaling takes place the sublabel
is suffixed with that information. Default is true.
"""
def _convert_view_result(self, result):
# Check required keys. We do not do type checking since this level of
# competence is assumed.
for key in ('label', 'axis_points', 'current', 'comparative'):
if key not in result:
raise RuntimeError("Key %s is required" % key)
# Handle singleton current and projected
current = result['current']
projected = result.get('projected', None)
if not isinstance(current, (list, tuple)):
current = [0, current]
if (projected is not None) and not isinstance(projected, (list, tuple)):
projected = [0, projected]
# If red, amber and green are not *all* supplied calculate defaults
axis_points = result['axis_points']
red = result.get('red', None)
amber = result.get('amber', None)
green = result.get('green', None)
if (red is None) or (amber is None) or (green is None):
if axis_points:
max_point = max(axis_points)
min_point = min(axis_points)
third = (max_point - min_point) // 3
red = (min_point, min_point + third - 1)
amber = (min_point + third, max_point - third - 1)
green = (max_point - third, max_point)
else:
red = amber = green = (0, 0)
# Scan axis points for largest value and scale to avoid overflow in
# Geckoboard's UI.
auto_scale = result.get('auto_scale', True)
if auto_scale and axis_points:
scale_label_map = {1000000000: 'billions', 1000000: 'millions',
1000: 'thousands'}
scale = 1
value = max(axis_points)
for n in (1000000000, 1000000, 1000):
if value >= n:
scale = n
break
# Little fixedpoint helper.
# todo: use a fixedpoint library
def scaler(value, scale):
return float('%.2f' % (value*1.0 / scale))
# Apply scale to all values
if scale > 1:
axis_points = [scaler(v, scale) for v in axis_points]
current = (scaler(current[0], scale), scaler(current[1], scale))
if projected is not None:
projected = (scaler(projected[0], scale),
scaler(projected[1], scale))
red = (scaler(red[0], scale), scaler(red[1], scale))
amber = (scaler(amber[0], scale), scaler(amber[1], scale))
green = (scaler(green[0], scale), scaler(green[1], scale))
result['comparative'] = scaler(result['comparative'], scale)
# Suffix sublabel
sublabel = result.get('sublabel', '')
if sublabel:
result['sublabel'] = '%s (%s)' % (sublabel,
scale_label_map[scale])
else:
result['sublabel'] = scale_label_map[scale].capitalize()
# Assemble structure
data = dict(
orientation=result.get('orientation', 'horizontal'),
item=dict(
label=result['label'],
axis=dict(point=axis_points),
range=dict(
red=dict(start=red[0], end=red[1]),
amber=dict(start=amber[0], end=amber[1]),
green=dict(start=green[0], end=green[1])
),
measure=dict(current=dict(start=current[0], end=current[1])),
comparative=dict(point=result['comparative'])
)
)
# Add optional items
if 'sublabel' in result:
data['item']['sublabel'] = result['sublabel']
if projected is not None:
data['item']['measure']['projected'] = dict(start=projected[0],
end=projected[1])
return data
bullet = BulletWidgetDecorator
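# Illustrative view (hypothetical, not part of the original module):
# @bullet
# def revenue_widget(request):
#     return {'label': 'Revenue 2011 YTD',
#             'axis_points': [0, 200, 400, 600, 800, 1000],
#             'current': 500,
#             'comparative': 600}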
def _is_api_key_correct(request):
"""Return whether the Geckoboard API key on the request is correct."""
api_key = getattr(settings, 'GECKOBOARD_API_KEY', None)
if api_key is None:
return True
auth = request.META.get('HTTP_AUTHORIZATION', '').split()
if len(auth) == 2:
if auth[0].lower() == b'basic':
request_key = base64.b64decode(auth[1]).split(b':')[0]
return request_key == api_key
return False
def _derive_key_and_iv(password, salt, key_length, iv_length):
d = d_i = b''
while len(d) < key_length + iv_length:
d_i = md5(d_i + password + salt).digest()
d += d_i
return d[:key_length], d[key_length:key_length+iv_length]
def _encrypt(data):
"""Equivalent to OpenSSL using 256 bit AES in CBC mode"""
BS = AES.block_size
def pad(s):
n = BS - len(s) % BS
char = chr(n).encode('utf8')
return s + n * char
password = settings.GECKOBOARD_PASSWORD
salt = Random.new().read(BS - len('Salted__'))
key, iv = _derive_key_and_iv(password, salt, 32, BS)
cipher = AES.new(key, AES.MODE_CBC, iv)
encrypted = b'Salted__' + salt + cipher.encrypt(pad(data))
return base64.b64encode(encrypted)
def _render(request, data, encrypted, format=None):
"""
Render the data to Geckoboard. If the `format` parameter is passed
to the widget it defines the output format. Otherwise the output
format is based on the `format` request parameter.
    A `format` parameter of ``json`` or ``2`` renders JSON output, any
other value renders XML.
"""
if not format:
format = request.POST.get('format', '')
if not format:
format = request.GET.get('format', '')
if format == 'json' or format == '2':
return _render_json(data, encrypted)
else:
return _render_xml(data, encrypted)
def _render_json(data, encrypted=False):
data_json = json.dumps(data).encode('utf8')
if encrypted:
data_json = _encrypt(data_json)
return data_json, 'application/json'
def _render_xml(data, encrypted=False):
if encrypted:
raise ValueError("encryption requested for XML output but unsupported")
doc = Document()
root = doc.createElement('root')
doc.appendChild(root)
_build_xml(doc, root, data)
return doc.toxml(), 'application/xml'
def _build_xml(doc, parent, data):
if isinstance(data, (tuple, list)):
_build_list_xml(doc, parent, data)
elif isinstance(data, dict):
_build_dict_xml(doc, parent, data)
else:
_build_str_xml(doc, parent, data)
def _build_str_xml(doc, parent, data):
parent.appendChild(doc.createTextNode(six.text_type(data)))
def _build_list_xml(doc, parent, data):
for item in data:
_build_xml(doc, parent, item)
def _build_dict_xml(doc, parent, data):
    tags = sorted(data.keys())  # order tags for testing ease
for tag in tags:
item = data[tag]
if isinstance(item, (list, tuple)):
for subitem in item:
elem = doc.createElement(tag)
_build_xml(doc, elem, subitem)
parent.appendChild(elem)
else:
elem = doc.createElement(tag)
_build_xml(doc, elem, item)
parent.appendChild(elem)
class GeckoboardException(Exception):
"""
Represents an error with the Geckoboard decorators.
"""
|
mit
| -4,231,649,804,232,618,500 | 33.274766 | 94 | 0.578012 | false |
eukaryote/asn1crypto
|
asn1crypto/_win/_ws2_32.py
|
1
|
2000
|
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import socket
from .._ffi import (
buffer_from_bytes,
bytes_from_buffer,
cast_void_p,
FFIEngineError,
is_null,
string_from_buffer,
unicode_buffer,
)
try:
from ._ws2_32_cffi import ws2_32
except (FFIEngineError):
from ._ws2_32_ctypes import ws2_32
AF_INET = 2
AF_INET6 = 23
def inet_ntop(address_family, packed_ip):
"""
    Windows compatibility shim for socket.inet_ntop().
:param address_family:
socket.AF_INET for IPv4 or socket.AF_INET6 for IPv6
:param packed_ip:
A byte string of the network form of an IP address
:return:
A unicode string of the IP address
"""
family = {
socket.AF_INET: AF_INET,
socket.AF_INET6: AF_INET6,
}[address_family]
buffer_size = 46 if family == AF_INET6 else 16
buffer = unicode_buffer(buffer_size)
packed_ip_buffer = buffer_from_bytes(packed_ip)
result = ws2_32.InetNtopW(family, cast_void_p(packed_ip_buffer), buffer, buffer_size)
if is_null(result):
raise OSError('Windows error %s calling InetNtop' % ws2_32.WSAGetLastError())
return string_from_buffer(buffer)
def inet_pton(address_family, ip_string):
"""
    Windows compatibility shim for socket.inet_pton().
:param address_family:
socket.AF_INET for IPv4 or socket.AF_INET6 for IPv6
:param ip_string:
A unicode string of an IP address
:return:
A byte string of the network form of the IP address
"""
family = {
socket.AF_INET: AF_INET,
socket.AF_INET6: AF_INET6,
}[address_family]
buffer_size = 16 if family == AF_INET6 else 4
buffer = buffer_from_bytes(buffer_size)
result = ws2_32.InetPtonW(family, ip_string, buffer)
if result != 1:
        raise OSError('Windows error %s calling InetPton' % ws2_32.WSAGetLastError())
return bytes_from_buffer(buffer, buffer_size)
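# Illustrative round trip (not part of the original module), assuming a Windows
# host where these shims are actually exercised:
# >>> packed = inet_pton(socket.AF_INET, '192.0.2.1')
# >>> inet_ntop(socket.AF_INET, packed)
# '192.0.2.1'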
|
mit
| 7,021,345,125,814,088,000 | 24 | 89 | 0.6515 | false |
PLOS/allofplos
|
allofplos/elements/license.py
|
1
|
5138
|
import lxml.etree as et
import re
# Creative Commons links
xlink_href = '{http://www.w3.org/1999/xlink}href'
cc_by_4_link = 'https://creativecommons.org/licenses/by/4.0/'
cc_by_3_link = 'https://creativecommons.org/licenses/by/3.0/'
cc0_link = 'https://creativecommons.org/publicdomain/zero/1.0/'
cc_by_3_igo_link = 'https://creativecommons.org/licenses/by/3.0/igo/'
crown_link = 'http://www.nationalarchives.gov.uk/doc/open-government-licence/version/3/'
cc_dict = {'CC-BY 4.0': cc_by_4_link,
'CC-BY 3.0': cc_by_3_link,
'CC0': cc0_link,
'CC-BY 3.0 IGO': cc_by_3_igo_link,
'Crown Copyright': crown_link,
}
class License():
"""For parsing the license element of articles."""
def __init__(self, permissions_element, doi):
"""Initialize an instance of the license class."""
self.element = permissions_element
self.doi = doi
def __iter__(self):
"""Provides the ability to cast License as a dictionary using
dict(License(…)).
Returns a generator of (key, value) tuples, which when passed into
dict(), will create the appropriate dictionary.
"""
return ((key, value) for key, value in self.license.items())
@property
def license(self):
"""Dictionary of CC license information from the article license field.
"""
lic = ''
cc_link = ''
copy_year = ''
copy_holder = ''
permissions = self.element
if permissions.xpath('./copyright-year'):
copy_year = int(permissions.xpath('./copyright-year')[0].text.strip())
if permissions.xpath('./copyright-holder'):
try:
copy_holder = ', '.join([x.text.strip() for x in permissions.xpath('./copyright-holder')])
except AttributeError:
print('error getting copyright holder for {}'.format(self.doi))
license = permissions.xpath('./license')[0]
if license.attrib.get(xlink_href):
cc_link = license.attrib[xlink_href]
elif license.xpath('.//ext-link'):
link = license.xpath('.//ext-link')[0]
cc_link = link.attrib[xlink_href]
if cc_link:
if cc_link == cc_by_4_link or any(x in cc_link for x in ["Attribution", "4.0"]):
lic = 'CC-BY 4.0'
elif cc_link == cc_by_3_igo_link or 'by/3.0/igo' in cc_link:
lic = 'CC-BY 3.0 IGO'
elif cc_link == cc_by_3_link or 'by/3.0' in cc_link:
lic = 'CC-BY 3.0'
elif cc_link == cc0_link or 'zero/1.0/' in cc_link:
lic = 'CC0'
elif cc_link == 'http://www.nationalarchives.gov.uk/doc/open-government-licence/open-government-licence.htm' \
or 'open-government-licence' in cc_link:
lic = "Crown Copyright"
elif cc_link == 'http://www.plos.org/oa/':
lic = 'CC-BY 3.0 IGO'
else:
print('not 4.0', self.doi, link.attrib[xlink_href])
lic = ''
else:
lic = self.parse_license(license)
lic_dict = {'license': lic,
'license_link': cc_dict.get(lic, ''),
'copyright_holder': copy_holder,
'copyright_year': copy_year}
return lic_dict
def parse_license(self, license):
"""For license elements without external links, figure out the appropriate copyright.
:param license_element: an article XML element with the tag <license>
:return: license name
"""
license_text = ' '.join(re.split('\+|\n|\t| ', et.tostring(license, method='text', encoding='unicode')))
license_text = ''.join(line.lstrip(' \t') for line in license_text.splitlines(True))
license_text = license_text.replace('\n', ' ').replace('\r', '')
if any(x in license_text.lower() for x in ["commons attribution license", "creative commons attrib"]):
lic = 'CC-BY 4.0'
if any(char.isdigit() for char in license_text):
digits = [char for char in license_text if char.isdigit()]
# Flag numbers in case it specifies a CC version number
print("Number found in CC license string for {}".format(self.doi), digits)
elif "commons public domain" in license_text.lower() or any(x in license_text for x in ['CC0', 'CCO public', "public domain"]):
lic = 'CC0'
elif "creative commons" in license_text.lower():
print(self.doi, 'unknown CC', license_text)
lic = ''
else:
if 'Public Library of Science Open-Access License' in license_text:
lic = 'CC-BY 4.0'
elif "crown copyright" in license_text.lower() or \
any(x in license_text for x in ['Open Government Licen', 'Public Sector Information Regulations']):
lic = 'Crown Copyright'
elif "WHO" in license_text:
lic = 'CC-BY 3.0 IGO'
else:
lic = 'CC-BY 4.0'
return lic
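# Illustrative usage (hypothetical permissions element and DOI, not part of the
# original module): casting a License to dict exposes the parsed metadata.
# >>> dict(License(permissions_element, '10.1371/journal.pone.0000000'))
# {'license': 'CC-BY 4.0', 'license_link': '...', 'copyright_holder': '...', 'copyright_year': ...}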
|
mit
| 7,154,977,985,650,922,000 | 44.052632 | 135 | 0.560358 | false |
JuezUN/INGInious
|
inginious/frontend/pages/course_admin/aggregation_info.py
|
1
|
2947
|
# -*- coding: utf-8 -*-
#
# This file is part of INGInious. See the LICENSE and the COPYRIGHTS files for
# more information about the licensing of this file.
import web
from bson.objectid import ObjectId
from inginious.frontend.pages.course_admin.utils import make_csv, INGIniousAdminPage
class CourseAggregationInfoPage(INGIniousAdminPage):
""" List information about a aggregation """
def GET_AUTH(self, courseid, aggregationid): # pylint: disable=arguments-differ
""" GET request """
course, __ = self.get_course_and_check_rights(courseid)
if course.is_lti():
raise web.notfound()
return self.page(course, aggregationid)
def submission_url_generator(self, aggregationid, taskid):
""" Generates a submission url """
return "?format=taskid%2Faggregation&tasks=" + taskid + "&aggregations=" + str(aggregationid)
def page(self, course, aggregationid):
""" Get all data and display the page """
aggregation = self.database.aggregations.find_one({"_id": ObjectId(aggregationid)})
data = list(self.database.submissions.aggregate(
[
{
"$match":
{
"courseid": course.get_id(),
"username": {"$in": aggregation["students"]}
}
},
{
"$group":
{
"_id": "$taskid",
"tried": {"$sum": 1},
"succeeded": {"$sum": {"$cond": [{"$eq": ["$result", "success"]}, 1, 0]}},
"grade": {"$max": "$grade"}
}
}
]))
tasks = course.get_tasks()
result = dict([(taskid, {"taskid": taskid, "name": tasks[taskid].get_name_or_id(self.user_manager.session_language()), "tried": 0, "status": "notviewed",
"grade": 0, "url": self.submission_url_generator(aggregationid, taskid)}) for taskid in tasks])
for taskdata in data:
if taskdata["_id"] in result:
result[taskdata["_id"]]["tried"] = taskdata["tried"]
if taskdata["tried"] == 0:
result[taskdata["_id"]]["status"] = "notattempted"
elif taskdata["succeeded"]:
result[taskdata["_id"]]["status"] = "succeeded"
else:
result[taskdata["_id"]]["status"] = "failed"
result[taskdata["_id"]]["grade"] = taskdata["grade"]
if "csv" in web.input():
return make_csv(result)
results = sorted(list(result.values()), key=lambda result: (tasks[result["taskid"]].get_order(), result["taskid"]))
return self.template_helper.get_renderer().course_admin.aggregation_info(course, aggregation, results)
|
agpl-3.0
| 3,588,982,702,635,367,000 | 39.930556 | 161 | 0.522226 | false |
l11x0m7/Paper
|
Modulation/code/signal_analysis.py
|
1
|
7322
|
# -*- encoding:utf-8 -*-
import os
import sys
import logging
from copy import deepcopy
from matplotlib import pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
reload(sys)
def drawModulation(dirpath, rownum=200):
"""信号文件绘图
:param filepath: 需要显示绘图的信号文件路径
:return: None
"""
plt.figure(1)
filepaths = os.listdir(dirpath)
fileorder = 1
useful_filepaths = [f for f in filepaths if f.startswith('parse_mod')]
for filepath in useful_filepaths:
count = np.random.randint(1, rownum + 1)
with open(dirpath + '/' + filepath, 'rb') as fr:
x = list()
vals = list()
name = filepath
for i, line in enumerate(fr):
if i < count:
continue
if i > count:
break
vals = line.strip().split('\t')
vals = map(float, vals)
x = range(len(vals))
plt.subplot(2 * len(useful_filepaths), 1, fileorder * 2 - 1)
plt.plot(x, vals, color = ((fileorder * 20 + 25) % 255 / 255.,
(fileorder * 5 + 35) % 255 / 255.,
(fileorder * 30 + 45) % 255 / 255.))
plt.xlabel('symbol number')
plt.ylabel('signal amplitude')
plt.title(name)
fileorder += 1
plt.show()
def drawMixSignal(filepath, sample=5):
"""信号文件绘图
:param filepath: 需要显示绘图的信号文件路径
:return: None
"""
plt.figure(1)
with open(filepath, 'rb') as fr:
rowNumber = sum(1 for _ in fr)
with open(filepath, 'rb') as fr:
sampleSignals = set(np.random.choice(range(rowNumber), sample, replace=False))
rowOrder = 1
for i, line in enumerate(fr):
if i not in sampleSignals:
continue
vals = line.strip().split('\t')
vals = map(float, vals)
x = range(len(vals))
plt.subplot(sample, 1, rowOrder)
plt.plot(x, vals, color = ((rowOrder * 20 + 25) % 255 / 255.,
(rowOrder * 5 + 35) % 255 / 255.,
(rowOrder * 30 + 45) % 255 / 255.))
rowOrder += 1
plt.show()
def mixSignalAndTagging(dirpath='../data', savepath='../data/mixSignals.txt', modeSize=[]):
"""信号混叠和标注
对已有的信号进行混叠.
1-7分别对应:2ASK、QPSK、2FSK、2ASK+QPSK、2ASK+2FSK、QPSK+2FSK、2ASK+QPSK+2FSK
:param dirpath: signal path
:param modeSize: the sample size in each mode, from `1` to `n`
:return: mixed signal
"""
def tagger(tag):
"""
        Tag a sample; the label types are currently filled in manually.
:param tag: like `1\t2`, `0\t2`, `0\t1\t2`
:return: `int` from 1 to 7 representing label
"""
if tag == '\t'.join(['0', ]):
return 1
elif tag == '\t'.join(['1', ]):
return 2
elif tag == '\t'.join(['2', ]):
return 3
elif tag == '\t'.join(['0', '1']):
return 4
elif tag == '\t'.join(['0', '2']):
return 5
elif tag == '\t'.join(['1', '2']):
return 6
elif tag == '\t'.join(['0', '1', '2']):
return 7
def C(n, m):
def calcNext(count, point, l, r, res, pre):
if(point > r):
return
if count == 1:
for i in xrange(point, r + 1):
pre.append(i)
res.append(deepcopy(pre))
pre.pop()
else:
for i in xrange(point, r + 1):
pre.append(i)
calcNext(count - 1, i + 1, l, r, res, pre)
pre.pop()
res = list()
calcNext(m, 0, 0, n - 1, res, [])
return res
files = os.listdir(dirpath)
signals = {}
for filepath in files:
if not filepath.startswith('parse_'):
continue
with open(dirpath + '/' + filepath, 'rb') as fr:
modName = filepath.split('parse_mod_')[1].split('.txt')[0]
signal = list()
for line in fr:
amps = line.strip().split('\t')
amps = map(float, amps)
signal.append(amps)
# signal = zip(*signal)
# signal = np.tile(signal, (20, 1))
signals[modName] = signal
modTypes = np.asarray(signals.keys())
modeNum = len(modTypes)
totalSignals = np.array([])
totalTags = list()
for mixNum in xrange(1, modeNum + 1):
groupIndeces = C(modeNum, mixNum)
groupNum = len(groupIndeces)
sampleEachMod = modeSize[mixNum - 1] // groupNum
groupSignals = np.array([])
for groupInd in groupIndeces:
mixSignals = np.array([])
tag = '\t'.join(map(str, sorted(groupInd)))
tag = str(tagger(tag))
while len(mixSignals) < sampleEachMod:
mixSignal = np.zeros([len(signals[modTypes[0]]), len(signals[modTypes[0]][0])])
for ind in groupInd:
curSignal = np.asarray(signals[modTypes[ind]])
randomIndeces = np.random.choice(len(curSignal), len(curSignal), replace=False)
randSignal = curSignal[randomIndeces]
mixSignal += randSignal
mixSignals = np.concatenate([mixSignals, mixSignal]) if mixSignals.shape[0] != 0 else mixSignal
mixSignals = mixSignals[:sampleEachMod, :]
totalTags.extend([tag] * sampleEachMod)
groupSignals = np.concatenate([groupSignals, mixSignals]) if groupSignals.shape[0] != 0 else mixSignals
totalSignals = np.concatenate([totalSignals, groupSignals]) if totalSignals.shape[0] != 0 else groupSignals
assert len(totalTags) == sum(modeSize)
assert len(totalSignals) == sum(modeSize)
indeces = np.random.choice(len(totalSignals), len(totalSignals), replace=False)
totalSignals = np.asarray(totalSignals)[indeces]
totalTags = np.asarray(totalTags)[indeces]
with open(savepath, 'wb') as fw:
for i in xrange(len(totalTags)):
signal = totalSignals[i]
signal = map(str, signal)
tag = totalTags[i]
fw.write('\t'.join(['\t'.join(signal), tag]) + '\n')
def split(filepath):
with open(filepath, 'rb') as fr:
X = list()
for line in fr:
X.append(line.strip())
X_train, X_test = train_test_split(X, test_size=0.2, random_state=42)
filename = filepath.split('/')[-1]
dirbase = filepath.split('/')[:-1]
with open('/'.join(dirbase + ['train_' + filename]), 'wb') as fw:
for line in X_train:
fw.write(line + '\n')
with open('/'.join(dirbase + ['test_' + filename]), 'wb') as fw:
for line in X_test:
fw.write(line + '\n')
if __name__ == '__main__':
# drawModulation('../data/5dB')
drawMixSignal('../data/50dB/mixSignals.txt')
# mixSignalAndTagging('../data/5dB', '../data/5dB/mixSignals.txt', [600, 1500, 2000])
# split('../data/5dB/mixSignals.txt')
|
apache-2.0
| -3,099,339,534,664,653,000 | 34.088235 | 115 | 0.516904 | false |
centricular/meson
|
mesonbuild/dependencies.py
|
1
|
52213
|
# Copyright 2013-2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file contains the detection logic for external
# dependencies. Mostly just uses pkg-config but also contains
# custom logic for packages that don't provide them.
# Currently one file, should probably be split into a
# package before this gets too big.
import re
import os, stat, glob, subprocess, shutil
import sysconfig
from collections import OrderedDict
from . mesonlib import MesonException
from . import mlog
from . import mesonlib
from .environment import detect_cpu_family, for_windows
class DependencyException(MesonException):
def __init__(self, *args, **kwargs):
MesonException.__init__(self, *args, **kwargs)
class Dependency():
def __init__(self, type_name='unknown'):
self.name = "null"
self.is_found = False
self.type_name = type_name
def __repr__(self):
s = '<{0} {1}: {2}>'
return s.format(self.__class__.__name__, self.name, self.is_found)
def get_compile_args(self):
return []
def get_link_args(self):
return []
def found(self):
return self.is_found
def get_sources(self):
"""Source files that need to be added to the target.
As an example, gtest-all.cc when using GTest."""
return []
def get_name(self):
return self.name
def get_exe_args(self):
return []
def need_threads(self):
return False
def type_name(self):
return self.type_name
def get_pkgconfig_variable(self, variable_name):
raise MesonException('Tried to get a pkg-config variable from a non-pkgconfig dependency.')
class InternalDependency(Dependency):
def __init__(self, version, incdirs, compile_args, link_args, libraries, sources, ext_deps):
super().__init__('internal')
self.version = version
self.include_directories = incdirs
self.compile_args = compile_args
self.link_args = link_args
self.libraries = libraries
self.sources = sources
self.ext_deps = ext_deps
def get_compile_args(self):
return self.compile_args
def get_link_args(self):
return self.link_args
def get_version(self):
return self.version
class PkgConfigDependency(Dependency):
pkgconfig_found = None
def __init__(self, name, environment, kwargs):
Dependency.__init__(self, 'pkgconfig')
self.is_libtool = False
self.required = kwargs.get('required', True)
self.static = kwargs.get('static', False)
self.silent = kwargs.get('silent', False)
if not isinstance(self.static, bool):
raise DependencyException('Static keyword must be boolean')
self.cargs = []
self.libs = []
if 'native' in kwargs and environment.is_cross_build():
want_cross = not kwargs['native']
else:
want_cross = environment.is_cross_build()
self.name = name
if PkgConfigDependency.pkgconfig_found is None:
self.check_pkgconfig()
self.is_found = False
if not PkgConfigDependency.pkgconfig_found:
if self.required:
raise DependencyException('Pkg-config not found.')
return
if environment.is_cross_build() and want_cross:
if "pkgconfig" not in environment.cross_info.config["binaries"]:
raise DependencyException('Pkg-config binary missing from cross file.')
pkgbin = environment.cross_info.config["binaries"]['pkgconfig']
self.type_string = 'Cross'
else:
pkgbin = 'pkg-config'
self.type_string = 'Native'
mlog.debug('Determining dependency %s with pkg-config executable %s.' % (name, pkgbin))
self.pkgbin = pkgbin
ret, self.modversion = self._call_pkgbin(['--modversion', name])
if ret != 0:
if self.required:
raise DependencyException('%s dependency %s not found.' % (self.type_string, name))
self.modversion = 'none'
return
found_msg = ['%s dependency' % self.type_string, mlog.bold(name), 'found:']
self.version_requirement = kwargs.get('version', None)
if self.version_requirement is None:
self.is_found = True
else:
if not isinstance(self.version_requirement, str):
raise DependencyException('Version argument must be string.')
self.is_found = mesonlib.version_compare(self.modversion, self.version_requirement)
if not self.is_found:
found_msg += [mlog.red('NO'), 'found {!r}'.format(self.modversion),
'but need {!r}'.format(self.version_requirement)]
if not self.silent:
mlog.log(*found_msg)
if self.required:
raise DependencyException(
'Invalid version of a dependency, needed %s %s found %s.' %
(name, self.version_requirement, self.modversion))
return
found_msg += [mlog.green('YES'), self.modversion]
if not self.silent:
mlog.log(*found_msg)
# Fetch cargs to be used while using this dependency
self._set_cargs()
# Fetch the libraries and library paths needed for using this
self._set_libs()
def _call_pkgbin(self, args):
p = subprocess.Popen([self.pkgbin] + args,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=os.environ, universal_newlines=True)
out = p.communicate()[0]
return (p.returncode, out.strip())
def _set_cargs(self):
ret, out = self._call_pkgbin(['--cflags', self.name])
if ret != 0:
raise DependencyException('Could not generate cargs for %s:\n\n%s' % \
                                      (self.name, out))
self.cargs = out.split()
def _set_libs(self):
libcmd = [self.name, '--libs']
if self.static:
libcmd.append('--static')
ret, out = self._call_pkgbin(libcmd)
if ret != 0:
raise DependencyException('Could not generate libs for %s:\n\n%s' % \
                                      (self.name, out))
self.libs = []
for lib in out.split():
if lib.endswith(".la"):
shared_libname = self.extract_libtool_shlib(lib)
shared_lib = os.path.join(os.path.dirname(lib), shared_libname)
if not os.path.exists(shared_lib):
shared_lib = os.path.join(os.path.dirname(lib), ".libs", shared_libname)
if not os.path.exists(shared_lib):
raise DependencyException('Got a libtools specific "%s" dependencies'
'but we could not compute the actual shared'
'library path' % lib)
lib = shared_lib
self.is_libtool = True
self.libs.append(lib)
def get_pkgconfig_variable(self, variable_name):
ret, out = self._call_pkgbin(['--variable=' + variable_name, self.name])
variable = ''
if ret != 0:
if self.required:
raise DependencyException('%s dependency %s not found.' %
(self.type_string, self.name))
else:
variable = out.strip()
mlog.debug('Got pkgconfig variable %s : %s' % (variable_name, variable))
return variable
def get_modversion(self):
return self.modversion
def get_version(self):
return self.get_modversion()
def get_compile_args(self):
return self.cargs
def get_link_args(self):
return self.libs
def check_pkgconfig(self):
try:
p = subprocess.Popen(['pkg-config', '--version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out = p.communicate()[0]
if p.returncode == 0:
if not self.silent:
mlog.log('Found pkg-config:', mlog.bold(shutil.which('pkg-config')),
'(%s)' % out.decode().strip())
PkgConfigDependency.pkgconfig_found = True
return
except Exception:
pass
PkgConfigDependency.pkgconfig_found = False
if not self.silent:
mlog.log('Found Pkg-config:', mlog.red('NO'))
def found(self):
return self.is_found
def extract_field(self, la_file, fieldname):
with open(la_file) as f:
for line in f:
arr = line.strip().split('=')
if arr[0] == fieldname:
return arr[1][1:-1]
return None
def extract_dlname_field(self, la_file):
return self.extract_field(la_file, 'dlname')
def extract_libdir_field(self, la_file):
return self.extract_field(la_file, 'libdir')
def extract_libtool_shlib(self, la_file):
'''
Returns the path to the shared library
corresponding to this .la file
'''
dlname = self.extract_dlname_field(la_file)
if dlname is None:
return None
# Darwin uses absolute paths where possible; since the libtool files never
# contain absolute paths, use the libdir field
if mesonlib.is_osx():
dlbasename = os.path.basename(dlname)
libdir = self.extract_libdir_field(la_file)
if libdir is None:
return dlbasename
return os.path.join(libdir, dlbasename)
# From the comments in extract_libtool(), older libtools had
# a path rather than the raw dlname
return os.path.basename(dlname)
class WxDependency(Dependency):
wx_found = None
def __init__(self, environment, kwargs):
Dependency.__init__(self, 'wx')
self.is_found = False
if WxDependency.wx_found is None:
self.check_wxconfig()
if not WxDependency.wx_found:
mlog.log("Neither wx-config-3.0 nor wx-config found; can't detect dependency")
return
p = subprocess.Popen([self.wxc, '--version'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out = p.communicate()[0]
if p.returncode != 0:
mlog.log('Dependency wxwidgets found:', mlog.red('NO'))
self.cargs = []
self.libs = []
else:
self.modversion = out.decode().strip()
version_req = kwargs.get('version', None)
if version_req is not None:
if not mesonlib.version_compare(self.modversion, version_req):
                    mlog.log('Wxwidgets version %s does not fulfill requirement %s' %\
(self.modversion, version_req))
return
mlog.log('Dependency wxwidgets found:', mlog.green('YES'))
self.is_found = True
self.requested_modules = self.get_requested(kwargs)
# wx-config seems to have a cflags as well but since it requires C++,
# this should be good, at least for now.
p = subprocess.Popen([self.wxc, '--cxxflags'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out = p.communicate()[0]
if p.returncode != 0:
raise DependencyException('Could not generate cargs for wxwidgets.')
self.cargs = out.decode().split()
p = subprocess.Popen([self.wxc, '--libs'] + self.requested_modules,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out = p.communicate()[0]
if p.returncode != 0:
raise DependencyException('Could not generate libs for wxwidgets.')
self.libs = out.decode().split()
def get_requested(self, kwargs):
modules = 'modules'
if not modules in kwargs:
return []
candidates = kwargs[modules]
if isinstance(candidates, str):
return [candidates]
for c in candidates:
if not isinstance(c, str):
raise DependencyException('wxwidgets module argument is not a string.')
return candidates
def get_modversion(self):
return self.modversion
def get_compile_args(self):
return self.cargs
def get_link_args(self):
return self.libs
def check_wxconfig(self):
for wxc in ['wx-config-3.0', 'wx-config']:
try:
p = subprocess.Popen([wxc, '--version'], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out = p.communicate()[0]
if p.returncode == 0:
mlog.log('Found wx-config:', mlog.bold(shutil.which(wxc)),
'(%s)' % out.decode().strip())
self.wxc = wxc
WxDependency.wx_found = True
return
except Exception:
pass
WxDependency.wxconfig_found = False
mlog.log('Found wx-config:', mlog.red('NO'))
def found(self):
return self.is_found
class ExternalProgram():
def __init__(self, name, fullpath=None, silent=False, search_dir=None):
self.name = name
if fullpath is not None:
if not isinstance(fullpath, list):
self.fullpath = [fullpath]
else:
self.fullpath = fullpath
else:
self.fullpath = self._search(name, search_dir)
if not silent:
if self.found():
mlog.log('Program', mlog.bold(name), 'found:', mlog.green('YES'),
'(%s)' % ' '.join(self.fullpath))
else:
mlog.log('Program', mlog.bold(name), 'found:', mlog.red('NO'))
@staticmethod
def _shebang_to_cmd(script):
"""
Windows does not understand shebangs, so we check if the file has a
shebang and manually parse it to figure out the interpreter to use
"""
try:
with open(script) as f:
first_line = f.readline().strip()
if first_line.startswith('#!'):
commands = first_line[2:].split('#')[0].strip().split()
if mesonlib.is_windows():
# Windows does not have /usr/bin.
commands[0] = commands[0].split('/')[-1]
if commands[0] == 'env':
commands = commands[1:]
return commands + [script]
except Exception:
pass
return False
@staticmethod
def _is_executable(path):
suffix = os.path.splitext(path)[-1].lower()[1:]
if mesonlib.is_windows():
if suffix == 'exe' or suffix == 'com' or suffix == 'bat':
return True
elif os.access(path, os.X_OK):
return True
return False
def _search_dir(self, name, search_dir):
if search_dir is None:
return False
trial = os.path.join(search_dir, name)
if not os.path.exists(trial):
return False
if self._is_executable(trial):
return [trial]
# Now getting desperate. Maybe it is a script file that is a) not chmodded
# executable or b) we are on windows so they can't be directly executed.
return self._shebang_to_cmd(trial)
def _search(self, name, search_dir):
commands = self._search_dir(name, search_dir)
if commands:
return commands
# Do a standard search in PATH
fullpath = shutil.which(name)
if fullpath or not mesonlib.is_windows():
# On UNIX-like platforms, the standard PATH search is enough
return [fullpath]
# On Windows, interpreted scripts must have an extension otherwise they
# cannot be found by a standard PATH search. So we do a custom search
# where we manually search for a script with a shebang in PATH.
search_dirs = os.environ.get('PATH', '').split(';')
for search_dir in search_dirs:
commands = self._search_dir(name, search_dir)
if commands:
return commands
return [None]
def found(self):
return self.fullpath[0] is not None
def get_command(self):
return self.fullpath
def get_name(self):
return self.name
class ExternalLibrary(Dependency):
# TODO: Add `lang` to the parent Dependency object so that dependencies can
# be expressed for languages other than C-like
def __init__(self, name, link_args=None, language=None, silent=False):
super().__init__('external')
self.name = name
self.is_found = False
self.link_args = []
self.lang_args = []
if link_args:
self.is_found = True
if not isinstance(link_args, list):
link_args = [link_args]
if language:
self.lang_args = {language: link_args}
else:
self.link_args = link_args
if not silent:
if self.is_found:
mlog.log('Library', mlog.bold(name), 'found:', mlog.green('YES'))
else:
mlog.log('Library', mlog.bold(name), 'found:', mlog.red('NO'))
def found(self):
return self.is_found
def get_link_args(self):
return self.link_args
def get_lang_args(self, lang):
if lang in self.lang_args:
return self.lang_args[lang]
return []
class BoostDependency(Dependency):
# Some boost libraries have different names for
# their sources and libraries. This dict maps
# between the two.
name2lib = {'test' : 'unit_test_framework'}
def __init__(self, environment, kwargs):
Dependency.__init__(self, 'boost')
self.name = 'boost'
self.environment = environment
self.libdir = ''
if 'native' in kwargs and environment.is_cross_build():
want_cross = not kwargs['native']
else:
want_cross = environment.is_cross_build()
try:
self.boost_root = os.environ['BOOST_ROOT']
if not os.path.isabs(self.boost_root):
raise DependencyException('BOOST_ROOT must be an absolute path.')
except KeyError:
self.boost_root = None
if self.boost_root is None:
if want_cross:
raise DependencyException('BOOST_ROOT is needed while cross-compiling')
if mesonlib.is_windows():
self.boost_root = self.detect_win_root()
self.incdir = self.boost_root
else:
self.incdir = '/usr/include'
else:
self.incdir = os.path.join(self.boost_root, 'include')
self.boost_inc_subdir = os.path.join(self.incdir, 'boost')
mlog.debug('Boost library root dir is', self.boost_root)
self.src_modules = {}
self.lib_modules = {}
self.lib_modules_mt = {}
self.detect_version()
self.requested_modules = self.get_requested(kwargs)
module_str = ', '.join(self.requested_modules)
if self.version is not None:
self.detect_src_modules()
self.detect_lib_modules()
self.validate_requested()
if self.boost_root is not None:
info = self.version + ', ' + self.boost_root
else:
info = self.version
mlog.log('Dependency Boost (%s) found:' % module_str, mlog.green('YES'),
'(' + info + ')')
else:
mlog.log("Dependency Boost (%s) found:" % module_str, mlog.red('NO'))
def detect_win_root(self):
globtext = 'c:\\local\\boost_*'
files = glob.glob(globtext)
if len(files) > 0:
return files[0]
return 'C:\\'
def get_compile_args(self):
args = []
if self.boost_root is not None:
if mesonlib.is_windows():
args.append('-I' + self.boost_root)
else:
args.append('-I' + os.path.join(self.boost_root, 'include'))
else:
args.append('-I' + self.incdir)
return args
def get_requested(self, kwargs):
candidates = kwargs.get('modules', [])
if isinstance(candidates, str):
return [candidates]
for c in candidates:
if not isinstance(c, str):
raise DependencyException('Boost module argument is not a string.')
return candidates
def validate_requested(self):
for m in self.requested_modules:
if m not in self.src_modules:
raise DependencyException('Requested Boost module "%s" not found.' % m)
def found(self):
return self.version is not None
def get_version(self):
return self.version
def detect_version(self):
try:
ifile = open(os.path.join(self.boost_inc_subdir, 'version.hpp'))
except FileNotFoundError:
self.version = None
return
with ifile:
for line in ifile:
if line.startswith("#define") and 'BOOST_LIB_VERSION' in line:
ver = line.split()[-1]
ver = ver[1:-1]
self.version = ver.replace('_', '.')
return
self.version = None
def detect_src_modules(self):
for entry in os.listdir(self.boost_inc_subdir):
entry = os.path.join(self.boost_inc_subdir, entry)
if stat.S_ISDIR(os.stat(entry).st_mode):
self.src_modules[os.path.split(entry)[-1]] = True
def detect_lib_modules(self):
if mesonlib.is_windows():
return self.detect_lib_modules_win()
return self.detect_lib_modules_nix()
def detect_lib_modules_win(self):
arch = detect_cpu_family(self.environment.coredata.compilers)
# Guess the libdir
if arch == 'x86':
gl = 'lib32*'
elif arch == 'x86_64':
gl = 'lib64*'
else:
# Does anyone do Boost cross-compiling to other archs on Windows?
gl = None
# See if the libdir is valid
if gl:
libdir = glob.glob(os.path.join(self.boost_root, gl))
else:
libdir = []
# Can't find libdir, bail
if len(libdir) == 0:
return
libdir = libdir[0]
self.libdir = libdir
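        # Note: the '-gd-' glob below only picks up the debug-tagged Boost
        # libraries (hence the FIXME); release-only installs will not match.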
globber = 'boost_*-gd-*.lib' # FIXME
for entry in glob.glob(os.path.join(libdir, globber)):
(_, fname) = os.path.split(entry)
base = fname.split('_', 1)[1]
modname = base.split('-', 1)[0]
self.lib_modules_mt[modname] = fname
def detect_lib_modules_nix(self):
libsuffix = None
if mesonlib.is_osx():
libsuffix = 'dylib'
else:
libsuffix = 'so'
globber = 'libboost_*.{}'.format(libsuffix)
if self.boost_root is None:
libdirs = mesonlib.get_library_dirs()
else:
libdirs = [os.path.join(self.boost_root, 'lib')]
for libdir in libdirs:
for entry in glob.glob(os.path.join(libdir, globber)):
lib = os.path.basename(entry)
name = lib.split('.')[0].split('_', 1)[-1]
# I'm not 100% sure what to do here. Some distros
# have modules such as thread only as -mt versions.
if entry.endswith('-mt.so'):
self.lib_modules_mt[name] = True
else:
self.lib_modules[name] = True
def get_win_link_args(self):
args = []
if self.boost_root:
args.append('-L' + self.libdir)
for module in self.requested_modules:
module = BoostDependency.name2lib.get(module, module)
if module in self.lib_modules_mt:
args.append(self.lib_modules_mt[module])
return args
def get_link_args(self):
if mesonlib.is_windows():
return self.get_win_link_args()
args = []
if self.boost_root:
args.append('-L' + os.path.join(self.boost_root, 'lib'))
for module in self.requested_modules:
module = BoostDependency.name2lib.get(module, module)
if module in self.lib_modules or module in self.lib_modules_mt:
linkcmd = '-lboost_' + module
args.append(linkcmd)
# FIXME a hack, but Boost's testing framework has a lot of
# different options and it's hard to determine what to do
# without feedback from actual users. Update this
# as we get more bug reports.
                if module == 'unit_test_framework':
args.append('-lboost_test_exec_monitor')
elif module + '-mt' in self.lib_modules_mt:
linkcmd = '-lboost_' + module + '-mt'
args.append(linkcmd)
                if module == 'unit_test_framework':
args.append('-lboost_test_exec_monitor-mt')
return args
def get_sources(self):
return []
def need_threads(self):
return 'thread' in self.requested_modules
class GTestDependency(Dependency):
def __init__(self, environment, kwargs):
Dependency.__init__(self, 'gtest')
self.main = kwargs.get('main', False)
self.name = 'gtest'
self.libname = 'libgtest.so'
self.libmain_name = 'libgtest_main.so'
self.include_dir = '/usr/include'
self.src_dirs = ['/usr/src/gtest/src', '/usr/src/googletest/googletest/src']
self.detect()
def found(self):
return self.is_found
def detect(self):
trial_dirs = mesonlib.get_library_dirs()
glib_found = False
gmain_found = False
for d in trial_dirs:
if os.path.isfile(os.path.join(d, self.libname)):
glib_found = True
if os.path.isfile(os.path.join(d, self.libmain_name)):
gmain_found = True
if glib_found and gmain_found:
self.is_found = True
self.compile_args = []
self.link_args = ['-lgtest']
if self.main:
self.link_args.append('-lgtest_main')
self.sources = []
mlog.log('Dependency GTest found:', mlog.green('YES'), '(prebuilt)')
elif self.detect_srcdir():
self.is_found = True
self.compile_args = ['-I' + self.src_include_dir]
self.link_args = []
if self.main:
self.sources = [self.all_src, self.main_src]
else:
self.sources = [self.all_src]
mlog.log('Dependency GTest found:', mlog.green('YES'), '(building self)')
else:
mlog.log('Dependency GTest found:', mlog.red('NO'))
self.is_found = False
return self.is_found
def detect_srcdir(self):
for s in self.src_dirs:
if os.path.exists(s):
self.src_dir = s
self.all_src = mesonlib.File.from_absolute_file(
os.path.join(self.src_dir, 'gtest-all.cc'))
self.main_src = mesonlib.File.from_absolute_file(
os.path.join(self.src_dir, 'gtest_main.cc'))
self.src_include_dir = os.path.normpath(os.path.join(self.src_dir, '..'))
return True
return False
def get_compile_args(self):
arr = []
if self.include_dir != '/usr/include':
arr.append('-I' + self.include_dir)
if hasattr(self, 'src_include_dir'):
arr.append('-I' + self.src_include_dir)
return arr
def get_link_args(self):
return self.link_args
def get_version(self):
return '1.something_maybe'
def get_sources(self):
return self.sources
def need_threads(self):
return True
class GMockDependency(Dependency):
def __init__(self, environment, kwargs):
Dependency.__init__(self, 'gmock')
# GMock may be a library or just source.
# Work with both.
self.name = 'gmock'
self.libname = 'libgmock.so'
trial_dirs = mesonlib.get_library_dirs()
gmock_found = False
for d in trial_dirs:
if os.path.isfile(os.path.join(d, self.libname)):
gmock_found = True
if gmock_found:
self.is_found = True
self.compile_args = []
self.link_args = ['-lgmock']
self.sources = []
mlog.log('Dependency GMock found:', mlog.green('YES'), '(prebuilt)')
return
for d in ['/usr/src/googletest/googlemock/src', '/usr/src/gmock/src', '/usr/src/gmock']:
if os.path.exists(d):
self.is_found = True
# Yes, we need both because there are multiple
# versions of gmock that do different things.
d2 = os.path.normpath(os.path.join(d, '..'))
self.compile_args = ['-I' + d, '-I' + d2]
self.link_args = []
all_src = mesonlib.File.from_absolute_file(os.path.join(d, 'gmock-all.cc'))
main_src = mesonlib.File.from_absolute_file(os.path.join(d, 'gmock_main.cc'))
if kwargs.get('main', False):
self.sources = [all_src, main_src]
else:
self.sources = [all_src]
mlog.log('Dependency GMock found:', mlog.green('YES'), '(building self)')
return
mlog.log('Dependency GMock found:', mlog.red('NO'))
self.is_found = False
def get_version(self):
return '1.something_maybe'
def get_compile_args(self):
return self.compile_args
def get_sources(self):
return self.sources
def get_link_args(self):
return self.link_args
def found(self):
return self.is_found
class QtBaseDependency(Dependency):
def __init__(self, name, env, kwargs):
Dependency.__init__(self, name)
self.name = name
self.qtname = name.capitalize()
self.qtver = name[-1]
if self.qtver == "4":
self.qtpkgname = 'Qt'
else:
self.qtpkgname = self.qtname
self.root = '/usr'
self.bindir = None
self.silent = kwargs.get('silent', False)
# We store the value of required here instead of passing it on to
# PkgConfigDependency etc because we want to try the qmake-based
# fallback as well.
self.required = kwargs.pop('required', True)
kwargs['required'] = False
mods = kwargs.get('modules', [])
self.cargs = []
self.largs = []
self.is_found = False
if isinstance(mods, str):
mods = [mods]
if len(mods) == 0:
raise DependencyException('No ' + self.qtname + ' modules specified.')
type_text = 'cross' if env.is_cross_build() else 'native'
found_msg = '{} {} {{}} dependency (modules: {}) found:' \
''.format(self.qtname, type_text, ', '.join(mods))
from_text = '`pkg-config`'
# Prefer pkg-config, then fallback to `qmake -query`
self._pkgconfig_detect(mods, env, kwargs)
if not self.is_found:
from_text = self._qmake_detect(mods, env, kwargs)
if not self.is_found:
# Reset compile args and link args
self.cargs = []
self.largs = []
from_text = '(checked pkg-config, qmake-{}, and qmake)' \
''.format(self.name)
self.version = 'none'
if self.required:
err_msg = '{} {} dependency not found {}' \
''.format(self.qtname, type_text, from_text)
raise DependencyException(err_msg)
if not self.silent:
mlog.log(found_msg.format(from_text), mlog.red('NO'))
return
from_text = '`{}`'.format(from_text)
if not self.silent:
mlog.log(found_msg.format(from_text), mlog.green('YES'))
def compilers_detect(self):
"Detect Qt (4 or 5) moc, uic, rcc in the specified bindir or in PATH"
if self.bindir:
moc = ExternalProgram(os.path.join(self.bindir, 'moc'), silent=True)
uic = ExternalProgram(os.path.join(self.bindir, 'uic'), silent=True)
rcc = ExternalProgram(os.path.join(self.bindir, 'rcc'), silent=True)
else:
# We don't accept unsuffixed 'moc', 'uic', and 'rcc' because they
# are sometimes older, or newer versions.
moc = ExternalProgram('moc-' + self.name, silent=True)
uic = ExternalProgram('uic-' + self.name, silent=True)
rcc = ExternalProgram('rcc-' + self.name, silent=True)
return moc, uic, rcc
def _pkgconfig_detect(self, mods, env, kwargs):
modules = OrderedDict()
for module in mods:
modules[module] = PkgConfigDependency(self.qtpkgname + module, env, kwargs)
self.is_found = True
for m in modules.values():
if not m.found():
self.is_found = False
return
self.cargs += m.get_compile_args()
self.largs += m.get_link_args()
self.version = m.modversion
# Try to detect moc, uic, rcc
if 'Core' in modules:
core = modules['Core']
else:
corekwargs = {'required': 'false', 'silent': 'true'}
core = PkgConfigDependency(self.qtpkgname + 'Core', env, corekwargs)
# Used by self.compilers_detect()
self.bindir = core.get_pkgconfig_variable('host_bins')
if not self.bindir:
# If exec_prefix is not defined, the pkg-config file is broken
prefix = core.get_pkgconfig_variable('exec_prefix')
if prefix:
self.bindir = os.path.join(prefix, 'bin')
def _find_qmake(self, qmake, env):
# Even when cross-compiling, if we don't get a cross-info qmake, we
# fallback to using the qmake in PATH because that's what we used to do
if env.is_cross_build():
qmake = env.cross_info.config['binaries'].get('qmake', qmake)
return ExternalProgram(qmake, silent=True)
def _qmake_detect(self, mods, env, kwargs):
for qmake in ('qmake-' + self.name, 'qmake'):
self.qmake = self._find_qmake(qmake, env)
if not self.qmake.found():
continue
            # Check that this qmake is for the Qt version we want
pc = subprocess.Popen(self.qmake.fullpath + ['-v'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True)
stdo = pc.communicate()[0]
if pc.returncode != 0:
continue
            if 'Qt version ' + self.qtver not in stdo:
mlog.log('QMake is not for ' + self.qtname)
continue
            # Found a matching qmake!
break
else:
# Didn't find qmake :(
return
self.version = re.search(self.qtver + '(\.\d+)+', stdo).group(0)
# Query library path, header path, and binary path
stdo = subprocess.Popen(self.qmake.fullpath + ['-query'],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
universal_newlines=True).communicate()[0]
qvars = {}
for line in stdo.split('\n'):
line = line.strip()
if line == '':
continue
(k, v) = tuple(line.split(':', 1))
qvars[k] = v
if mesonlib.is_osx():
return self._framework_detect(qvars, mods, kwargs)
incdir = qvars['QT_INSTALL_HEADERS']
self.cargs.append('-I' + incdir)
libdir = qvars['QT_INSTALL_LIBS']
# Used by self.compilers_detect()
self.bindir = qvars['QT_INSTALL_BINS']
self.is_found = True
for module in mods:
mincdir = os.path.join(incdir, 'Qt' + module)
self.cargs.append('-I' + mincdir)
if for_windows(env.is_cross_build(), env):
libfile = os.path.join(libdir, self.qtpkgname + module + '.lib')
if not os.path.isfile(libfile):
# MinGW can link directly to .dll
libfile = os.path.join(self.bindir, self.qtpkgname + module + '.dll')
if not os.path.isfile(libfile):
self.is_found = False
break
else:
libfile = os.path.join(libdir, 'lib{}{}.so'.format(self.qtpkgname, module))
if not os.path.isfile(libfile):
self.is_found = False
break
self.largs.append(libfile)
return qmake
def _framework_detect(self, qvars, modules, kwargs):
libdir = qvars['QT_INSTALL_LIBS']
for m in modules:
fname = 'Qt' + m
fwdep = ExtraFrameworkDependency(fname, kwargs.get('required', True), libdir)
self.cargs.append('-F' + libdir)
if fwdep.found():
self.is_found = True
self.cargs += fwdep.get_compile_args()
self.largs += fwdep.get_link_args()
# Used by self.compilers_detect()
self.bindir = qvars['QT_INSTALL_BINS']
def get_version(self):
return self.version
def get_compile_args(self):
return self.cargs
def get_sources(self):
return []
def get_link_args(self):
return self.largs
def found(self):
return self.is_found
def get_exe_args(self):
# Originally this was -fPIE but nowadays the default
# for upstream and distros seems to be -reduce-relocations
# which requires -fPIC. This may cause a performance
# penalty when using self-built Qt or on platforms
# where -fPIC is not required. If this is an issue
# for you, patches are welcome.
# Fix this to be more portable, especially to MSVC.
return ['-fPIC']
class Qt5Dependency(QtBaseDependency):
def __init__(self, env, kwargs):
QtBaseDependency.__init__(self, 'qt5', env, kwargs)
class Qt4Dependency(QtBaseDependency):
def __init__(self, env, kwargs):
QtBaseDependency.__init__(self, 'qt4', env, kwargs)
class GnuStepDependency(Dependency):
def __init__(self, environment, kwargs):
Dependency.__init__(self, 'gnustep')
self.modules = kwargs.get('modules', [])
self.detect()
def detect(self):
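        # Probe gnustep-config: bail out if it is missing or fails, otherwise
        # collect the ObjC compile flags and the gui/base link libraries.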
confprog = 'gnustep-config'
try:
gp = subprocess.Popen([confprog, '--help'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
gp.communicate()
except FileNotFoundError:
self.args = None
mlog.log('Dependency GnuStep found:', mlog.red('NO'), '(no gnustep-config)')
return
if gp.returncode != 0:
self.args = None
mlog.log('Dependency GnuStep found:', mlog.red('NO'))
return
if 'gui' in self.modules:
arg = '--gui-libs'
else:
arg = '--base-libs'
fp = subprocess.Popen([confprog, '--objc-flags'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(flagtxt, flagerr) = fp.communicate()
flagtxt = flagtxt.decode()
flagerr = flagerr.decode()
if fp.returncode != 0:
raise DependencyException('Error getting objc-args: %s %s' % (flagtxt, flagerr))
args = flagtxt.split()
        self.args = self.filter_args(args)
fp = subprocess.Popen([confprog, arg],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(libtxt, liberr) = fp.communicate()
libtxt = libtxt.decode()
liberr = liberr.decode()
if fp.returncode != 0:
raise DependencyException('Error getting objc-lib args: %s %s' % (libtxt, liberr))
self.libs = self.weird_filter(libtxt.split())
mlog.log('Dependency GnuStep found:', mlog.green('YES'))
def weird_filter(self, elems):
"""When building packages, the output of the enclosing Make
is sometimes mixed among the subprocess output. I have no idea
why. As a hack filter out everything that is not a flag."""
return [e for e in elems if e.startswith('-')]
    def filter_args(self, args):
"""gnustep-config returns a bunch of garbage args such
as -O2 and so on. Drop everything that is not needed."""
result = []
for f in args:
if f.startswith('-D') or f.startswith('-f') or \
f.startswith('-I') or f == '-pthread' or\
(f.startswith('-W') and not f == '-Wall'):
result.append(f)
return result
def found(self):
return self.args is not None
def get_compile_args(self):
if self.args is None:
return []
return self.args
def get_link_args(self):
return self.libs
class AppleFrameworks(Dependency):
def __init__(self, environment, kwargs):
Dependency.__init__(self, 'appleframeworks')
modules = kwargs.get('modules', [])
if isinstance(modules, str):
modules = [modules]
if len(modules) == 0:
raise DependencyException("AppleFrameworks dependency requires at least one module.")
self.frameworks = modules
def get_link_args(self):
args = []
for f in self.frameworks:
args.append('-framework')
args.append(f)
return args
def found(self):
return mesonlib.is_osx()
class GLDependency(Dependency):
def __init__(self, environment, kwargs):
Dependency.__init__(self, 'gl')
self.is_found = False
self.cargs = []
self.linkargs = []
try:
pcdep = PkgConfigDependency('gl', environment, kwargs)
if pcdep.found():
self.type_name = 'pkgconfig'
self.is_found = True
self.cargs = pcdep.get_compile_args()
self.linkargs = pcdep.get_link_args()
return
except Exception:
pass
if mesonlib.is_osx():
self.is_found = True
self.linkargs = ['-framework', 'OpenGL']
return
if mesonlib.is_windows():
self.is_found = True
self.linkargs = ['-lopengl32']
return
def get_link_args(self):
return self.linkargs
# There are three different ways of depending on SDL2:
# sdl2-config, pkg-config and OSX framework
class SDL2Dependency(Dependency):
def __init__(self, environment, kwargs):
Dependency.__init__(self, 'sdl2')
self.is_found = False
self.cargs = []
self.linkargs = []
try:
pcdep = PkgConfigDependency('sdl2', environment, kwargs)
if pcdep.found():
self.type_name = 'pkgconfig'
self.is_found = True
self.cargs = pcdep.get_compile_args()
self.linkargs = pcdep.get_link_args()
self.version = pcdep.get_version()
return
except Exception as e:
mlog.debug('SDL 2 not found via pkgconfig. Trying next, error was:', str(e))
pass
sdlconf = shutil.which('sdl2-config')
if sdlconf:
pc = subprocess.Popen(['sdl2-config', '--cflags'],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
(stdo, _) = pc.communicate()
self.cargs = stdo.decode().strip().split()
pc = subprocess.Popen(['sdl2-config', '--libs'],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
(stdo, _) = pc.communicate()
self.linkargs = stdo.decode().strip().split()
self.is_found = True
mlog.log('Dependency', mlog.bold('sdl2'), 'found:', mlog.green('YES'), '(%s)' % sdlconf)
self.version = '2' # FIXME
return
mlog.debug('Could not find sdl2-config binary, trying next.')
if mesonlib.is_osx():
fwdep = ExtraFrameworkDependency('sdl2', kwargs.get('required', True))
if fwdep.found():
self.is_found = True
self.cargs = fwdep.get_compile_args()
self.linkargs = fwdep.get_link_args()
self.version = '2' # FIXME
return
mlog.log('Dependency', mlog.bold('sdl2'), 'found:', mlog.red('NO'))
def get_compile_args(self):
return self.cargs
def get_link_args(self):
return self.linkargs
def found(self):
return self.is_found
def get_version(self):
return self.version
class ExtraFrameworkDependency(Dependency):
def __init__(self, name, required, path=None):
Dependency.__init__(self, 'extraframeworks')
self.name = None
self.detect(name, path)
if self.found():
mlog.log('Dependency', mlog.bold(name), 'found:', mlog.green('YES'),
os.path.join(self.path, self.name))
else:
mlog.log('Dependency', name, 'found:', mlog.red('NO'))
def detect(self, name, path):
lname = name.lower()
if path is None:
paths = ['/Library/Frameworks']
else:
paths = [path]
for p in paths:
for d in os.listdir(p):
fullpath = os.path.join(p, d)
if lname != d.split('.')[0].lower():
continue
if not stat.S_ISDIR(os.stat(fullpath).st_mode):
continue
self.path = p
self.name = d
return
def get_compile_args(self):
if self.found():
return ['-I' + os.path.join(self.path, self.name, 'Headers')]
return []
def get_link_args(self):
if self.found():
return ['-F' + self.path, '-framework', self.name.split('.')[0]]
return []
def found(self):
return self.name is not None
class ThreadDependency(Dependency):
def __init__(self, environment, kwargs):
super().__init__('threads')
self.name = 'threads'
self.is_found = True
mlog.log('Dependency', mlog.bold(self.name), 'found:', mlog.green('YES'))
def need_threads(self):
return True
class Python3Dependency(Dependency):
def __init__(self, environment, kwargs):
super().__init__('python3')
self.name = 'python3'
self.is_found = False
self.version = "3.something_maybe"
try:
pkgdep = PkgConfigDependency('python3', environment, kwargs)
if pkgdep.found():
self.cargs = pkgdep.cargs
self.libs = pkgdep.libs
self.version = pkgdep.get_version()
self.is_found = True
return
except Exception:
pass
if not self.is_found:
if mesonlib.is_windows():
inc = sysconfig.get_path('include')
platinc = sysconfig.get_path('platinclude')
self.cargs = ['-I' + inc]
if inc != platinc:
self.cargs.append('-I' + platinc)
                # Nothing exposes this directly that I could find
basedir = sysconfig.get_config_var('base')
vernum = sysconfig.get_config_var('py_version_nodot')
self.libs = ['-L{}/libs'.format(basedir),
'-lpython{}'.format(vernum)]
self.is_found = True
self.version = sysconfig.get_config_var('py_version_short')
elif mesonlib.is_osx():
# In OSX the Python 3 framework does not have a version
# number in its name.
fw = ExtraFrameworkDependency('python', False)
if fw.found():
self.cargs = fw.get_compile_args()
self.libs = fw.get_link_args()
self.is_found = True
if self.is_found:
mlog.log('Dependency', mlog.bold(self.name), 'found:', mlog.green('YES'))
else:
mlog.log('Dependency', mlog.bold(self.name), 'found:', mlog.red('NO'))
def get_compile_args(self):
return self.cargs
def get_link_args(self):
return self.libs
def get_version(self):
return self.version
def get_dep_identifier(name, kwargs):
elements = [name]
modlist = kwargs.get('modules', [])
if isinstance(modlist, str):
modlist = [modlist]
for module in modlist:
elements.append(module)
# We use a tuple because we need a non-mutable structure to use as the key
# of a dictionary and a string has potential for name collisions
identifier = tuple(elements)
identifier += ('main', kwargs.get('main', False))
identifier += ('static', kwargs.get('static', False))
if 'fallback' in kwargs:
f = kwargs.get('fallback')
identifier += ('fallback', f[0], f[1])
return identifier
def find_external_dependency(name, environment, kwargs):
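    # Resolution order: a specialised handler from `packages` if one exists,
    # then pkg-config, with an extra framework lookup on OSX.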
required = kwargs.get('required', True)
if not isinstance(required, bool):
raise DependencyException('Keyword "required" must be a boolean.')
lname = name.lower()
if lname in packages:
dep = packages[lname](environment, kwargs)
if required and not dep.found():
raise DependencyException('Dependency "%s" not found' % name)
return dep
pkg_exc = None
pkgdep = None
try:
pkgdep = PkgConfigDependency(name, environment, kwargs)
if pkgdep.found():
return pkgdep
except Exception as e:
pkg_exc = e
if mesonlib.is_osx():
fwdep = ExtraFrameworkDependency(name, required)
if required and not fwdep.found():
raise DependencyException('Dependency "%s" not found' % name)
return fwdep
if pkg_exc is not None:
raise pkg_exc
mlog.log('Dependency', mlog.bold(name), 'found:', mlog.red('NO'))
return pkgdep
# This has to be at the end so the classes it references
# are defined.
packages = {'boost': BoostDependency,
'gtest': GTestDependency,
'gmock': GMockDependency,
'qt5': Qt5Dependency,
'qt4': Qt4Dependency,
'gnustep': GnuStepDependency,
'appleframeworks': AppleFrameworks,
'wxwidgets' : WxDependency,
'sdl2' : SDL2Dependency,
'gl' : GLDependency,
'threads' : ThreadDependency,
'python3' : Python3Dependency,
}
|
apache-2.0
| 5,174,436,312,750,799,000 | 37.056122 | 100 | 0.547775 | false |
11mariom/sensu-checks
|
unifi/unifi-get-stats.py
|
1
|
1510
|
#!/usr/bin/env python
#
# Get stats from a Unifi AP controller using the API
#
# Requires the unifi python module
#
import argparse
import json
from unifi.controller import Controller
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--controller', default='unifi', help='the controller address (default "unifi")')
parser.add_argument('-u', '--username', default='admin', help='the controller username (default "admin")')
parser.add_argument('-p', '--password', default='', help='the controller password')
parser.add_argument('-b', '--port', default='8443', help='the controller port (default "8443")')
parser.add_argument('-v', '--version', default='v2', help='the controller base version (default "v2")')
parser.add_argument('-s', '--siteid', default='default', help='the site ID, UniFi >=3.x only (default "default")')
args = parser.parse_args()
c = Controller(args.controller, args.username, args.password, args.version, args.siteid)
aps = c.get_aps()
total = guests = users = rx = tx = 0
data = dict(all=1)
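# Collect per-AP uptime, client counts and byte counters keyed by AP name,
# plus an 'all' entry holding the aggregated totals.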
for ap in aps:
data[ap['name']] = dict(uptime=ap['uptime'], total=ap['num_sta'], guests=ap['guest-num_sta'], users=ap['user-num_sta'],
tx=ap['stat']['tx_bytes'], rx=ap['stat']['rx_bytes'])
total += ap['num_sta']
guests += ap['guest-num_sta']
users += ap['user-num_sta']
rx += ap['stat']['rx_bytes']
tx += ap['stat']['tx_bytes']
data["all"] = dict( total=total, guests=guests, users=users, rx=rx, tx=tx )
print json.dumps(data)
|
gpl-2.0
| -6,542,056,813,486,104,000 | 38.736842 | 123 | 0.654967 | false |
philvoyer/ANI2012A17
|
Module09/EXE04/ANI2012A17_Locator.py
|
1
|
6945
|
# ANI2012A17_Locator.py | Python programming with Maya | coding=utf-8
# Generates a visual representation of a locator from a hierarchy of geometric primitives (sphere, cylinder, cone).
print "\n<execution start>\n"
# program parameters
# scaling factor
scaleFactor = 1.0
# option for a left- or right-handed coordinate system (e.g. Maya = right-handed, Unity = left-handed)
isLeftHandedSystem = False
# option for a colored version of the primitives (XYZ>RGB)
isInColor = True
# initialization
# logical state derived from the parameter
isRightHandedSystem = not isLeftHandedSystem
print "\n<extraction de la transformation du premier objet sélectionné>\n"
# obtenir la liste des objets sélectionnés
selection = maya.cmds.ls(selection=True)
# check that at least one object is selected
if len(selection) == 0:
    # default transform if no object is selected
selectionPosition = (0, 0, 0)
selectionRotation = (0, 0, 0)
else:
    # fetch the position and orientation of the first object in the selection list
selectionPosition = maya.cmds.xform(selection[0], query=True, worldSpace=True, translation=True)
selectionRotation = maya.cmds.xform(selection[0], query=True, worldSpace=True, rotation=True)
print "\n<création de la racine>\n"
# création d'un objet transformé dans l'espace mais invisible qui sera utilisé comme racine de l'arborescence des composantes visuelles
basis = maya.cmds.spaceLocator(name='basis', position=(0, 0, 0))
# create a sphere that represents the origin of the orthonormal basis
sphereRadius = scaleFactor
sphereResolutionX = 16
sphereResolutionY = 16
origin = maya.cmds.polySphere(name='basisOrigin', subdivisionsX=sphereResolutionX, subdivisionsY=sphereResolutionY, radius=sphereRadius*2)
print "\n<création des axes>\n"
# création de 3 cylindres qui symbolisent les axes X, Y et Z de la base orthonormale
cylindreResolution = 12
cylindreHeight = scaleFactor * 8
cylindreRadius = sphereRadius * 0.618
axisX = maya.cmds.polyCylinder(name='basisAxisX', subdivisionsAxis=cylindreResolution, radius=cylindreRadius, height=cylindreHeight)
axisY = maya.cmds.polyCylinder(name='basisAxisY', subdivisionsAxis=cylindreResolution, radius=cylindreRadius, height=cylindreHeight)
axisZ = maya.cmds.polyCylinder(name='basisAxisZ', subdivisionsAxis=cylindreResolution, radius=cylindreRadius, height=cylindreHeight)
# create 3 cones that represent the positive direction of the X, Y and Z axes of the orthonormal basis
coneResolution = 12
coneRadius = sphereRadius
coneHeight = coneRadius * 3
arrowX = maya.cmds.polyCone(name='basisArrowX', subdivisionsAxis=coneResolution, radius=coneRadius, height=coneHeight)
arrowY = maya.cmds.polyCone(name='basisArrowY', subdivisionsAxis=coneResolution, radius=coneRadius, height=coneHeight)
arrowZ = maya.cmds.polyCone(name='basisArrowZ', subdivisionsAxis=coneResolution, radius=coneRadius, height=coneHeight)
print "\n<construction de la structure hiérarchie>\n"
# mise en hiérarchie des différentes composantes à partir de la racine (ordre: enfant > parent, de la racine jusqu'aux feuilles de l'arborescence)
maya.cmds.parent(origin[0], basis[0])
maya.cmds.parent(axisX[0], "%s|%s" % (basis[0],origin[0]))
maya.cmds.parent(axisY[0], "%s|%s" % (basis[0],origin[0]))
maya.cmds.parent(axisZ[0], "%s|%s" % (basis[0],origin[0]))
maya.cmds.parent(arrowX[0], "%s|%s|%s" % (basis[0], origin[0], axisX[0]))
maya.cmds.parent(arrowY[0], "%s|%s|%s" % (basis[0], origin[0], axisY[0]))
maya.cmds.parent(arrowZ[0], "%s|%s|%s" % (basis[0], origin[0], axisZ[0]))
# references to the transform nodes in the hierarchy
basisTransform = basis[0]
originTransform = "%s|%s" % (basis[0], origin[0])
axisXTransform = "%s|%s|%s" % (basis[0], origin[0], axisX[0])
axisYTransform = "%s|%s|%s" % (basis[0], origin[0], axisY[0])
axisZTransform = "%s|%s|%s" % (basis[0], origin[0], axisZ[0])
arrowXTransform = "%s|%s|%s|%s" % (basis[0], origin[0], axisX[0], arrowX[0])
arrowYTransform = "%s|%s|%s|%s" % (basis[0], origin[0], axisY[0], arrowY[0])
arrowZTransform = "%s|%s|%s|%s" % (basis[0], origin[0], axisZ[0], arrowZ[0])
# compute the distances relative to the parent
translationAxis = cylindreHeight / 2 + sphereRadius / 2
translationArrow = cylindreHeight / 2 + coneHeight / 2
# translate the cones (axis direction markers) relative to the cylinders (axes)
maya.cmds.xform(arrowXTransform, objectSpace=True, translation=(0, translationArrow, 0))
maya.cmds.xform(arrowYTransform, objectSpace=True, translation=(0, translationArrow, 0))
maya.cmds.xform(arrowZTransform, objectSpace=True, translation=(0, translationArrow, 0))
# rotate the cylinders (axes)
maya.cmds.xform(axisXTransform, worldSpace=True, rotation=(0, 0 , -90))
# determine the direction of the Z axis according to the coordinate system type
if isLeftHandedSystem: # the Z axis direction is positive
maya.cmds.xform(axisZTransform, worldSpace=True, rotation=(-90, 0, 0))
elif isRightHandedSystem: # the Z axis direction is negative
maya.cmds.xform(axisZTransform, worldSpace=True, rotation=( 90, 0, 0))
# translate the cylinders (axes) relative to the sphere (origin)
maya.cmds.xform(axisXTransform, worldSpace=True, translation=(translationAxis, 0, 0))
maya.cmds.xform(axisYTransform, worldSpace=True, translation=(0, translationAxis, 0))
# the orientation of the Z axis depends on the coordinate system type (left- or right-handed)
if isLeftHandedSystem:
maya.cmds.xform(axisZTransform, worldSpace=True, translation=(0, 0, -translationAxis))
elif isRightHandedSystem:
maya.cmds.xform(axisZTransform, worldSpace=True, translation=(0, 0, translationAxis))
print "\n<transformation de la structure>\n"
# transformation de la racine de l'hiérarchie à la position et l'orientation de l'objet sélectionné ou sinon à l'origine de la scène.
maya.cmds.xform(basisTransform, worldSpace=True, translation=selectionPosition, rotation=selectionRotation)
# color the vertices of the geometric primitives
if isInColor:
print "\n<coloration de la structure>\n"
color = [0.618, 0.618, 0.618]
maya.cmds.polyColorPerVertex(originTransform, colorRGB=color, colorDisplayOption=True)
color = [1.0, 0.0, 0.0]
maya.cmds.polyColorPerVertex(axisXTransform, colorRGB=color, colorDisplayOption=True)
maya.cmds.polyColorPerVertex(arrowXTransform, colorRGB=color, colorDisplayOption=True)
color = [0.0, 1.0, 0.0]
maya.cmds.polyColorPerVertex(arrowYTransform, colorRGB=color, colorDisplayOption=True)
maya.cmds.polyColorPerVertex(axisYTransform, colorRGB=color, colorDisplayOption=True)
color = [0.0, 0.0, 1.0]
maya.cmds.polyColorPerVertex(axisZTransform, colorRGB=color, colorDisplayOption=True)
maya.cmds.polyColorPerVertex(arrowZTransform, colorRGB=color, colorDisplayOption=True)
print "\n<fin de l'exécution>\n"
|
mit
| 6,334,168,545,797,120,000 | 44.9 | 146 | 0.76093 | false |
attakei/hieroglyph
|
src/hieroglyph/__init__.py
|
1
|
2964
|
import unicodedata
from hieroglyph import builder
from hieroglyph import directives
from hieroglyph import html
from hieroglyph import slides
def version():
"""Return the installed package version."""
import pkg_resources
return pkg_resources.get_distribution('hieroglyph').version
def setup(app):
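    # Sphinx extension entry point: registers the slide builders, config
    # values, directives, transforms and event handlers below.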
# Register Builders
app.add_builder(builder.SlideBuilder)
app.add_builder(builder.DirectorySlideBuilder)
app.add_builder(builder.SingleFileSlideBuilder)
app.add_builder(builder.InlineSlideBuilder)
app.add_builder(builder.DirectoryInlineSlideBuilder)
app.connect('html-collect-pages', slides.get_extra_pages)
# core slide configuration
app.add_config_value('slide_title', None, 'html')
app.add_config_value('slide_theme', 'slides', 'html')
app.add_config_value('slide_levels', 3, 'html')
app.add_config_value('slide_theme_options', {}, 'html')
app.add_config_value('slide_theme_path', [], 'html')
app.add_config_value('slide_numbers', False, 'html')
app.add_config_value('slide_footer', None, 'html')
app.add_config_value('autoslides', True, 'env')
# support for linking html output to slides
app.add_config_value('slide_link_html_to_slides', False, 'html')
app.add_config_value('slide_link_html_sections_to_slides', False, 'html')
app.add_config_value('slide_relative_path', '../slides/', 'html')
app.add_config_value('slide_html_slide_link_symbol',
unicodedata.lookup('section sign'), 'html')
# support for linking from slide output to html
app.add_config_value('slide_link_to_html', False, 'html')
app.add_config_value('slide_html_relative_path', '../html/', 'html')
# slide-related directives
app.add_node(directives.if_slides)
app.add_directive('ifnotslides', directives.IfBuildingSlides)
app.add_directive('ifslides', directives.IfBuildingSlides)
app.add_directive('notslides', directives.IfBuildingSlides)
app.add_directive('slides', directives.IfBuildingSlides)
app.add_transform(directives.TransformSlideConditions)
app.add_node(directives.slideconf,
html=(directives.raiseSkip, None),
latex=(directives.raiseSkip, None),
text=(directives.raiseSkip, None),
man=(directives.raiseSkip, None),
texinfo=(directives.raiseSkip, None),
)
app.add_directive('slideconf', directives.SlideConf)
app.connect('doctree-resolved', directives.process_slideconf_nodes)
app.add_node(directives.slide)
app.add_directive('slide', directives.SlideDirective)
app.connect('doctree-resolved', directives.process_slide_nodes)
app.add_node(directives.nextslide)
app.add_directive('nextslide', directives.NextSlideDirective)
app.add_transform(directives.TransformNextSlides)
app.connect('builder-inited', html.inspect_config)
app.connect('html-page-context', html.add_link)
|
bsd-3-clause
| -2,948,857,152,409,221,600 | 38 | 77 | 0.70108 | false |
rajalokan/keystone
|
keystone/token/persistence/backends/sql.py
|
1
|
12167
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import functools
from oslo_log import log
from oslo_utils import timeutils
from keystone.common import sql
import keystone.conf
from keystone import exception
from keystone import token
from keystone.token.providers import common
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
class TokenModel(sql.ModelBase, sql.ModelDictMixinWithExtras):
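    # SQLAlchemy model for the persisted token table: id, expiry, validity
    # flag, owning user/trust, and a JSON blob with the full token data.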
__tablename__ = 'token'
attributes = ['id', 'expires', 'user_id', 'trust_id']
id = sql.Column(sql.String(64), primary_key=True)
expires = sql.Column(sql.DateTime(), default=None)
extra = sql.Column(sql.JsonBlob())
valid = sql.Column(sql.Boolean(), default=True, nullable=False)
user_id = sql.Column(sql.String(64))
trust_id = sql.Column(sql.String(64))
__table_args__ = (
sql.Index('ix_token_expires', 'expires'),
sql.Index('ix_token_expires_valid', 'expires', 'valid'),
sql.Index('ix_token_user_id', 'user_id'),
sql.Index('ix_token_trust_id', 'trust_id')
)
def _expiry_upper_bound_func():
# don't flush anything within the grace window
sec = datetime.timedelta(seconds=CONF.token.allow_expired_window)
return timeutils.utcnow() - sec
def _expiry_range_batched(session, upper_bound_func, batch_size):
"""Return the stop point of the next batch for expiration.
Return the timestamp of the next token that is `batch_size` rows from
being the oldest expired token.
"""
# This expiry strategy splits the tokens into roughly equal sized batches
# to be deleted. It does this by finding the timestamp of a token
# `batch_size` rows from the oldest token and yielding that to the caller.
# It's expected that the caller will then delete all rows with a timestamp
# equal to or older than the one yielded. This may delete slightly more
# tokens than the batch_size, but that should be ok in almost all cases.
LOG.debug('Token expiration batch size: %d', batch_size)
query = session.query(TokenModel.expires)
query = query.filter(TokenModel.expires < upper_bound_func())
query = query.order_by(TokenModel.expires)
query = query.offset(batch_size - 1)
query = query.limit(1)
while True:
try:
next_expiration = query.one()[0]
except sql.NotFound:
# There are less than `batch_size` rows remaining, so fall
# through to the normal delete
break
yield next_expiration
yield upper_bound_func()
def _expiry_range_all(session, upper_bound_func):
"""Expire all tokens in one pass."""
yield upper_bound_func()
class Token(token.persistence.TokenDriverBase):
# Public interface
def get_token(self, token_id):
if token_id is None:
raise exception.TokenNotFound(token_id=token_id)
with sql.session_for_read() as session:
token_ref = session.query(TokenModel).get(token_id)
if not token_ref or not token_ref.valid:
raise exception.TokenNotFound(token_id=token_id)
return token_ref.to_dict()
def create_token(self, token_id, data):
data_copy = copy.deepcopy(data)
if not data_copy.get('expires'):
data_copy['expires'] = common.default_expire_time()
if not data_copy.get('user_id'):
data_copy['user_id'] = data_copy['user']['id']
token_ref = TokenModel.from_dict(data_copy)
token_ref.valid = True
with sql.session_for_write() as session:
session.add(token_ref)
return token_ref.to_dict()
def delete_token(self, token_id):
with sql.session_for_write() as session:
token_ref = session.query(TokenModel).get(token_id)
if not token_ref or not token_ref.valid:
raise exception.TokenNotFound(token_id=token_id)
token_ref.valid = False
def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
consumer_id=None):
"""Delete all tokens in one session.
The user_id will be ignored if the trust_id is specified. user_id
will always be specified.
If using a trust, the token's user_id is set to the trustee's user ID
or the trustor's user ID, so will use trust_id to query the tokens.
"""
token_list = []
with sql.session_for_write() as session:
now = timeutils.utcnow()
query = session.query(TokenModel)
query = query.filter_by(valid=True)
query = query.filter(TokenModel.expires > now)
if trust_id:
query = query.filter(TokenModel.trust_id == trust_id)
else:
query = query.filter(TokenModel.user_id == user_id)
for token_ref in query.all():
if tenant_id:
token_ref_dict = token_ref.to_dict()
if not self._tenant_matches(tenant_id, token_ref_dict):
continue
if consumer_id:
token_ref_dict = token_ref.to_dict()
if not self._consumer_matches(consumer_id, token_ref_dict):
continue
token_ref.valid = False
token_list.append(token_ref.id)
return token_list
def _tenant_matches(self, tenant_id, token_ref_dict):
return ((tenant_id is None) or
(token_ref_dict.get('tenant') and
token_ref_dict['tenant'].get('id') == tenant_id))
def _consumer_matches(self, consumer_id, ref):
if consumer_id is None:
return True
else:
try:
oauth = ref['token_data']['token'].get('OS-OAUTH1', {})
return oauth and oauth['consumer_id'] == consumer_id
except KeyError:
return False
def _list_tokens_for_trust(self, trust_id):
with sql.session_for_read() as session:
tokens = []
now = timeutils.utcnow()
query = session.query(TokenModel)
query = query.filter(TokenModel.expires > now)
query = query.filter(TokenModel.trust_id == trust_id)
token_references = query.filter_by(valid=True)
for token_ref in token_references:
token_ref_dict = token_ref.to_dict()
tokens.append(token_ref_dict['id'])
return tokens
def _list_tokens_for_user(self, user_id, tenant_id=None):
with sql.session_for_read() as session:
tokens = []
now = timeutils.utcnow()
query = session.query(TokenModel)
query = query.filter(TokenModel.expires > now)
query = query.filter(TokenModel.user_id == user_id)
token_references = query.filter_by(valid=True)
for token_ref in token_references:
token_ref_dict = token_ref.to_dict()
if self._tenant_matches(tenant_id, token_ref_dict):
tokens.append(token_ref['id'])
return tokens
def _list_tokens_for_consumer(self, user_id, consumer_id):
tokens = []
with sql.session_for_write() as session:
now = timeutils.utcnow()
query = session.query(TokenModel)
query = query.filter(TokenModel.expires > now)
query = query.filter(TokenModel.user_id == user_id)
token_references = query.filter_by(valid=True)
for token_ref in token_references:
token_ref_dict = token_ref.to_dict()
if self._consumer_matches(consumer_id, token_ref_dict):
tokens.append(token_ref_dict['id'])
return tokens
def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
consumer_id=None):
if not CONF.token.revoke_by_id:
return []
if trust_id:
return self._list_tokens_for_trust(trust_id)
if consumer_id:
return self._list_tokens_for_consumer(user_id, consumer_id)
else:
return self._list_tokens_for_user(user_id, tenant_id)
def list_revoked_tokens(self):
with sql.session_for_read() as session:
tokens = []
now = timeutils.utcnow()
query = session.query(TokenModel.id, TokenModel.expires,
TokenModel.extra)
query = query.filter(TokenModel.expires > now)
token_references = query.filter_by(valid=False)
for token_ref in token_references:
token_data = token_ref[2]['token_data']
if 'access' in token_data:
# It's a v2 token.
audit_ids = token_data['access']['token']['audit_ids']
else:
# It's a v3 token.
audit_ids = token_data['token']['audit_ids']
record = {
'id': token_ref[0],
'expires': token_ref[1],
'audit_id': audit_ids[0],
}
tokens.append(record)
return tokens
def _expiry_range_strategy(self, dialect):
"""Choose a token range expiration strategy.
Based on the DB dialect, select an expiry range callable that is
appropriate.
"""
# DB2 and MySQL can both benefit from a batched strategy. On DB2 the
# transaction log can fill up and on MySQL w/Galera, large
# transactions can exceed the maximum write set size.
if dialect == 'ibm_db_sa':
# Limit of 100 is known to not fill a transaction log
# of default maximum size while not significantly
# impacting the performance of large token purges on
# systems where the maximum transaction log size has
# been increased beyond the default.
return functools.partial(_expiry_range_batched,
batch_size=100)
elif dialect == 'mysql':
# We want somewhat more than 100, since Galera replication delay is
# at least RTT*2. This can be a significant amount of time if
# doing replication across a WAN.
return functools.partial(_expiry_range_batched,
batch_size=1000)
return _expiry_range_all
def flush_expired_tokens(self):
with sql.session_for_write() as session:
# Turn off autocommit, as it doesn't work well with batch delete
session.autocommit = False
dialect = session.bind.dialect.name
expiry_range_func = self._expiry_range_strategy(dialect)
query = session.query(TokenModel.expires)
total_removed = 0
upper_bound_func = _expiry_upper_bound_func
for expiry_time in expiry_range_func(session, upper_bound_func):
delete_query = query.filter(TokenModel.expires <=
expiry_time)
row_count = delete_query.delete(synchronize_session=False)
# Explicitly commit each batch so as to free up
# resources early. We do not actually need
# transactional semantics here.
session.commit()
total_removed += row_count
LOG.debug('Removed %d total expired tokens', total_removed)
session.flush()
LOG.info('Total expired tokens removed: %d', total_removed)
|
apache-2.0
| 407,838,895,909,105,800 | 39.828859 | 79 | 0.592422 | false |
jmesteve/openerp
|
openerp/addons/account_balance_reporting/account_balance_reporting_report.py
|
1
|
21960
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP - Account balance reporting engine
# Copyright (C) 2009 Pexego Sistemas Informáticos. All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Account balance report objects
Generic account balance report document (with header and detail lines).
Designed following the needs of the
Spanish/Spain localization.
"""
from openerp.osv import orm,fields
from openerp.tools.translate import _
import re
import time
import netsvc
import logging
# CSS classes for the account line templates
CSS_CLASSES = [('default','Default'),('l1', 'Level 1'), ('l2', 'Level 2'),
('l3', 'Level 3'), ('l4', 'Level 4'), ('l5', 'Level 5')]
class account_balance_reporting(orm.Model):
"""
Account balance report.
It stores the configuration/header fields of an account balance report,
and the linked lines of detail with the values of the accounting concepts
(values generated from the selected template lines of detail formulas).
"""
_name = "account.balance.reporting"
_columns = {
'name': fields.char('Name', size=64, required=True, select=True),
'template_id': fields.many2one('account.balance.reporting.template',
'Template', ondelete='set null', required=True, select=True,
states={'calc_done': [('readonly', True)],
'done': [('readonly', True)]}),
'calc_date': fields.datetime("Calculation date", readonly=True),
'state': fields.selection([('draft','Draft'),
('calc','Processing'),
('calc_done','Processed'),
('done','Done'),
('canceled','Canceled')], 'State'),
'company_id': fields.many2one('res.company', 'Company',
ondelete='cascade', required=True, readonly=True,
states={'draft': [('readonly', False)]}),
'current_fiscalyear_id': fields.many2one('account.fiscalyear',
'Fiscal year 1', select=True, required=True,
states={'calc_done': [('readonly', True)],
'done': [('readonly', True)]}),
'current_period_ids': fields.many2many('account.period',
'account_balance_reporting_account_period_current_rel',
'account_balance_reporting_id', 'period_id',
'Fiscal year 1 periods',
states={'calc_done': [('readonly', True)],
'done': [('readonly', True)]}),
'previous_fiscalyear_id': fields.many2one('account.fiscalyear',
'Fiscal year 2', select=True,
states={'calc_done': [('readonly', True)],
'done': [('readonly', True)]}),
'previous_period_ids': fields.many2many('account.period',
'account_balance_reporting_account_period_previous_rel',
'account_balance_reporting_id', 'period_id',
'Fiscal year 2 periods',
states={'calc_done': [('readonly', True)],
'done': [('readonly', True)]}),
'line_ids': fields.one2many('account.balance.reporting.line',
'report_id', 'Lines',
states = {'done': [('readonly', True)]}),
}
_defaults = {
'company_id': lambda self, cr, uid, context: self.pool.get('res.users').browse(cr, uid, uid, context).company_id.id,
'state': 'draft',
}
def action_calculate(self, cr, uid, ids, context=None):
"""Called when the user presses the Calculate button.
It will use the report template to generate lines of detail for the
report with calculated values."""
if context is None:
context = {}
line_obj = self.pool.get('account.balance.reporting.line')
# Set the state to 'calculating'
self.write(cr, uid, ids, {
'state': 'calc',
'calc_date': time.strftime('%Y-%m-%d %H:%M:%S')
})
for report in self.browse(cr, uid, ids, context=context):
# Clear the report data (unlink the lines of detail)
line_obj.unlink(cr, uid, [line.id for line in report.line_ids],
context=context)
# Fill the report with a 'copy' of the lines of its template (if it has one)
if report.template_id:
for template_line in report.template_id.line_ids:
line_obj.create(cr, uid, {
'code': template_line.code,
'name': template_line.name,
'report_id': report.id,
'template_line_id': template_line.id,
'parent_id': None,
'current_value': None,
'previous_value': None,
'sequence': template_line.sequence,
'css_class': template_line.css_class,
}, context=context)
# Set the parents of the lines in the report
# Note: We reload the reports objects to refresh the lines of detail.
for report in self.browse(cr, uid, ids, context=context):
if report.template_id:
# Set line parents (now that they have been created)
for line in report.line_ids:
tmpl_line = line.template_line_id
if tmpl_line and tmpl_line.parent_id:
parent_line_ids = line_obj.search(cr, uid,
[('report_id', '=', report.id),
('code', '=', tmpl_line.parent_id.code)])
line_obj.write(cr, uid, line.id, {
'parent_id': (parent_line_ids and
parent_line_ids[0] or False),
}, context=context)
# Calculate the values of the lines
# Note: We reload the reports objects to refresh the lines of detail.
for report in self.browse(cr, uid, ids, context=context):
if report.template_id:
# Refresh the report's lines values
for line in report.line_ids:
line.refresh_values()
# Set the report as calculated
self.write(cr, uid, [report.id], {
'state': 'calc_done'
}, context=context)
else:
# Ouch! no template: Going back to draft state.
self.write(cr, uid, [report.id], {'state': 'draft'},
context=context)
return True
def action_confirm(self, cr, uid, ids, context=None):
"""Called when the user clicks the confirm button."""
self.write(cr, uid, ids, {'state': 'done'}, context=context)
return True
def action_cancel(self, cr, uid, ids, context=None):
"""Called when the user clicks the cancel button."""
self.write(cr, uid, ids, {'state': 'canceled'}, context=context)
return True
def action_recover(self, cr, uid, ids, context=None):
"""Called when the user clicks the draft button to create
a new workflow instance."""
self.write(cr, uid, ids, {'state': 'draft', 'calc_date': None},
context=context)
wf_service = netsvc.LocalService("workflow")
for id in ids:
wf_service.trg_create(uid, 'account.balance.reporting', id, cr)
return True
def calculate_action(self, cr, uid, ids, context=None):
"""Calculate the selected balance report data."""
for id in ids:
# Send the calculate signal to the balance report to trigger
# action_calculate.
wf_service = netsvc.LocalService('workflow')
wf_service.trg_validate(uid, 'account.balance.reporting', id,
'calculate', cr)
return 'close'
class account_balance_reporting_line(orm.Model):
"""
Account balance report line / Accounting concept
One line of detail of the balance report representing an accounting
concept with its values.
The accounting concepts follow a parent-children hierarchy.
Its values (current and previous) are calculated based on the 'value'
formula of the linked template line.
"""
_name = "account.balance.reporting.line"
_columns = {
'report_id': fields.many2one('account.balance.reporting', 'Report',
ondelete='cascade'),
'sequence': fields.integer('Sequence', required=True),
'code': fields.char('Code', size=64, required=True, select=True),
'name': fields.char('Name', size=256, required=True, select=True),
'notes': fields.text('Notes'),
'current_value': fields.float('Fiscal year 1', digits=(16,2)),
'previous_value': fields.float('Fiscal year 2', digits=(16,2)),
'calc_date': fields.datetime("Calculation date"),
'css_class': fields.selection(CSS_CLASSES, 'CSS Class'),
'template_line_id': fields.many2one(
'account.balance.reporting.template.line',
'Line template', ondelete='set null'),
'parent_id': fields.many2one('account.balance.reporting.line',
'Parent', ondelete='cascade'),
'child_ids': fields.one2many('account.balance.reporting.line',
'parent_id', 'Children'),
}
_defaults = {
'report_id': lambda self, cr, uid, context: context.get('report_id', None),
'css_class': 'default',
}
_order = "sequence, code"
_sql_constraints = [
('report_code_uniq', 'unique(report_id, code)',
_("The code must be unique for this report!"))
]
def name_get(self, cr, uid, ids, context=None):
"""Redefine the method to show the code in the name ("[code] name")."""
res = []
for item in self.browse(cr, uid, ids, context=context):
res.append((item.id, "[%s] %s" % (item.code, item.name)))
return res
def name_search(self, cr, uid, name, args=[], operator='ilike',
context=None, limit=80):
"""Redefine the method to allow searching by code."""
ids = []
if name:
ids = self.search(cr, uid, [('code','ilike',name)]+ args,
limit=limit, context=context)
if not ids:
ids = self.search(cr, uid, [('name',operator,name)]+ args,
limit=limit, context=context)
return self.name_get(cr, uid, ids, context=context)
def refresh_values(self, cr, uid, ids, context=None):
"""
Recalculates the values of this report line using the
linked line report values formulas:
Depending on this formula the final value is calculated as follows:
        - Empty report value: sum of (this concept) children values.
- Number with decimal point ("10.2"): that value (constant).
- Account numbers separated by commas ("430,431,(437)"): Sum of the account balances.
(The sign of the balance depends on the balance mode)
- Concept codes separated by "+" ("11000+12000"): Sum of those concepts values.
"""
if context is None:
context = {}
for line in self.browse(cr, uid, ids, context=context):
tmpl_line = line.template_line_id
balance_mode = int(tmpl_line.template_id.balance_mode)
current_value = 0.0
previous_value = 0.0
report = line.report_id
# We use the same code to calculate both fiscal year values,
# just iterating over them.
for fyear in ('current', 'previous'):
value = 0
if fyear == 'current':
tmpl_value = tmpl_line.current_value
elif fyear == 'previous':
tmpl_value = (tmpl_line.previous_value or
tmpl_line.current_value)
# Remove characters after a ";" (we use ; for comments)
if tmpl_value:
tmpl_value = tmpl_value.split(';')[0]
if (fyear == 'current' and not report.current_fiscalyear_id) \
or (fyear == 'previous' and not report.previous_fiscalyear_id):
value = 0
else:
if not tmpl_value:
# Empy template value => sum of the children values
for child in line.child_ids:
if child.calc_date != child.report_id.calc_date:
# Tell the child to refresh its values
child.refresh_values()
# Reload the child data
child = self.browse(cr, uid, child.id,
context=context)
if fyear == 'current':
value += child.current_value
elif fyear == 'previous':
value += child.previous_value
elif re.match(r'^\-?[0-9]*\.[0-9]*$', tmpl_value):
# Number with decimal points => that number value
# (constant).
value = float(tmpl_value)
elif re.match(r'^[0-9a-zA-Z,\(\)\*_\ ]*$', tmpl_value):
# Account numbers separated by commas => sum of the
# account balances. We will use the context to filter
# the accounts by fiscalyear and periods.
ctx = context.copy()
if fyear == 'current':
ctx.update({
'fiscalyear': report.current_fiscalyear_id.id,
'periods': [p.id for p in report.current_period_ids],
})
elif fyear == 'previous':
ctx.update({
'fiscalyear': report.previous_fiscalyear_id.id,
'periods': [p.id for p in report.previous_period_ids],
})
value = line._get_account_balance(tmpl_value,
balance_mode, ctx)
elif re.match(r'^[\+\-0-9a-zA-Z_\*\ ]*$', tmpl_value):
# Account concept codes separated by "+" => sum of the
# concepts (template lines) values.
for line_code in re.findall(r'(-?\(?[0-9a-zA-Z_]*\)?)',
tmpl_value):
sign = 1
if line_code.startswith('-') or \
(line_code.startswith('(') and
balance_mode in (2, 4)):
sign = -1
line_code = line_code.strip('-()*')
# findall might return empty strings
if line_code:
# Search for the line (perfect match)
line_ids = self.search(cr, uid, [
('report_id','=', report.id),
('code', '=', line_code),
], context=context)
for child in self.browse(cr, uid, line_ids,
context=context):
if child.calc_date != child.report_id.calc_date:
child.refresh_values()
# Reload the child data
child = self.browse(cr, uid, child.id,
context=context)
if fyear == 'current':
value += child.current_value * sign
elif fyear == 'previous':
value += child.previous_value * sign
# Negate the value if needed
if tmpl_line.negate:
value = -value
if fyear == 'current':
current_value = value
elif fyear == 'previous':
previous_value = value
# Write the values
self.write(cr, uid, line.id, {
'current_value': current_value,
'previous_value': previous_value,
'calc_date': line.report_id.calc_date,
}, context=context)
return True
def _get_account_balance(self, cr, uid, ids, code, balance_mode=0,
context=None):
"""
        It returns the balance (or the debit/credit, see below) for an account
        with the given code, or the sum of those values for a set of accounts
        when the code is in the form "400,300,(323)".
Depending on the balance_mode, the balance is calculated as follows:
Mode 0: debit-credit for all accounts (default);
Mode 1: debit-credit, credit-debit for accounts in brackets;
Mode 2: credit-debit for all accounts;
Mode 3: credit-debit, debit-credit for accounts in brackets.
Also the user may specify to use only the debit or credit of the account
        instead of the balance by writing "debit(551)" or "credit(551)".
"""
acc_obj = self.pool.get('account.account')
logger = logging.getLogger(__name__)
res = 0.0
line = self.browse(cr, uid, ids[0], context=context)
company_id = line.report_id.company_id.id
# We iterate over the accounts listed in "code", so code can be
# a string like "430+431+432-438"; accounts split by "+" will be added,
        # accounts split by "-" will be subtracted.
        for acc_code in re.findall(r'(-?\w*\(?[0-9a-zA-Z_]*\)?)', code):
# Check if the code is valid (findall might return empty strings)
acc_code = acc_code.strip()
if acc_code:
                # Check the sign of the code (subtraction)
if acc_code.startswith('-'):
sign = -1
acc_code = acc_code[1:].strip() # Strip the sign
else:
sign = 1
if re.match(r'^debit\(.*\)$', acc_code):
# Use debit instead of balance
mode = 'debit'
acc_code = acc_code[6:-1] # Strip debit()
elif re.match(r'^credit\(.*\)$', acc_code):
# Use credit instead of balance
mode = 'credit'
acc_code = acc_code[7:-1] # Strip credit()
else:
mode = 'balance'
# Calculate sign of the balance mode
sign_mode = 1
if balance_mode in (1, 2, 3):
# for accounts in brackets or mode 2, the sign is reversed
if (acc_code.startswith('(') and acc_code.endswith(')')) \
or balance_mode == 2:
sign_mode = -1
# Strip the brackets (if any)
if acc_code.startswith('(') and acc_code.endswith(')'):
acc_code = acc_code[1:-1]
# Search for the account (perfect match)
account_ids = acc_obj.search(cr, uid, [
('code', '=', acc_code),
('company_id','=', company_id)
], context=context)
if not account_ids:
# Search for a subaccount ending with '0'
account_ids = acc_obj.search(cr, uid, [
('code', '=like', '%s%%0' % acc_code),
('company_id','=', company_id)
], context=context)
if not account_ids:
logger.warning("Account with code '%s' not found!"
                                   % acc_code)
for account in acc_obj.browse(cr, uid, account_ids,
context=context):
if mode == 'debit':
res += account.debit * sign
elif mode == 'credit':
res += account.credit * sign
else:
res += account.balance * sign * sign_mode
return res
|
agpl-3.0
| 6,735,523,408,756,101,000 | 49.020501 | 124 | 0.491234 | false |
richardotis/pycalphad-fitting
|
fit.py
|
1
|
2193
|
"""
Automated fitting script.
"""
import os
import sys
import fnmatch
import argparse
import logging
import multiprocessing
from paramselect import fit, load_datasets
from distributed import Client, LocalCluster
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--dask-scheduler",
metavar="HOST:PORT",
help="Host and port of dask distributed scheduler")
parser.add_argument(
"--iter-record",
metavar="FILE",
help="Output file for recording iterations (CSV)")
parser.add_argument(
"--tracefile",
metavar="FILE",
help="Output file for recording MCMC trace (HDF5)")
parser.add_argument(
"--fit-settings",
metavar="FILE",
default="input.json",
help="Input JSON file with settings for fit")
parser.add_argument(
"--input-tdb",
metavar="FILE",
default=None,
help="Input TDB file, with desired degrees of freedom to fit specified as FUNCTIONs starting with 'VV'")
parser.add_argument(
"--output-tdb",
metavar="FILE",
default="out.tdb",
help="Output TDB file")
def recursive_glob(start, pattern):
matches = []
for root, dirnames, filenames in os.walk(start):
for filename in fnmatch.filter(filenames, pattern):
matches.append(os.path.join(root, filename))
return sorted(matches)
if __name__ == '__main__':
args = parser.parse_args(sys.argv[1:])
if not args.dask_scheduler:
args.dask_scheduler = LocalCluster(n_workers=int(multiprocessing.cpu_count() / 2), threads_per_worker=1, nanny=True)
client = Client(args.dask_scheduler)
logging.info(
"Running with dask scheduler: %s [%s cores]" % (
args.dask_scheduler,
sum(client.ncores().values())))
datasets = load_datasets(sorted(recursive_glob('Al-Ni', '*.json')))
recfile = open(args.iter_record, 'a') if args.iter_record else None
tracefile = args.tracefile if args.tracefile else None
try:
dbf, mdl, model_dof = fit(args.fit_settings, datasets, scheduler=client, recfile=recfile, tracefile=tracefile)
finally:
if recfile:
recfile.close()
dbf.to_file(args.output_tdb, if_exists='overwrite')
|
mit
| 7,039,180,532,096,761,000 | 28.24 | 124 | 0.668947 | false |
JeremyMorgan/Raspberry_Pi_Temperature
|
temp-humidity.py
|
1
|
1864
|
import os
import glob
import subprocess
import calendar
import time
import urllib2
import json
import Adafruit_DHT
#initialize
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
sensor = Adafruit_DHT.DHT22
pin = 8
#Temperature device
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
# Opens raw device, code changed to reflect issue in Raspbian
def read_temp_raw():
catdata = subprocess.Popen(['cat',device_file], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out,err = catdata.communicate()
out_decode = out.decode('utf-8')
lines = out_decode.split('\n')
return lines
# Reads temperature, outputs Fahrenheit
def read_temp():
lines = read_temp_raw()
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
temp_c = float(temp_string) / 1000.0
temp_f = temp_c * 9.0 / 5.0 + 32.0
return temp_f
# Try to grab a sensor reading. Use the read_retry method which will retry up
# to 15 times to get a sensor reading (waiting 2 seconds between each retry).
humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)
url = 'http://[YOUR WEBSITE]/api/status/'
postdata = {
'tempFahrenheit' : str(read_temp()),
'tempCelcius' : temperature,
'humidity' : humidity
}
req = urllib2.Request(url)
req.add_header('Content-Type','application/json')
data = json.dumps(postdata)
response = urllib2.urlopen(req,data)
#add in Azure
url2 = 'http://[YOUR WEBSITE]/api/Reading/'
postdata = {
'temp': str(read_temp())
}
req2 = urllib2.Request(url2)
req2.add_header('Content-Type','application/json')
data2 = json.dumps(postdata)
response = urllib2.urlopen(req2,data2)
|
gpl-2.0
| -9,041,252,855,166,964,000 | 24.888889 | 99 | 0.674356 | false |
christiansandberg/canopen
|
examples/simple_ds402_node.py
|
1
|
4328
|
import canopen
import sys
import os
import traceback
import time
try:
# Start with creating a network representing one CAN bus
network = canopen.Network()
# Connect to the CAN bus
network.connect(bustype='kvaser', channel=0, bitrate=1000000)
network.check()
# Add some nodes with corresponding Object Dictionaries
node = canopen.BaseNode402(35, 'eds/e35.eds')
network.add_node(node)
# network.add_node(34, 'eds/example34.eds')
# node = network[34]
# Reset network
node.nmt.state = 'RESET COMMUNICATION'
#node.nmt.state = 'RESET'
node.nmt.wait_for_bootup(15)
print('node state 1) = {0}'.format(node.nmt.state))
# Iterate over arrays or records
error_log = node.sdo[0x1003]
for error in error_log.values():
print("Error {0} was found in the log".format(error.raw))
for node_id in network:
print(network[node_id])
print('node state 2) = {0}'.format(node.nmt.state))
# Read a variable using SDO
node.sdo[0x1006].raw = 1
node.sdo[0x100c].raw = 100
node.sdo[0x100d].raw = 3
node.sdo[0x1014].raw = 163
node.sdo[0x1003][0].raw = 0
# Transmit SYNC every 100 ms
network.sync.start(0.1)
node.load_configuration()
print('node state 3) = {0}'.format(node.nmt.state))
node.setup_402_state_machine()
device_name = node.sdo[0x1008].raw
vendor_id = node.sdo[0x1018][1].raw
print(device_name)
print(vendor_id)
node.state = 'SWITCH ON DISABLED'
print('node state 4) = {0}'.format(node.nmt.state))
# Read PDO configuration from node
node.tpdo.read()
# Re-map TxPDO1
node.tpdo[1].clear()
node.tpdo[1].add_variable('Statusword')
node.tpdo[1].add_variable('Velocity actual value')
node.tpdo[1].trans_type = 1
node.tpdo[1].event_timer = 0
node.tpdo[1].enabled = True
# Save new PDO configuration to node
node.tpdo.save()
    # publish a value to the control word (in this case reset the fault at the motors)
node.rpdo.read()
node.rpdo[1]['Controlword'].raw = 0x80
node.rpdo[1].transmit()
node.rpdo[1]['Controlword'].raw = 0x81
node.rpdo[1].transmit()
node.state = 'READY TO SWITCH ON'
node.state = 'SWITCHED ON'
node.rpdo.export('database.dbc')
# -----------------------------------------------------------------------------------------
print('Node booted up')
timeout = time.time() + 15
node.state = 'READY TO SWITCH ON'
while node.state != 'READY TO SWITCH ON':
if time.time() > timeout:
raise Exception('Timeout when trying to change state')
time.sleep(0.001)
timeout = time.time() + 15
node.state = 'SWITCHED ON'
while node.state != 'SWITCHED ON':
if time.time() > timeout:
raise Exception('Timeout when trying to change state')
time.sleep(0.001)
timeout = time.time() + 15
node.state = 'OPERATION ENABLED'
while node.state != 'OPERATION ENABLED':
if time.time() > timeout:
raise Exception('Timeout when trying to change state')
time.sleep(0.001)
print('Node Status {0}'.format(node.powerstate_402.state))
# -----------------------------------------------------------------------------------------
node.nmt.start_node_guarding(0.01)
while True:
try:
network.check()
except Exception:
break
# Read a value from TxPDO1
node.tpdo[1].wait_for_reception()
speed = node.tpdo[1]['Velocity actual value'].phys
# Read the state of the Statusword
statusword = node.sdo[0x6041].raw
print('statusword: {0}'.format(statusword))
print('VEL: {0}'.format(speed))
time.sleep(0.01)
except KeyboardInterrupt:
pass
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
traceback.print_exc()
finally:
# Disconnect from CAN bus
print('going to exit... stopping...')
if network:
for node_id in network:
node = network[node_id]
node.nmt.state = 'PRE-OPERATIONAL'
node.nmt.stop_node_guarding()
network.sync.stop()
network.disconnect()
|
mit
| -1,015,862,993,798,331,600 | 26.392405 | 95 | 0.594732 | false |
USGSDenverPychron/pychron
|
pychron/experiment/tests/analysis_grouping_test.py
|
1
|
3212
|
from pychron.experiment.automated_run.persistence import AutomatedRunPersister
from pychron.experiment.automated_run.spec import AutomatedRunSpec
__author__ = 'ross'
import unittest
runs = [('references', 'bu-j-1', 'b1'), ('A', '11111', 'u1'),
('references', 'bu-j-2', 'b2'), ('B','22222','u2'), ('B','33333','u3')]
class MockDB(object):
def __init__(self):
self.groups = {}
self._cnt = 0
def get_analysis_group(self, prj, **kw):
if not prj in self.groups:
self.groups[prj] = MockAnalysisGroup()
return self.groups[prj]
def add_analysis_group_set(self, ag, analysis):
ag.analyses.append(analysis)
print 'adding {}, {}'.format(analysis.uuid, len(ag.analyses))
def get_last_analysis(self, **kw):
if self._cnt==0:
pass
else:
m = MockAnalysis()
m.uuid='12345'
m.project='A'
return m
self._cnt+=1
def get_analysis_uuid(self, u):
t = next((r for r in runs if r[2]==u), None)
if t:
m=MockAnalysis()
m.uuid=u
m.rid = t[1]
return m
class MockAnalysisGroup(object):
def __init__(self):
self.analyses = []
class MockAnalysis(object):
project = ''
@property
def project_name(self):
return self.project
class MockRunSpec(object):
project = ''
def gen_mock_analysis():
for _, _, u in runs:
m = MockAnalysis()
m.uuid = u
yield m
def gen_mock_runspec():
for p, l, _ in runs:
rs = AutomatedRunSpec()
rs.project = p
rs.labnumber = l
yield rs
class MyTestCase(unittest.TestCase):
# @classmethod
# def setUpClass(cls):
# cls.persister =
def setUp(self):
self.mock_analyses = gen_mock_analysis()
self.runspecs = gen_mock_runspec()
self.persister =AutomatedRunPersister()
self.db = MockDB()
def test_save_group(self):
per = self.persister
db = self.db
per.run_spec = self.runspecs.next()
analysis = self.mock_analyses.next()
per._save_analysis_group(db, analysis)
self.assertEqual(True, True)
def test_save_group_sequence1(self):
per = self.persister
db = self.db
for i in range(3):
per.run_spec = self.runspecs.next()
analysis = self.mock_analyses.next()
per._save_analysis_group(db, analysis)
for a in db.groups['A-autogen'].analyses:
print a.uuid
uuids = [a.uuid for a in db.groups['A-autogen'].analyses]
self.assertEqual(uuids, ['u1','b1','b2'])
def test_save_group_sequence2(self):
per = self.persister
db = self.db
for i in range(5):
per.run_spec = self.runspecs.next()
analysis = self.mock_analyses.next()
per._save_analysis_group(db, analysis)
# for a in db.groups['B-autogen'].analyses:
# print a.uuid
uuids = [a.uuid for a in db.groups['B-autogen'].analyses]
self.assertEqual(uuids, ['u2','b2','u3'])
if __name__ == '__main__':
unittest.main()
|
apache-2.0
| -8,337,034,916,786,816,000 | 23.707692 | 79 | 0.554172 | false |
zielmicha/hera
|
hera/webapp/urls.py
|
1
|
1682
|
from django.conf.urls import include, url, patterns
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from django.views.generic import RedirectView, TemplateView
from hera.webapp import account_views
from hera.webapp import sandbox_views
from hera.webapp import run_views
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', admin.site.urls),
url(r'^accounts/', include('registration.backends.simple.urls')),
url(r'^accounts/profile', RedirectView.as_view(url='/account')),
url(r'^account$', RedirectView.as_view(url='/account/')),
url(r'^users/', RedirectView.as_view(url='/account')),
url(r'^$', TemplateView.as_view(template_name='main.html')),
url(r'^sandbox/(.+)/$', sandbox_views.Sandbox.as_view()),
url(r'^account/$', account_views.UserOverview.as_view()),
url(r'^account/billing$', account_views.UserBilling.as_view()),
url(r'^account/(.+)/overview$', account_views.AccountOverview.as_view()),
url(r'^account/(.+)/api$', account_views.AccountAPI.as_view()),
url(r'^account/(.+)/templates$', account_views.AccountTemplates.as_view()),
url(r'^run/(\d+)/$', run_views.MainView.as_view()),
url(r'^run/attach$', run_views.attach),
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
agpl-3.0
| 6,931,740,068,921,254,000 | 59.071429 | 98 | 0.580856 | false |
fu-berlin-swp-2014/center-points
|
centerpoints/visualise.py
|
1
|
19882
|
import time
import colorsys
import sys
from PySide import QtGui, QtOpenGL, QtCore
from PySide.QtCore import QTimer, SIGNAL
from PySide.QtGui import QLabel
import copy
import numpy as np
from OpenGL.GL import *
from OpenGL.GLU import *
import centerpoints.data_set
class Color:
def __init__(self, color=None):
if type(color) is np.ndarray:
self._color = color
elif color is None:
self._color = (1, 1, 1, 1)
elif type(color) is int:
            # Extract RGBA channels; parentheses are required because ">>"
            # binds tighter than "&" in Python.
            self._color = ((color & 0x00FF0000) >> 16,
                           (color & 0x0000FF00) >> 8,
                           color & 0x000000FF,
                           (color & 0xFF000000) >> 24)
elif type(color) == tuple:
self._color = color
else:
raise ValueError(str(type(color)) + " is not supported")
if type(self._color) is tuple:
if max(self._color) > 1:
self._color = (c / 255 for c in self._color)
def check_size(self, size):
if self._color == np.ndarray:
assert size == len(gl_array(self._color.flatten().tolist()))
def gl_array(self, n_points):
if type(self._color) is np.ndarray:
arr = self._color
else:
arr = np.zeros((n_points + 2, 4))
for i, c in enumerate(self._color):
arr[:, i].fill(c)
return gl_array(arr.flatten().tolist())
class Camera(object):
ISOMETRIC = 0
PROJECTIVE = 1
MODES = [ISOMETRIC, PROJECTIVE]
mode = PROJECTIVE
x, y, z = 0, 0, 512
rotx, roty, rotz = 30, -45, 0
w, h = 640, 480
far = 2048
fov = 60
def set_size(self, width, height):
""" Adjust window size.
"""
self.w, self.h = width, height
glViewport(0, 0, width, height)
self.update_projection()
def update_projection(self):
if self.mode == self.ISOMETRIC:
self.isometric()
elif self.mode == self.PROJECTIVE:
self.perspective()
def isometric(self):
        """ Isometric projection.
        """
        self.mode = self.ISOMETRIC
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(-self.w/2., self.w/2., -self.h/2., self.h/2., 0, self.far)
glMatrixMode(GL_MODELVIEW)
def perspective(self):
""" Perspective projection.
"""
self.mode = self.PROJECTIVE
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(self.fov, float(self.w)/self.h, 0.1, self.far)
glMatrixMode(GL_MODELVIEW)
def drag(self, x, y, dx, dy, button, modifiers):
""" Mouse drag event handler. """
if button == 1:
self.x -= dx*2
self.y -= dy*2
elif button == 2:
self.x -= dx*2
self.z -= dy*2
elif button == 4:
self.roty += dx/4.
self.rotx -= dy/4.
def apply(self):
""" Apply camera transformation. """
glLoadIdentity()
glTranslatef(-self.x, -self.y, -self.z)
glRotatef(self.rotx, 1, 0, 0)
glRotatef(self.roty, 0, 1, 0)
glRotatef(self.rotz, 0, 0, 1)
def gl_array(list):
""" Converts a list to GLFloat list.
"""
return (GLdouble * len(list))(*list)
def draw_vertex_array(vertices, colors=None, mode=GL_LINES):
""" Draw a vertex array.
"""
glEnableClientState(GL_VERTEX_ARRAY)
glEnableClientState(GL_COLOR_ARRAY)
if colors is None:
colors = Color()
gl_colors = colors.gl_array(len(vertices) / 3)
glVertexPointer(3, GL_DOUBLE, 0, vertices)
glColorPointer(4, GL_DOUBLE, 0, gl_colors)
glDrawArrays(mode, 0, int(len(vertices) / 3))
glDisableClientState(GL_VERTEX_ARRAY)
glDisableClientState(GL_COLOR_ARRAY)
# TODO:
def draw_point(point, color, size=1):
glPointSize(size)
glBegin(GL_POINTS)
glColor4f(*color)
glVertex3f(*point)
glEnd()
glPointSize(1)
def numpy2polygon(points):
# if (points[0] != points[-1]).any():
# points = np.vstack((points, points[0, :]))
return points
def numpy2polygons(polygons):
return np.array([numpy2polygon(p) for p in polygons], dtype=np.float32)
class VisualisationController(QtGui.QMainWindow):
def __init__(self, data=None, data_name="Data Points", n_data=1000,
dim_data=3, data_func=None, algorithm=None, parent=None):
QtGui.QMainWindow.__init__(self, parent)
import centerpoints.iterated_radon
# _visualisation and _central_widget will be set in
# update_visualisation()
self._visualisation = None
self._central_widget = None
self.n_data = n_data
self.dim_data = dim_data
self._data_name = "Data Points"
self._scale_factor = 100
        if data is not None:
            self._data = data
            # No generator function when explicit data is supplied; this keeps
            # update_visualisation() from failing on a missing attribute.
            self._data_func = None
else:
if data_func is None:
data_func = centerpoints.data_set.sphere_volume
self._data_name = "Sphere Volume"
self._data_func = data_func
self._data = self._data_func(self.n_data, self.dim_data)
if algorithm is None:
algorithm = centerpoints.iterated_radon.IteratedRadon()
self._algorithm = algorithm
# Setup Gui
self._algo_menu = QtGui.QMenu("Algorithms", self)
algorithms = {
# Add new algorithms here
"Iterated Radon": centerpoints.iterated_radon.IteratedRadon()
}
for name, cls in algorithms.items():
action = self._algo_menu.addAction(name)
action.triggered.connect(
lambda c=cls: self.set_algorithm(c)
)
self.menuBar().addMenu(self._algo_menu)
self._data_menu = QtGui.QMenu("Data Sets", self)
data_sets = {
"Cube Volume": centerpoints.data_set.cube,
"Cube Surface": centerpoints.data_set.cube_surface,
"Normal Distribution": centerpoints.data_set.normal_distribution,
"Sphere Surface": centerpoints.data_set.sphere_surface,
"Sphere Volume": centerpoints.data_set.sphere_volume,
}
for name, func in sorted(data_sets.items()):
self._data_menu.addAction(name).triggered.connect(
lambda n=name, f=func: self.set_data_func(n, f)
)
self.menuBar().addMenu(self._data_menu)
self.update_visualisation()
self.setWindowTitle(self.tr("Centerpoints"))
def set_data(self, name, data):
self._data_name = name
self._data = data
self.update_visualisation()
def set_algorithm(self, algo_class):
self._algorithm = algo_class
self.update_visualisation()
def set_data_func(self, name, data_func):
self._data_name = name
self._data_func = data_func
self.update_visualisation()
def update_visualisation(self):
if self._data_func is not None:
self._data = \
self._scale_factor * self._data_func(self.n_data, self.dim_data)
self._visualisation = Visualisation(
start_step=Step(description=self._data_name))
self._visualisation.points(self._data, (0.5, 0.5, 0.5, 0.5))
self._algorithm.visualisation(self._data,
self._visualisation)
if self._central_widget is not None:
self._central_widget.deleteLater()
self._central_widget = VisualisationWidget(
self._visualisation, parent=self)
self.setCentralWidget(self._central_widget)
class VisualisationWidget(QtGui.QWidget):
def __init__(self, visualisation, parent=None):
QtGui.QWidget.__init__(self, parent)
self._visualisation = visualisation
self._glWidget = GLWidget(visualisation)
self._stepsList = QtGui.QListWidget()
self._stepsList.setMaximumWidth(200)
self._stepsList.setSelectionMode(QtGui.QAbstractItemView.MultiSelection)
for i, s in enumerate(self._visualisation._steps):
self._stepsList.addItem(s.description)
self._mainLayout = QtGui.QHBoxLayout()
self._mainLayout.addWidget(self._stepsList)
self._glOptionLayout = QtGui.QVBoxLayout()
self._glOptionLayout.addWidget(self._glWidget)
self._optionLayout = QtGui.QHBoxLayout()
self._animate_btn = QtGui.QPushButton("Animate")
self._animate_btn.setCheckable(True)
self._animate_btn.clicked.connect(
lambda: self._glWidget.toggle_animation())
self._optionLayout.addWidget(self._animate_btn)
self._animate_slider = QtGui.QSlider(QtCore.Qt.Horizontal)
self._animate_slider.valueChanged.connect(
lambda s: self._glWidget.set_animation_speed((100 - s) / 10))
self._optionLayout.addWidget(QLabel("Animation Speed:"))
self._optionLayout.addWidget(self._animate_slider)
self._show_axis_btn = QtGui.QPushButton("Show Coordinate System")
self._show_axis_btn.setCheckable(True)
self._show_axis_btn.clicked.connect(
self._glWidget.toggle_axis)
self._optionLayout.addWidget(self._show_axis_btn)
self._glOptionLayout.addLayout(self._optionLayout)
self._mainLayout.addLayout(self._glOptionLayout)
self._stepsList.selectAll()
self._stepsList.connect(SIGNAL("itemSelectionChanged()"),
self.changedSelection)
self.setLayout(self._mainLayout)
def changedSelection(self):
indexes = [i.row() for i in self._stepsList.selectedIndexes()]
steps = self._visualisation._steps
for i in range(len(steps)):
steps[i].selected = i in indexes
class GLWidget(QtOpenGL.QGLWidget):
KEY_DOWN_ROTATE = [QtCore.Qt.Key_Down]
KEY_TOP_ROTATE = [QtCore.Qt.Key_Up]
KEY_LEFT_ROTATE = [QtCore.Qt.Key_Left]
KEY_RIGHT_ROTATE = [QtCore.Qt.Key_Right]
KEY_FARER = [QtCore.Qt.Key_W, QtCore.Qt.Key_Plus]
KEY_NEARER = [QtCore.Qt.Key_S, QtCore.Qt.Key_Minus]
def __init__(self, visualisation, parent=None, cam=None, fps=30):
QtOpenGL.QGLWidget.__init__(self, parent)
if cam is None:
cam = Camera()
self._visualisation = visualisation
self.steps_delay = 1
self.axis_factor = 50
self._start_time = time.time()
self.animation_speed = 1
self.animation = False
self.show_axis = True
self.cam = cam
self.initOpenGL()
self.last_mouse_pos = QtCore.QPoint()
self.timer = QTimer()
self.connect(self.timer, SIGNAL("timeout()"), self.updateGL)
self.timer.start()
self.timer.setInterval(1000 / fps)
self.setFocusPolicy(QtCore.Qt.ClickFocus)
def initOpenGL(self):
""" Initial OpenGL configuration. """
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glDepthFunc(GL_LEQUAL)
def paintGL(self):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
self.cam.apply()
self._draw_visualisation()
def repaint(self, *args, **kwargs):
self.paintGL()
def resizeGL(self, width, height):
self.cam.set_size(width, height)
def mousePressEvent(self, event):
self.last_mouse_pos = QtCore.QPoint(event.pos())
def mouseMoveEvent(self, event):
dx = event.x() - self.last_mouse_pos.x()
dy = event.y() - self.last_mouse_pos.y()
if event.buttons() & QtCore.Qt.LeftButton:
self.cam.rotx += dy
self.cam.roty += dx
elif event.buttons() & QtCore.Qt.RightButton:
self.cam.rotx += 8 * dy
self.cam.roty += 8 * dx
self.last_mouse_pos = QtCore.QPoint(event.pos())
def keyPressEvent(self, event):
# TODO: refactor
key = event.key()
delta = 10
if key == QtCore.Qt.Key_P:
self.cam.perspective()
elif key == QtCore.Qt.Key_I:
self.cam.isometric()
elif key == QtCore.Qt.Key_W:
self.cam.z -= delta
elif key == QtCore.Qt.Key_S:
self.cam.z += delta
elif key == QtCore.Qt.Key_A:
self.cam.x -= delta
elif key == QtCore.Qt.Key_D:
self.cam.x += delta
elif self._is_in(key, self.KEY_LEFT_ROTATE):
self.cam.roty -= 4.
elif self._is_in(key, self.KEY_RIGHT_ROTATE):
self.cam.roty += 4.
elif self._is_in(key, self.KEY_TOP_ROTATE):
self.cam.rotx += 4
elif self._is_in(key, self.KEY_DOWN_ROTATE):
self.cam.rotx -= 4
@staticmethod
def _is_in(buttons, keys):
for k in keys:
if buttons == k:
return True
return False
def set_animation_speed(self, speed):
" set speed between 0 and 100, where 10 is equals to a second"
self.animation_speed = speed / 10
def toggle_axis(self):
self.show_axis = not self.show_axis
def axis(self):
""" Define vertices and colors for 3 planes
"""
d = self.axis_factor
vertices, colors = [], []
#XZ RED
vertices.extend([-d, 0, -d, d, 0, -d, d, 0, d, -d, 0, d])
for i in range(4):
colors.extend([1, 0, 0, 0.5])
#YZ GREEN
vertices.extend([0, -d, -d, 0, -d, d, 0, d, d, 0, d, -d])
for i in range(4):
colors.extend([0, 1, 0, 0.5])
#XY BLUE
vertices.extend([-d, -d, 0, d, -d, 0, d, d, 0, -d, d, 0])
for i in range(4):
colors.extend([0, 0, 1, 0.5])
return gl_array(vertices), Color(np.array(colors))
def draw_axis(self):
""" Draw the 3 planes """
vertices, colors = self.axis()
glEnable(GL_DEPTH_TEST)
draw_vertex_array(vertices, colors, GL_QUADS)
glDisable(GL_DEPTH_TEST)
def _draw_visualisation(self):
if self.show_axis:
self.draw_axis()
selected_steps = [s for s in self._visualisation._steps if s.selected]
n = len(selected_steps)
if self.animation:
n_visible = (time.time() % ((n+2)*self.animation_speed)) \
/ self.animation_speed - 1
else:
n_visible = n
for i, s in enumerate(selected_steps):
if i < n_visible:
s.draw()
def toggle_animation(self):
self.animation = not self.animation
def speed_up(self):
self.animation_speed += 1
def speed_down(self):
self.animation_speed -= 1
class Visualisation():
def __init__(self, start_step=None):
if start_step is None:
start_step = Step()
self._current_step = start_step
self._steps = [self._current_step]
def add(self, elem):
self._current_step.add(elem)
def point(self, point, color, size):
self.add(Points(np.array([point]), color, size))
def points(self, points, color):
self.add(Points(points, color))
def polygon(self, points, color):
        # Close the polygon: if the first and last points differ, append the
        # first point again (elementwise comparison needs .any()).
        if (points[0, :] != points[-1, :]).any():
            points = np.vstack((points, points[0, :]))
self.add(Polygons(np.array([points]), color))
def next_step(self, step):
self._current_step = step
self._steps.append(step)
def next(self, description):
self._current_step = Step(description)
self._steps.append(self._current_step)
def show(self):
app = QtCore.QCoreApplication.instance()
if app is None:
app = QtGui.QApplication(sys.argv)
widget = VisualisationWidget(self)
widget.show()
app.exec_()
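# A minimal sketch of driving the Visualisation API above (the data values
# are assumptions for illustration):
#   vis = Visualisation(start_step=Step("Input points"))
#   vis.points(np.random.rand(100, 3) * 100, (0.5, 0.5, 0.5, 1))
#   vis.next("Result")
#   vis.point((0, 0, 0), (1, 0, 0, 1), size=5)
#   vis.show()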
class ColorGroup:
def __init__(self):
self.n_groups = 0
def next_member(self):
self.n_groups += 1
return ColorGroupMember(self, self.n_groups - 1)
def get_color(self, index):
h = index / self.n_groups
s = 1
v = 1
r = 4
alpha = index / ((1 - 1/r)*self.n_groups) + 1/r
return colorsys.hsv_to_rgb(h, s, v) + (alpha, )
class ColorGroupMember:
def __init__(self, color_group, i):
self._color_group = color_group
self._i = i
def get_color(self):
return self._color_group.get_color(self._i)
class Step:
def __init__(self, description=""):
self.description = description
self._elems = []
self.selected = True
def add(self, elem):
self._elems.append(elem)
def draw(self):
for elem in self._elems:
elem.draw()
def qtwidget(self):
return QLabel(self.description)
class AnimationStep(object):
def __init__(self):
self.start = 0
self.end = sys.maxsize
def is_active(self, current_step):
return self.start <= current_step <= self.end
class Polygons(AnimationStep):
def __init__(self, polygons, colors=None, wireframe=None):
"""
Add a polygon to the visualisation step.
:param wireframe:
:param polygons: ndarray with a (n, m, 3)-shape, where n is the number
of polygons and m is the number of points of a polygon.
:param colors: ndarray with a (p, 4)-shape, where p is the total
number points. Each row represents a color
`[r, g, b, a]`.
"""
super(Polygons, self).__init__()
self._polygons = numpy2polygons(polygons)
self._n_polygons, _, _ = self._polygons.shape
self._colors = Color(colors)
def draw(self):
glLineWidth(1)
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
glEnable(GL_DEPTH_TEST)
for polygon in self._polygons:
gl_polygon = gl_array(polygon.flatten().tolist())
draw_vertex_array(gl_polygon, self._colors, GL_POLYGON)
glDisable(GL_DEPTH_TEST)
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
class ConvexHulls(Polygons):
pass
class RadonPartition(AnimationStep):
# FIXME: generalise color from ColorGroup
def __init__(self, smaller_points, bigger_points, radon_point, color):
super(RadonPartition, self).__init__()
self._smaller = smaller_points
self._bigger = bigger_points
self._radon_point = radon_point
self._color = color
def draw(self):
color = self._color.get_color()
ConvexHulls([self._smaller], color).draw()
ConvexHulls([self._bigger], color).draw()
draw_point(self._radon_point, color)
class PointGroups():
def __init__(self):
self._groups = []
def new_group(self, points):
self._groups.append(Points(points))
for i, group in enumerate(self._groups):
rgb = self.color(i)
group.set_color(rgb)
def draw(self):
for group in self._groups:
group.draw()
def color(self, i):
n = len(self._groups)
h = i / n
s = 1
v = 1
return colorsys.hsv_to_rgb(h, s, v) + (1, ) # set alpha
class Points(AnimationStep):
def __init__(self, points, colors=None, size=1):
super(Points, self).__init__()
self._n_points, _ = points.shape
self._points = points.astype(np.float32)
self._colors = Color(colors)
self._size = size
def set_color(self, color):
self._colors = Color(color)
def draw(self):
glPointSize(self._size)
draw_vertex_array(gl_array(self._points.flatten().tolist()),
self._colors, GL_POINTS)
glPointSize(1)
class Gui:
def __init__(self):
self.app = QtGui.QApplication(sys.argv)
window = VisualisationController()
window.show()
def run_gui():
gui = Gui()
gui.app.exec_()
|
mit
| 1,970,213,918,985,893,400 | 29.447167 | 85 | 0.572427 | false |
dbaty/deform_ext_autocomplete
|
docs/conf.py
|
1
|
2104
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
import datetime
import pkg_resources
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Extended Autocomplete'
this_year = datetime.date.today().year
copyright = f'{this_year}, Damien Baty'
author = 'Damien Baty'
version = pkg_resources.get_distribution("deform_ext_autocomplete").version
release = version
# -- General configuration ---------------------------------------------------
master_doc = 'index' # no file extension
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'deform': ('http://docs.pylonsproject.org/projects/deform/en/latest/', None),
'colander': ('http://docs.pylonsproject.org/projects/colander/en/latest', None),
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
|
bsd-3-clause
| 3,511,380,996,741,106,700 | 33.491803 | 84 | 0.656369 | false |
rajalokan/keystone
|
keystone/identity/backends/ldap/common.py
|
1
|
71718
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import codecs
import functools
import os.path
import re
import sys
import weakref
import ldap.controls
import ldap.filter
import ldappool
from oslo_log import log
from oslo_utils import reflection
import six
from six.moves import map, zip
from keystone.common import driver_hints
from keystone import exception
from keystone.i18n import _
LOG = log.getLogger(__name__)
LDAP_VALUES = {'TRUE': True, 'FALSE': False}
LDAP_SCOPES = {'one': ldap.SCOPE_ONELEVEL,
'sub': ldap.SCOPE_SUBTREE}
LDAP_DEREF = {'always': ldap.DEREF_ALWAYS,
'default': None,
'finding': ldap.DEREF_FINDING,
'never': ldap.DEREF_NEVER,
'searching': ldap.DEREF_SEARCHING}
LDAP_TLS_CERTS = {'never': ldap.OPT_X_TLS_NEVER,
'demand': ldap.OPT_X_TLS_DEMAND,
'allow': ldap.OPT_X_TLS_ALLOW}
# RFC 4511 (The LDAP Protocol) defines a list containing only the OID '1.1' to
# indicate that no attributes should be returned besides the DN.
DN_ONLY = ['1.1']
_utf8_encoder = codecs.getencoder('utf-8')
# FIXME(knikolla): This enables writing to the LDAP backend
# Only enabled during tests and unsupported
WRITABLE = False
def utf8_encode(value):
"""Encode a basestring to UTF-8.
    If the string is unicode, encode it to UTF-8; if the string is
    str, then assume it's already encoded. Otherwise raise a TypeError.
:param value: A basestring
:returns: UTF-8 encoded version of value
:raises TypeError: If value is not basestring
"""
if isinstance(value, six.text_type):
return _utf8_encoder(value)[0]
elif isinstance(value, six.binary_type):
return value
else:
value_cls_name = reflection.get_class_name(
value, fully_qualified=False)
raise TypeError("value must be basestring, "
"not %s" % value_cls_name)
_utf8_decoder = codecs.getdecoder('utf-8')
def utf8_decode(value):
"""Decode a from UTF-8 into unicode.
If the value is a binary string assume it's UTF-8 encoded and decode
it into a unicode string. Otherwise convert the value from its
type into a unicode string.
:param value: value to be returned as unicode
:returns: value as unicode
:raises UnicodeDecodeError: for invalid UTF-8 encoding
"""
if isinstance(value, six.binary_type):
return _utf8_decoder(value)[0]
return six.text_type(value)
def py2ldap(val):
"""Type convert a Python value to a type accepted by LDAP (unicode).
The LDAP API only accepts strings for values therefore convert
the value's type to a unicode string. A subsequent type conversion
will encode the unicode as UTF-8 as required by the python-ldap API,
but for now we just want a string representation of the value.
:param val: The value to convert to a LDAP string representation
:returns: unicode string representation of value.
"""
if isinstance(val, bool):
return u'TRUE' if val else u'FALSE'
else:
return six.text_type(val)
def enabled2py(val):
"""Similar to ldap2py, only useful for the enabled attribute."""
try:
return LDAP_VALUES[val]
except KeyError: # nosec
# It wasn't a boolean value, will try as an int instead.
pass
try:
return int(val)
except ValueError: # nosec
# It wasn't an int either, will try as utf8 instead.
pass
return utf8_decode(val)
def ldap2py(val):
"""Convert an LDAP formatted value to Python type used by OpenStack.
Virtually all LDAP values are stored as UTF-8 encoded strings.
OpenStack prefers values which are unicode friendly.
:param val: LDAP formatted value
:returns: val converted to preferred Python type
"""
return utf8_decode(val)
def convert_ldap_result(ldap_result):
"""Convert LDAP search result to Python types used by OpenStack.
Each result tuple is of the form (dn, attrs), where dn is a string
containing the DN (distinguished name) of the entry, and attrs is
a dictionary containing the attributes associated with the
entry. The keys of attrs are strings, and the associated values
are lists of strings.
OpenStack wants to use Python types of its choosing. Strings will
be unicode, truth values boolean, whole numbers int's, etc. DN's will
also be decoded from UTF-8 to unicode.
:param ldap_result: LDAP search result
:returns: list of 2-tuples containing (dn, attrs) where dn is unicode
and attrs is a dict whose values are type converted to
OpenStack preferred types.
"""
py_result = []
at_least_one_referral = False
for dn, attrs in ldap_result:
ldap_attrs = {}
if dn is None:
# this is a Referral object, rather than an Entry object
at_least_one_referral = True
continue
for kind, values in attrs.items():
try:
val2py = enabled2py if kind == 'enabled' else ldap2py
ldap_attrs[kind] = [val2py(x) for x in values]
except UnicodeDecodeError:
LOG.debug('Unable to decode value for attribute %s', kind)
py_result.append((utf8_decode(dn), ldap_attrs))
if at_least_one_referral:
LOG.debug(('Referrals were returned and ignored. Enable referral '
'chasing in keystone.conf via [ldap] chase_referrals'))
return py_result
def safe_iter(attrs):
if attrs is None:
return
elif isinstance(attrs, list):
for e in attrs:
yield e
else:
yield attrs
def parse_deref(opt):
try:
return LDAP_DEREF[opt]
except KeyError:
raise ValueError(_('Invalid LDAP deref option: %(option)s. '
'Choose one of: %(options)s') %
{'option': opt,
'options': ', '.join(LDAP_DEREF.keys()), })
def parse_tls_cert(opt):
try:
return LDAP_TLS_CERTS[opt]
except KeyError:
raise ValueError(_(
'Invalid LDAP TLS certs option: %(option)s. '
'Choose one of: %(options)s') % {
'option': opt,
'options': ', '.join(LDAP_TLS_CERTS.keys())})
def ldap_scope(scope):
try:
return LDAP_SCOPES[scope]
except KeyError:
raise ValueError(
_('Invalid LDAP scope: %(scope)s. Choose one of: %(options)s') % {
'scope': scope,
'options': ', '.join(LDAP_SCOPES.keys())})
def prep_case_insensitive(value):
"""Prepare a string for case-insensitive comparison.
This is defined in RFC4518. For simplicity, all this function does is
lowercase all the characters, strip leading and trailing whitespace,
and compress sequences of spaces to a single space.
"""
value = re.sub(r'\s+', ' ', value.strip().lower())
return value
def is_ava_value_equal(attribute_type, val1, val2):
"""Return True if and only if the AVAs are equal.
When comparing AVAs, the equality matching rule for the attribute type
should be taken into consideration. For simplicity, this implementation
does a case-insensitive comparison.
    Note that this function uses prep_case_insensitive so the limitations of
that function apply here.
"""
return prep_case_insensitive(val1) == prep_case_insensitive(val2)
def is_rdn_equal(rdn1, rdn2):
"""Return True if and only if the RDNs are equal.
* RDNs must have the same number of AVAs.
    * Each AVA of the RDNs must be equal for the same attribute type. The
order isn't significant. Note that an attribute type will only be in one
AVA in an RDN, otherwise the DN wouldn't be valid.
* Attribute types aren't case sensitive. Note that attribute type
comparison is more complicated than implemented. This function only
    compares case-insensitively. The code should handle multiple names for an
attribute type (e.g., cn, commonName, and 2.5.4.3 are the same).
Note that this function uses is_ava_value_equal to compare AVAs so the
limitations of that function apply here.
"""
if len(rdn1) != len(rdn2):
return False
for attr_type_1, val1, dummy in rdn1:
found = False
for attr_type_2, val2, dummy in rdn2:
if attr_type_1.lower() != attr_type_2.lower():
continue
found = True
if not is_ava_value_equal(attr_type_1, val1, val2):
return False
break
if not found:
return False
return True
def is_dn_equal(dn1, dn2):
"""Return True if and only if the DNs are equal.
Two DNs are equal if they've got the same number of RDNs and if the RDNs
are the same at each position. See RFC4517.
Note that this function uses is_rdn_equal to compare RDNs so the
limitations of that function apply here.
:param dn1: Either a string DN or a DN parsed by ldap.dn.str2dn.
:param dn2: Either a string DN or a DN parsed by ldap.dn.str2dn.
"""
if not isinstance(dn1, list):
dn1 = ldap.dn.str2dn(utf8_encode(dn1))
if not isinstance(dn2, list):
dn2 = ldap.dn.str2dn(utf8_encode(dn2))
if len(dn1) != len(dn2):
return False
for rdn1, rdn2 in zip(dn1, dn2):
if not is_rdn_equal(rdn1, rdn2):
return False
return True
def dn_startswith(descendant_dn, dn):
"""Return True if and only if the descendant_dn is under the dn.
:param descendant_dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
:param dn: Either a string DN or a DN parsed by ldap.dn.str2dn.
"""
if not isinstance(descendant_dn, list):
descendant_dn = ldap.dn.str2dn(utf8_encode(descendant_dn))
if not isinstance(dn, list):
dn = ldap.dn.str2dn(utf8_encode(dn))
if len(descendant_dn) <= len(dn):
return False
# Use the last len(dn) RDNs.
return is_dn_equal(descendant_dn[-len(dn):], dn)
@six.add_metaclass(abc.ABCMeta)
class LDAPHandler(object):
"""Abstract class which defines methods for a LDAP API provider.
Native Keystone values cannot be passed directly into and from the
python-ldap API. Type conversion must occur at the LDAP API
boundary, examples of type conversions are:
* booleans map to the strings 'TRUE' and 'FALSE'
* integer values map to their string representation.
* unicode strings are encoded in UTF-8
In addition to handling type conversions at the API boundary we
have the requirement to support more than one LDAP API
provider. Currently we have:
* python-ldap, this is the standard LDAP API for Python, it
requires access to a live LDAP server.
* Fake LDAP which emulates python-ldap. This is used for
testing without requiring a live LDAP server.
To support these requirements we need a layer that performs type
conversions and then calls another LDAP API which is configurable
(e.g. either python-ldap or the fake emulation).
We have an additional constraint at the time of this writing due to
limitations in the logging module. The logging module is not
capable of accepting UTF-8 encoded strings, it will throw an
encoding exception. Therefore all logging MUST be performed prior
to UTF-8 conversion. This means no logging can be performed in the
ldap APIs that implement the python-ldap API because those APIs
are defined to accept only UTF-8 strings. Thus the layer which
performs type conversions must also do the logging. We do the type
conversions in two steps, once to convert all Python types to
unicode strings, then log, then convert the unicode strings to
UTF-8.
There are a variety of ways one could accomplish this, we elect to
use a chaining technique whereby instances of this class simply
call the next member in the chain via the "conn" attribute. The
chain is constructed by passing in an existing instance of this
class as the conn attribute when the class is instantiated.
Here is a brief explanation of why other possible approaches were
not used:
subclassing
To perform the wrapping operations in the correct order
the type conversion class would have to subclass each of
the API providers. This is awkward, doubles the number of
classes, and does not scale well. It requires the type
conversion class to be aware of all possible API
providers.
decorators
Decorators provide an elegant solution to wrap methods and
would be an ideal way to perform type conversions before
calling the wrapped function and then converting the
values returned from the wrapped function. However
decorators need to be aware of the method signature, it
has to know what input parameters need conversion and how
to convert the result. For an API like python-ldap which
has a large number of different method signatures it would
require a large number of specialized
decorators. Experience has shown it's very easy to apply
the wrong decorator due to the inherent complexity and
tendency to cut-n-paste code. Another option is to
parameterize the decorator to make it "smart". Experience
has shown such decorators become insanely complicated and
difficult to understand and debug. Also decorators tend to
hide what's really going on when a method is called, the
operations being performed are not visible when looking at
            the implementation of a decorated method; this, too, experience
has shown leads to mistakes.
Chaining simplifies both wrapping to perform type conversion as
well as the substitution of alternative API providers. One simply
creates a new instance of the API interface and insert it at the
front of the chain. Type conversions are explicit and obvious.
If a new method needs to be added to the API interface one adds it
to the abstract class definition. Should one miss adding the new
method to any derivations of the abstract class the code will fail
to load and run making it impossible to forget updating all the
derived classes.
"""
def __init__(self, conn=None):
self.conn = conn
@abc.abstractmethod
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert=ldap.OPT_X_TLS_DEMAND, chase_referrals=None,
debug_level=None, conn_timeout=None, use_pool=None,
pool_size=None, pool_retry_max=None, pool_retry_delay=None,
pool_conn_timeout=None, pool_conn_lifetime=None):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def set_option(self, option, invalue):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def get_option(self, option):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def unbind_s(self):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def add_s(self, dn, modlist):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def search_s(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
resp_ctrl_classes=None):
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def modify_s(self, dn, modlist):
raise exception.NotImplemented() # pragma: no cover
class PythonLDAPHandler(LDAPHandler):
"""LDAPHandler implementation which calls the python-ldap API.
Note, the python-ldap API requires all string values to be UTF-8 encoded.
The KeystoneLDAPHandler enforces this prior to invoking the methods in this
class.
"""
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert=ldap.OPT_X_TLS_DEMAND, chase_referrals=None,
debug_level=None, conn_timeout=None, use_pool=None,
pool_size=None, pool_retry_max=None, pool_retry_delay=None,
pool_conn_timeout=None, pool_conn_lifetime=None):
_common_ldap_initialization(url=url,
use_tls=use_tls,
tls_cacertfile=tls_cacertfile,
tls_cacertdir=tls_cacertdir,
tls_req_cert=tls_req_cert,
debug_level=debug_level,
timeout=conn_timeout)
self.conn = ldap.initialize(url)
self.conn.protocol_version = ldap.VERSION3
if alias_dereferencing is not None:
self.conn.set_option(ldap.OPT_DEREF, alias_dereferencing)
self.page_size = page_size
if use_tls:
self.conn.start_tls_s()
if chase_referrals is not None:
self.conn.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
def set_option(self, option, invalue):
return self.conn.set_option(option, invalue)
def get_option(self, option):
return self.conn.get_option(option)
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
return self.conn.simple_bind_s(who, cred, serverctrls, clientctrls)
def unbind_s(self):
return self.conn.unbind_s()
def add_s(self, dn, modlist):
return self.conn.add_s(dn, modlist)
def search_s(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
return self.conn.search_s(base, scope, filterstr,
attrlist, attrsonly)
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
return self.conn.search_ext(base, scope,
filterstr, attrlist, attrsonly,
serverctrls, clientctrls,
timeout, sizelimit)
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
resp_ctrl_classes=None):
# The resp_ctrl_classes parameter is a recent addition to the
# API. It defaults to None. We do not anticipate using it.
# To run with older versions of python-ldap we do not pass it.
return self.conn.result3(msgid, all, timeout)
def modify_s(self, dn, modlist):
return self.conn.modify_s(dn, modlist)
def _common_ldap_initialization(url, use_tls=False, tls_cacertfile=None,
tls_cacertdir=None, tls_req_cert=None,
debug_level=None, timeout=None):
"""LDAP initialization for PythonLDAPHandler and PooledLDAPHandler."""
LOG.debug('LDAP init: url=%s', url)
LOG.debug('LDAP init: use_tls=%s tls_cacertfile=%s tls_cacertdir=%s '
'tls_req_cert=%s tls_avail=%s',
use_tls, tls_cacertfile, tls_cacertdir,
tls_req_cert, ldap.TLS_AVAIL)
if debug_level is not None:
ldap.set_option(ldap.OPT_DEBUG_LEVEL, debug_level)
using_ldaps = url.lower().startswith("ldaps")
if timeout is not None and timeout > 0:
# set network connection timeout
ldap.set_option(ldap.OPT_NETWORK_TIMEOUT, timeout)
if use_tls and using_ldaps:
raise AssertionError(_('Invalid TLS / LDAPS combination'))
# The certificate trust options apply for both LDAPS and TLS.
if use_tls or using_ldaps:
if not ldap.TLS_AVAIL:
raise ValueError(_('Invalid LDAP TLS_AVAIL option: %s. TLS '
'not available') % ldap.TLS_AVAIL)
if tls_cacertfile:
# NOTE(topol)
# python ldap TLS does not verify CACERTFILE or CACERTDIR
# so we add some extra simple sanity check verification
# Also, setting these values globally (i.e. on the ldap object)
# works but these values are ignored when setting them on the
# connection
if not os.path.isfile(tls_cacertfile):
raise IOError(_("tls_cacertfile %s not found "
"or is not a file") %
tls_cacertfile)
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, tls_cacertfile)
elif tls_cacertdir:
# NOTE(topol)
# python ldap TLS does not verify CACERTFILE or CACERTDIR
# so we add some extra simple sanity check verification
# Also, setting these values globally (i.e. on the ldap object)
# works but these values are ignored when setting them on the
# connection
if not os.path.isdir(tls_cacertdir):
raise IOError(_("tls_cacertdir %s not found "
"or is not a directory") %
tls_cacertdir)
ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir)
if tls_req_cert in list(LDAP_TLS_CERTS.values()):
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert)
else:
LOG.debug('LDAP TLS: invalid TLS_REQUIRE_CERT Option=%s',
tls_req_cert)
class MsgId(list):
"""Wrapper class to hold connection and msgid."""
pass
def use_conn_pool(func):
"""Use this only for connection pool specific ldap API.
This adds connection object to decorated API as next argument after self.
"""
def wrapper(self, *args, **kwargs):
# assert isinstance(self, PooledLDAPHandler)
with self._get_pool_connection() as conn:
self._apply_options(conn)
return func(self, conn, *args, **kwargs)
return wrapper
class PooledLDAPHandler(LDAPHandler):
"""LDAPHandler implementation which uses pooled connection manager.
Pool specific configuration is defined in [ldap] section.
All other LDAP configuration is still used from [ldap] section
Keystone LDAP authentication logic authenticates an end user using its DN
and password via LDAP bind to establish supplied password is correct.
This can fill up the pool quickly (as pool re-uses existing connection
based on its bind data) and would not leave space in pool for connection
re-use for other LDAP operations.
Now a separate pool can be established for those requests when related flag
'use_auth_pool' is enabled. That pool can have its own size and
connection lifetime. Other pool attributes are shared between those pools.
If 'use_pool' is disabled, then 'use_auth_pool' does not matter.
If 'use_auth_pool' is not enabled, then connection pooling is not used for
those LDAP operations.
Note, the python-ldap API requires all string values to be UTF-8
encoded. The KeystoneLDAPHandler enforces this prior to invoking
the methods in this class.
"""
# Added here to allow override for testing
Connector = ldappool.StateConnector
auth_pool_prefix = 'auth_pool_'
connection_pools = {} # static connector pool dict
def __init__(self, conn=None, use_auth_pool=False):
super(PooledLDAPHandler, self).__init__(conn=conn)
self.who = ''
self.cred = ''
self.conn_options = {} # connection specific options
self.page_size = None
self.use_auth_pool = use_auth_pool
self.conn_pool = None
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert=ldap.OPT_X_TLS_DEMAND, chase_referrals=None,
debug_level=None, conn_timeout=None, use_pool=None,
pool_size=None, pool_retry_max=None, pool_retry_delay=None,
pool_conn_timeout=None, pool_conn_lifetime=None):
_common_ldap_initialization(url=url,
use_tls=use_tls,
tls_cacertfile=tls_cacertfile,
tls_cacertdir=tls_cacertdir,
tls_req_cert=tls_req_cert,
debug_level=debug_level,
timeout=pool_conn_timeout)
self.page_size = page_size
# Following two options are not added in common initialization as they
# need to follow a sequence in PythonLDAPHandler code.
if alias_dereferencing is not None:
self.set_option(ldap.OPT_DEREF, alias_dereferencing)
if chase_referrals is not None:
self.set_option(ldap.OPT_REFERRALS, int(chase_referrals))
if self.use_auth_pool: # separate pool when use_auth_pool enabled
pool_url = self.auth_pool_prefix + url
else:
pool_url = url
try:
self.conn_pool = self.connection_pools[pool_url]
except KeyError:
self.conn_pool = ldappool.ConnectionManager(
url,
size=pool_size,
retry_max=pool_retry_max,
retry_delay=pool_retry_delay,
timeout=pool_conn_timeout,
connector_cls=self.Connector,
use_tls=use_tls,
max_lifetime=pool_conn_lifetime)
self.connection_pools[pool_url] = self.conn_pool
def set_option(self, option, invalue):
self.conn_options[option] = invalue
def get_option(self, option):
value = self.conn_options.get(option)
# if option was not specified explicitly, then use connection default
# value for that option if there.
if value is None:
with self._get_pool_connection() as conn:
value = conn.get_option(option)
return value
def _apply_options(self, conn):
# if connection has a lifetime, then it already has options specified
if conn.get_lifetime() > 30:
return
for option, invalue in self.conn_options.items():
conn.set_option(option, invalue)
def _get_pool_connection(self):
return self.conn_pool.connection(self.who, self.cred)
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
# Not using use_conn_pool decorator here as this API takes cred as
# input.
self.who = who
self.cred = cred
with self._get_pool_connection() as conn:
self._apply_options(conn)
def unbind_s(self):
        # Once the `with` block around the pooled connection exits, the
        # connection is always released back to the pool by ldappool's
        # finally block, so this unbind is a no-op.
pass
@use_conn_pool
def add_s(self, conn, dn, modlist):
return conn.add_s(dn, modlist)
@use_conn_pool
def search_s(self, conn, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
return conn.search_s(base, scope, filterstr, attrlist,
attrsonly)
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
"""Return a ``MsgId`` instance, it asynchronous API.
The ``MsgId`` instance can be safely used in a call to ``result3()``.
To work with ``result3()`` API in predictable manner, the same LDAP
connection is needed which originally provided the ``msgid``. So, this
method wraps the existing connection and ``msgid`` in a new ``MsgId``
instance. The connection associated with ``search_ext`` is released
once last hard reference to the ``MsgId`` instance is freed.
"""
conn_ctxt = self._get_pool_connection()
conn = conn_ctxt.__enter__()
try:
msgid = conn.search_ext(base, scope,
filterstr, attrlist, attrsonly,
serverctrls, clientctrls,
timeout, sizelimit)
except Exception:
conn_ctxt.__exit__(*sys.exc_info())
raise
res = MsgId((conn, msgid))
weakref.ref(res, functools.partial(conn_ctxt.__exit__,
None, None, None))
return res
def result3(self, msgid, all=1, timeout=None,
resp_ctrl_classes=None):
"""Wait for and return the result.
This method returns the result of an operation previously initiated by
one of the LDAP asynchronous operation routines (eg search_ext()). It
returned an invocation identifier (a message id) upon successful
initiation of their operation.
Input msgid is expected to be instance of class MsgId which has LDAP
session/connection used to execute search_ext and message idenfier.
The connection associated with search_ext is released once last hard
reference to MsgId object is freed. This will happen when function
which requested msgId and used it in result3 exits.
"""
conn, msg_id = msgid
return conn.result3(msg_id, all, timeout)
@use_conn_pool
def modify_s(self, conn, dn, modlist):
return conn.modify_s(dn, modlist)
class KeystoneLDAPHandler(LDAPHandler):
"""Convert data types and perform logging.
This LDAP interface wraps the python-ldap based interfaces. The
python-ldap interfaces require string values encoded in UTF-8. The
    OpenStack logging framework at the time of this writing is not
    capable of accepting strings encoded in UTF-8; the log functions
    will throw decoding errors if a non-ASCII character appears in a
    string.
    Prior to the call, Python data types are converted to a string
    representation as required by the LDAP APIs.
    Then logging is performed so we can track what is being
    sent/received from LDAP. The logging also filters out
    security-sensitive items (i.e. passwords).
Then the string values are encoded into UTF-8.
Then the LDAP API entry point is invoked.
Data returned from the LDAP call is converted back from UTF-8
encoded strings into the Python data type used internally in
OpenStack.
"""
def __init__(self, conn=None):
super(KeystoneLDAPHandler, self).__init__(conn=conn)
self.page_size = 0
def __enter__(self):
"""Enter runtime context."""
return self
def _disable_paging(self):
# Disable the pagination from now on
self.page_size = 0
def connect(self, url, page_size=0, alias_dereferencing=None,
use_tls=False, tls_cacertfile=None, tls_cacertdir=None,
tls_req_cert=ldap.OPT_X_TLS_DEMAND, chase_referrals=None,
debug_level=None, conn_timeout=None, use_pool=None,
pool_size=None, pool_retry_max=None, pool_retry_delay=None,
pool_conn_timeout=None, pool_conn_lifetime=None):
self.page_size = page_size
return self.conn.connect(url, page_size, alias_dereferencing,
use_tls, tls_cacertfile, tls_cacertdir,
tls_req_cert, chase_referrals,
debug_level=debug_level,
conn_timeout=conn_timeout,
use_pool=use_pool,
pool_size=pool_size,
pool_retry_max=pool_retry_max,
pool_retry_delay=pool_retry_delay,
pool_conn_timeout=pool_conn_timeout,
pool_conn_lifetime=pool_conn_lifetime)
def set_option(self, option, invalue):
return self.conn.set_option(option, invalue)
def get_option(self, option):
return self.conn.get_option(option)
def simple_bind_s(self, who='', cred='',
serverctrls=None, clientctrls=None):
LOG.debug('LDAP bind: who=%s', who)
who_utf8 = utf8_encode(who)
cred_utf8 = utf8_encode(cred)
return self.conn.simple_bind_s(who_utf8, cred_utf8,
serverctrls=serverctrls,
clientctrls=clientctrls)
def unbind_s(self):
LOG.debug('LDAP unbind')
return self.conn.unbind_s()
def add_s(self, dn, modlist):
ldap_attrs = [(kind, [py2ldap(x) for x in safe_iter(values)])
for kind, values in modlist]
logging_attrs = [(kind, values
if kind != 'userPassword'
else ['****'])
for kind, values in ldap_attrs]
LOG.debug('LDAP add: dn=%s attrs=%s',
dn, logging_attrs)
dn_utf8 = utf8_encode(dn)
ldap_attrs_utf8 = [(kind, [utf8_encode(x) for x in safe_iter(values)])
for kind, values in ldap_attrs]
return self.conn.add_s(dn_utf8, ldap_attrs_utf8)
def search_s(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0):
# NOTE(morganfainberg): Remove "None" singletons from this list, which
# allows us to set mapped attributes to "None" as defaults in config.
# Without this filtering, the ldap query would raise a TypeError since
# attrlist is expected to be an iterable of strings.
if attrlist is not None:
attrlist = [attr for attr in attrlist if attr is not None]
LOG.debug('LDAP search: base=%s scope=%s filterstr=%s '
'attrs=%s attrsonly=%s',
base, scope, filterstr, attrlist, attrsonly)
if self.page_size:
ldap_result = self._paged_search_s(base, scope,
filterstr, attrlist)
else:
base_utf8 = utf8_encode(base)
filterstr_utf8 = utf8_encode(filterstr)
if attrlist is None:
attrlist_utf8 = None
else:
attrlist_utf8 = list(map(utf8_encode, attrlist))
ldap_result = self.conn.search_s(base_utf8, scope,
filterstr_utf8,
attrlist_utf8, attrsonly)
py_result = convert_ldap_result(ldap_result)
return py_result
def search_ext(self, base, scope,
filterstr='(objectClass=*)', attrlist=None, attrsonly=0,
serverctrls=None, clientctrls=None,
timeout=-1, sizelimit=0):
if attrlist is not None:
attrlist = [attr for attr in attrlist if attr is not None]
LOG.debug('LDAP search_ext: base=%s scope=%s filterstr=%s '
'attrs=%s attrsonly=%s '
'serverctrls=%s clientctrls=%s timeout=%s sizelimit=%s',
base, scope, filterstr, attrlist, attrsonly,
serverctrls, clientctrls, timeout, sizelimit)
return self.conn.search_ext(base, scope,
filterstr, attrlist, attrsonly,
serverctrls, clientctrls,
timeout, sizelimit)
def _paged_search_s(self, base, scope, filterstr, attrlist=None):
res = []
use_old_paging_api = False
# The API for the simple paged results control changed between
# python-ldap 2.3 and 2.4. We need to detect the capabilities
# of the python-ldap version we are using.
if hasattr(ldap, 'LDAP_CONTROL_PAGE_OID'):
use_old_paging_api = True
lc = ldap.controls.SimplePagedResultsControl(
controlType=ldap.LDAP_CONTROL_PAGE_OID,
criticality=True,
controlValue=(self.page_size, ''))
page_ctrl_oid = ldap.LDAP_CONTROL_PAGE_OID
else:
lc = ldap.controls.libldap.SimplePagedResultsControl(
criticality=True,
size=self.page_size,
cookie='')
page_ctrl_oid = ldap.controls.SimplePagedResultsControl.controlType
base_utf8 = utf8_encode(base)
filterstr_utf8 = utf8_encode(filterstr)
if attrlist is None:
attrlist_utf8 = None
else:
attrlist = [attr for attr in attrlist if attr is not None]
attrlist_utf8 = list(map(utf8_encode, attrlist))
msgid = self.conn.search_ext(base_utf8,
scope,
filterstr_utf8,
attrlist_utf8,
serverctrls=[lc])
        # Loop, requesting pages from the LDAP server until there is no
        # more data to fetch.
while True:
            # Request a page with 'page_size' entries from the LDAP server
rtype, rdata, rmsgid, serverctrls = self.conn.result3(msgid)
# Receive the data
res.extend(rdata)
pctrls = [c for c in serverctrls
if c.controlType == page_ctrl_oid]
if pctrls:
# LDAP server supports pagination
if use_old_paging_api:
est, cookie = pctrls[0].controlValue
lc.controlValue = (self.page_size, cookie)
else:
cookie = lc.cookie = pctrls[0].cookie
if cookie:
# There is more data still on the server
# so we request another page
msgid = self.conn.search_ext(base_utf8,
scope,
filterstr_utf8,
attrlist_utf8,
serverctrls=[lc])
else:
# Exit condition no more data on server
break
else:
LOG.warning('LDAP Server does not support paging. '
'Disable paging in keystone.conf to '
'avoid this message.')
self._disable_paging()
break
return res
def result3(self, msgid=ldap.RES_ANY, all=1, timeout=None,
resp_ctrl_classes=None):
ldap_result = self.conn.result3(msgid, all, timeout, resp_ctrl_classes)
LOG.debug('LDAP result3: msgid=%s all=%s timeout=%s '
'resp_ctrl_classes=%s ldap_result=%s',
msgid, all, timeout, resp_ctrl_classes, ldap_result)
        # ldap_result returned from result3 is a tuple of
        # (rtype, rdata, rmsgid, serverctrls). We only need rdata.
rtype, rdata, rmsgid, serverctrls = ldap_result
py_result = convert_ldap_result(rdata)
return py_result
def modify_s(self, dn, modlist):
ldap_modlist = [
(op, kind, (None if values is None
else [py2ldap(x) for x in safe_iter(values)]))
for op, kind, values in modlist]
logging_modlist = [(op, kind, (values if kind != 'userPassword'
else ['****']))
for op, kind, values in ldap_modlist]
LOG.debug('LDAP modify: dn=%s modlist=%s',
dn, logging_modlist)
dn_utf8 = utf8_encode(dn)
ldap_modlist_utf8 = [
(op, kind, (None if values is None
else [utf8_encode(x) for x in safe_iter(values)]))
for op, kind, values in ldap_modlist]
return self.conn.modify_s(dn_utf8, ldap_modlist_utf8)
def __exit__(self, exc_type, exc_val, exc_tb):
"""Exit runtime context, unbind LDAP."""
self.unbind_s()
_HANDLERS = {}
def register_handler(prefix, handler):
_HANDLERS[prefix] = handler
def _get_connection(conn_url, use_pool=False, use_auth_pool=False):
for prefix, handler in _HANDLERS.items():
if conn_url.startswith(prefix):
return handler()
if use_pool:
return PooledLDAPHandler(use_auth_pool=use_auth_pool)
else:
return PythonLDAPHandler()
def filter_entity(entity_ref):
"""Filter out private items in an entity dict.
:param entity_ref: the entity dictionary. The 'dn' field will be removed.
'dn' is used in LDAP, but should not be returned to the user. This
value may be modified.
:returns: entity_ref
"""
if entity_ref:
entity_ref.pop('dn', None)
return entity_ref
class BaseLdap(object):
DEFAULT_OU = None
DEFAULT_STRUCTURAL_CLASSES = None
DEFAULT_ID_ATTR = 'cn'
DEFAULT_OBJECTCLASS = None
DEFAULT_FILTER = None
DEFAULT_EXTRA_ATTR_MAPPING = []
NotFound = None
notfound_arg = None
options_name = None
model = None
attribute_options_names = {}
immutable_attrs = []
attribute_ignore = []
tree_dn = None
def __init__(self, conf):
self.LDAP_URL = conf.ldap.url
self.LDAP_USER = conf.ldap.user
self.LDAP_PASSWORD = conf.ldap.password
self.LDAP_SCOPE = ldap_scope(conf.ldap.query_scope)
self.alias_dereferencing = parse_deref(conf.ldap.alias_dereferencing)
self.page_size = conf.ldap.page_size
self.use_tls = conf.ldap.use_tls
self.tls_cacertfile = conf.ldap.tls_cacertfile
self.tls_cacertdir = conf.ldap.tls_cacertdir
self.tls_req_cert = parse_tls_cert(conf.ldap.tls_req_cert)
self.attribute_mapping = {}
self.chase_referrals = conf.ldap.chase_referrals
self.debug_level = conf.ldap.debug_level
self.conn_timeout = conf.ldap.connection_timeout
# LDAP Pool specific attribute
self.use_pool = conf.ldap.use_pool
self.pool_size = conf.ldap.pool_size
self.pool_retry_max = conf.ldap.pool_retry_max
self.pool_retry_delay = conf.ldap.pool_retry_delay
self.pool_conn_timeout = conf.ldap.pool_connection_timeout
self.pool_conn_lifetime = conf.ldap.pool_connection_lifetime
# End user authentication pool specific config attributes
self.use_auth_pool = self.use_pool and conf.ldap.use_auth_pool
self.auth_pool_size = conf.ldap.auth_pool_size
self.auth_pool_conn_lifetime = conf.ldap.auth_pool_connection_lifetime
if self.options_name is not None:
self.tree_dn = (
getattr(conf.ldap, '%s_tree_dn' % self.options_name)
or '%s,%s' % (self.DEFAULT_OU, conf.ldap.suffix))
idatt = '%s_id_attribute' % self.options_name
self.id_attr = getattr(conf.ldap, idatt) or self.DEFAULT_ID_ATTR
objclass = '%s_objectclass' % self.options_name
self.object_class = (getattr(conf.ldap, objclass)
or self.DEFAULT_OBJECTCLASS)
for k, v in self.attribute_options_names.items():
v = '%s_%s_attribute' % (self.options_name, v)
self.attribute_mapping[k] = getattr(conf.ldap, v)
attr_mapping_opt = ('%s_additional_attribute_mapping' %
self.options_name)
attr_mapping = (getattr(conf.ldap, attr_mapping_opt)
or self.DEFAULT_EXTRA_ATTR_MAPPING)
self.extra_attr_mapping = self._parse_extra_attrs(attr_mapping)
ldap_filter = '%s_filter' % self.options_name
self.ldap_filter = getattr(conf.ldap,
ldap_filter) or self.DEFAULT_FILTER
member_attribute = '%s_member_attribute' % self.options_name
self.member_attribute = getattr(conf.ldap, member_attribute, None)
self.structural_classes = self.DEFAULT_STRUCTURAL_CLASSES
if self.notfound_arg is None:
self.notfound_arg = self.options_name + '_id'
attribute_ignore = '%s_attribute_ignore' % self.options_name
self.attribute_ignore = getattr(conf.ldap, attribute_ignore)
def _not_found(self, object_id):
if self.NotFound is None:
return exception.NotFound(target=object_id)
else:
return self.NotFound(**{self.notfound_arg: object_id})
@staticmethod
def _parse_extra_attrs(option_list):
mapping = {}
for item in option_list:
try:
ldap_attr, attr_map = item.split(':')
except ValueError:
LOG.warning(
'Invalid additional attribute mapping: "%s". '
'Format must be <ldap_attribute>:<keystone_attribute>',
item)
continue
mapping[ldap_attr] = attr_map
return mapping
def get_connection(self, user=None, password=None, end_user_auth=False):
use_pool = self.use_pool
pool_size = self.pool_size
pool_conn_lifetime = self.pool_conn_lifetime
if end_user_auth:
if not self.use_auth_pool:
use_pool = False
else:
pool_size = self.auth_pool_size
pool_conn_lifetime = self.auth_pool_conn_lifetime
conn = _get_connection(self.LDAP_URL, use_pool,
use_auth_pool=end_user_auth)
conn = KeystoneLDAPHandler(conn=conn)
# The LDAP server may be down or a connection may not
# exist. If that is the case, the bind attempt will
# fail with a server down exception.
try:
conn.connect(self.LDAP_URL,
page_size=self.page_size,
alias_dereferencing=self.alias_dereferencing,
use_tls=self.use_tls,
tls_cacertfile=self.tls_cacertfile,
tls_cacertdir=self.tls_cacertdir,
tls_req_cert=self.tls_req_cert,
chase_referrals=self.chase_referrals,
debug_level=self.debug_level,
conn_timeout=self.conn_timeout,
use_pool=use_pool,
pool_size=pool_size,
pool_retry_max=self.pool_retry_max,
pool_retry_delay=self.pool_retry_delay,
pool_conn_timeout=self.pool_conn_timeout,
pool_conn_lifetime=pool_conn_lifetime)
if user is None:
user = self.LDAP_USER
if password is None:
password = self.LDAP_PASSWORD
# not all LDAP servers require authentication, so we don't bind
# if we don't have any user/pass
if user and password:
conn.simple_bind_s(user, password)
else:
conn.simple_bind_s()
return conn
except ldap.SERVER_DOWN:
raise exception.LDAPServerConnectionError(
url=self.LDAP_URL)
def _id_to_dn_string(self, object_id):
return u'%s=%s,%s' % (self.id_attr,
ldap.dn.escape_dn_chars(
six.text_type(object_id)),
self.tree_dn)
def _id_to_dn(self, object_id):
if self.LDAP_SCOPE == ldap.SCOPE_ONELEVEL:
return self._id_to_dn_string(object_id)
with self.get_connection() as conn:
search_result = conn.search_s(
self.tree_dn, self.LDAP_SCOPE,
u'(&(%(id_attr)s=%(id)s)(objectclass=%(objclass)s))' %
{'id_attr': self.id_attr,
'id': ldap.filter.escape_filter_chars(
six.text_type(object_id)),
'objclass': self.object_class},
attrlist=DN_ONLY)
if search_result:
dn, attrs = search_result[0]
return dn
else:
return self._id_to_dn_string(object_id)
@staticmethod
def _dn_to_id(dn):
return utf8_decode(ldap.dn.str2dn(utf8_encode(dn))[0][0][1])
def _ldap_res_to_model(self, res):
# LDAP attribute names may be returned in a different case than
# they are defined in the mapping, so we need to check for keys
# in a case-insensitive way. We use the case specified in the
# mapping for the model to ensure we have a predictable way of
# retrieving values later.
lower_res = {k.lower(): v for k, v in res[1].items()}
id_attrs = lower_res.get(self.id_attr.lower())
if not id_attrs:
message = _('ID attribute %(id_attr)s not found in LDAP '
'object %(dn)s') % ({'id_attr': self.id_attr,
'dn': res[0]})
raise exception.NotFound(message=message)
if len(id_attrs) > 1:
# FIXME(gyee): if this is a multi-value attribute and it has
# multiple values, we can't use it as ID. Retain the dn_to_id
# logic here so it does not potentially break existing
# deployments. We need to fix our read-write LDAP logic so
# it does not get the ID from DN.
message = ('ID attribute %(id_attr)s for LDAP object %(dn)s '
'has multiple values and therefore cannot be used '
'as an ID. Will get the ID from DN instead') % (
{'id_attr': self.id_attr, 'dn': res[0]})
LOG.warning(message)
id_val = self._dn_to_id(res[0])
else:
id_val = id_attrs[0]
obj = self.model(id=id_val)
for k in obj.known_keys:
if k in self.attribute_ignore:
continue
try:
map_attr = self.attribute_mapping.get(k, k)
if map_attr is None:
# Ignore attributes that are mapped to None.
continue
v = lower_res[map_attr.lower()]
except KeyError: # nosec
# Didn't find the attr, so don't add it.
pass
else:
try:
obj[k] = v[0]
except IndexError:
obj[k] = None
return obj
def affirm_unique(self, values):
if values.get('name') is not None:
try:
self.get_by_name(values['name'])
except exception.NotFound: # nosec
# Didn't find it so it's unique, good.
pass
else:
raise exception.Conflict(type=self.options_name,
details=_('Duplicate name, %s.') %
values['name'])
if values.get('id') is not None:
try:
self.get(values['id'])
except exception.NotFound: # nosec
# Didn't find it, so it's unique, good.
pass
else:
raise exception.Conflict(type=self.options_name,
details=_('Duplicate ID, %s.') %
values['id'])
def create(self, values):
self.affirm_unique(values)
object_classes = self.structural_classes + [self.object_class]
attrs = [('objectClass', object_classes)]
for k, v in values.items():
if k in self.attribute_ignore:
continue
if k == 'id':
# no need to check if v is None as 'id' will always have
# a value
attrs.append((self.id_attr, [v]))
elif v is not None:
attr_type = self.attribute_mapping.get(k, k)
if attr_type is not None:
attrs.append((attr_type, [v]))
extra_attrs = [attr for attr, name
in self.extra_attr_mapping.items()
if name == k]
for attr in extra_attrs:
attrs.append((attr, [v]))
with self.get_connection() as conn:
conn.add_s(self._id_to_dn(values['id']), attrs)
return values
def _ldap_get(self, object_id, ldap_filter=None):
query = (u'(&(%(id_attr)s=%(id)s)'
u'%(filter)s'
u'(objectClass=%(object_class)s))'
% {'id_attr': self.id_attr,
'id': ldap.filter.escape_filter_chars(
six.text_type(object_id)),
'filter': (ldap_filter or self.ldap_filter or ''),
'object_class': self.object_class})
with self.get_connection() as conn:
try:
attrs = list(set(([self.id_attr] +
list(self.attribute_mapping.values()) +
list(self.extra_attr_mapping.keys()))))
res = conn.search_s(self.tree_dn,
self.LDAP_SCOPE,
query,
attrs)
except ldap.NO_SUCH_OBJECT:
return None
try:
return res[0]
except IndexError:
return None
def _ldap_get_limited(self, base, scope, filterstr, attrlist, sizelimit):
with self.get_connection() as conn:
try:
control = ldap.controls.libldap.SimplePagedResultsControl(
criticality=True,
size=sizelimit,
cookie='')
msgid = conn.search_ext(base, scope, filterstr, attrlist,
serverctrls=[control])
rdata = conn.result3(msgid)
return rdata
except ldap.NO_SUCH_OBJECT:
return []
@driver_hints.truncated
def _ldap_get_all(self, hints, ldap_filter=None):
query = u'(&%s(objectClass=%s)(%s=*))' % (
ldap_filter or self.ldap_filter or '',
self.object_class,
self.id_attr)
sizelimit = 0
attrs = list(set(([self.id_attr] +
list(self.attribute_mapping.values()) +
list(self.extra_attr_mapping.keys()))))
if hints.limit:
sizelimit = hints.limit['limit']
return self._ldap_get_limited(self.tree_dn,
self.LDAP_SCOPE,
query,
attrs,
sizelimit)
with self.get_connection() as conn:
try:
return conn.search_s(self.tree_dn,
self.LDAP_SCOPE,
query,
attrs)
except ldap.NO_SUCH_OBJECT:
return []
def _ldap_get_list(self, search_base, scope, query_params=None,
attrlist=None):
query = u'(objectClass=%s)' % self.object_class
if query_params:
def calc_filter(attrname, value):
val_esc = ldap.filter.escape_filter_chars(value)
return '(%s=%s)' % (attrname, val_esc)
query = (u'(&%s%s)' %
(query, ''.join([calc_filter(k, v) for k, v in
query_params.items()])))
with self.get_connection() as conn:
return conn.search_s(search_base, scope, query, attrlist)
def get(self, object_id, ldap_filter=None):
res = self._ldap_get(object_id, ldap_filter)
if res is None:
raise self._not_found(object_id)
else:
return self._ldap_res_to_model(res)
def get_by_name(self, name, ldap_filter=None):
query = (u'(%s=%s)' % (self.attribute_mapping['name'],
ldap.filter.escape_filter_chars(
six.text_type(name))))
res = self.get_all(query)
try:
return res[0]
except IndexError:
raise self._not_found(name)
def get_all(self, ldap_filter=None, hints=None):
hints = hints or driver_hints.Hints()
return [self._ldap_res_to_model(x)
for x in self._ldap_get_all(hints, ldap_filter)]
def update(self, object_id, values, old_obj=None):
if old_obj is None:
old_obj = self.get(object_id)
modlist = []
for k, v in values.items():
if k == 'id':
# id can't be modified.
continue
if k in self.attribute_ignore:
# Handle 'enabled' specially since can't disable if ignored.
if k == 'enabled' and (not v):
action = _("Disabling an entity where the 'enable' "
"attribute is ignored by configuration.")
raise exception.ForbiddenAction(action=action)
continue
# attribute value has not changed
if k in old_obj and old_obj[k] == v:
continue
if k in self.immutable_attrs:
msg = (_("Cannot change %(option_name)s %(attr)s") %
{'option_name': self.options_name, 'attr': k})
raise exception.ValidationError(msg)
if v is None:
if old_obj.get(k) is not None:
modlist.append((ldap.MOD_DELETE,
self.attribute_mapping.get(k, k),
None))
continue
current_value = old_obj.get(k)
if current_value is None:
op = ldap.MOD_ADD
modlist.append((op, self.attribute_mapping.get(k, k), [v]))
elif current_value != v:
op = ldap.MOD_REPLACE
modlist.append((op, self.attribute_mapping.get(k, k), [v]))
if modlist:
with self.get_connection() as conn:
try:
conn.modify_s(self._id_to_dn(object_id), modlist)
except ldap.NO_SUCH_OBJECT:
raise self._not_found(object_id)
return self.get(object_id)
def add_member(self, member_dn, member_list_dn):
"""Add member to the member list.
:param member_dn: DN of member to be added.
:param member_list_dn: DN of group to which the
member will be added.
:raises keystone.exception.Conflict: If the user was already a member.
:raises self.NotFound: If the group entry didn't exist.
"""
with self.get_connection() as conn:
try:
mod = (ldap.MOD_ADD, self.member_attribute, member_dn)
conn.modify_s(member_list_dn, [mod])
except ldap.TYPE_OR_VALUE_EXISTS:
raise exception.Conflict(_('Member %(member)s '
'is already a member'
' of group %(group)s') % {
'member': member_dn,
'group': member_list_dn})
except ldap.NO_SUCH_OBJECT:
raise self._not_found(member_list_dn)
def filter_query(self, hints, query=None):
"""Apply filtering to a query.
:param hints: contains the list of filters, which may be None,
indicating that there are no filters to be applied.
If it's not None, then any filters satisfied here will be
removed so that the caller will know if any filters
remain to be applied.
:param query: LDAP query into which to include filters
:returns query: LDAP query, updated with any filters satisfied
"""
def build_filter(filter_):
"""Build a filter for the query.
:param filter_: the dict that describes this filter
:returns query: LDAP query term to be added
"""
ldap_attr = self.attribute_mapping[filter_['name']]
val_esc = ldap.filter.escape_filter_chars(filter_['value'])
if filter_['case_sensitive']:
# NOTE(henry-nash): Although dependent on the schema being
# used, most LDAP attributes are configured with case
# insensitive matching rules, so we'll leave this to the
# controller to filter.
return
if filter_['name'] == 'enabled':
                # NOTE(henry-nash): Due to the different options for storing
                # the enabled attribute (e.g. emulated or not), for now we
                # don't try to filter this at the driver level - we simply
                # leave the filter to be handled by the controller. It seems
                # unlikely that this will cause a significant performance
                # issue.
return
# TODO(henry-nash): Currently there are no booleans (other than
# 'enabled' that is handled above) on which you can filter. If
# there were, we would need to add special handling here to
            # convert the boolean values to 'TRUE' and 'FALSE'. To do that
# we would also need to know which filter keys were actually
# booleans (this is related to bug #1411478).
if filter_['comparator'] == 'equals':
query_term = (u'(%(attr)s=%(val)s)'
% {'attr': ldap_attr, 'val': val_esc})
elif filter_['comparator'] == 'contains':
query_term = (u'(%(attr)s=*%(val)s*)'
% {'attr': ldap_attr, 'val': val_esc})
elif filter_['comparator'] == 'startswith':
query_term = (u'(%(attr)s=%(val)s*)'
% {'attr': ldap_attr, 'val': val_esc})
elif filter_['comparator'] == 'endswith':
query_term = (u'(%(attr)s=*%(val)s)'
% {'attr': ldap_attr, 'val': val_esc})
else:
# It's a filter we don't understand, so let the caller
# work out if they need to do something with it.
return
return query_term
if query is None:
# make sure query is a string so the ldap filter is properly
# constructed from filter_list later
query = ''
if hints is None:
return query
filter_list = []
satisfied_filters = []
for filter_ in hints.filters:
if filter_['name'] not in self.attribute_mapping:
continue
new_filter = build_filter(filter_)
if new_filter is not None:
filter_list.append(new_filter)
satisfied_filters.append(filter_)
if filter_list:
query = u'(&%s%s)' % (query, ''.join(filter_list))
        # Remove satisfied filters so the caller knows which filters remain
for filter_ in satisfied_filters:
hints.filters.remove(filter_)
return query
class EnabledEmuMixIn(BaseLdap):
"""Emulates boolean 'enabled' attribute if turned on.
Creates a group holding all enabled objects of this class, all missing
objects are considered disabled.
Options:
* $name_enabled_emulation - boolean, on/off
* $name_enabled_emulation_dn - DN of that group, default is
cn=enabled_${name}s,${tree_dn}
* $name_enabled_emulation_use_group_config - boolean, on/off
Where ${name}s is the plural of self.options_name ('users' or 'tenants'),
${tree_dn} is self.tree_dn.
"""
DEFAULT_GROUP_OBJECTCLASS = 'groupOfNames'
DEFAULT_MEMBER_ATTRIBUTE = 'member'
def __init__(self, conf):
super(EnabledEmuMixIn, self).__init__(conf)
enabled_emulation = '%s_enabled_emulation' % self.options_name
self.enabled_emulation = getattr(conf.ldap, enabled_emulation)
enabled_emulation_dn = '%s_enabled_emulation_dn' % self.options_name
self.enabled_emulation_dn = getattr(conf.ldap, enabled_emulation_dn)
use_group_config = ('%s_enabled_emulation_use_group_config' %
self.options_name)
self.use_group_config = getattr(conf.ldap, use_group_config)
if not self.use_group_config:
self.member_attribute = self.DEFAULT_MEMBER_ATTRIBUTE
self.group_objectclass = self.DEFAULT_GROUP_OBJECTCLASS
else:
self.member_attribute = conf.ldap.group_member_attribute
self.group_objectclass = conf.ldap.group_objectclass
if not self.enabled_emulation_dn:
naming_attr_name = 'cn'
naming_attr_value = 'enabled_%ss' % self.options_name
sub_vals = (naming_attr_name, naming_attr_value, self.tree_dn)
self.enabled_emulation_dn = '%s=%s,%s' % sub_vals
naming_attr = (naming_attr_name, [naming_attr_value])
else:
# Extract the attribute name and value from the configured DN.
naming_dn = ldap.dn.str2dn(utf8_encode(self.enabled_emulation_dn))
naming_rdn = naming_dn[0][0]
naming_attr = (utf8_decode(naming_rdn[0]),
utf8_decode(naming_rdn[1]))
self.enabled_emulation_naming_attr = naming_attr
def _get_enabled(self, object_id, conn):
dn = self._id_to_dn(object_id)
query = '(%s=%s)' % (self.member_attribute,
ldap.filter.escape_filter_chars(dn))
try:
enabled_value = conn.search_s(self.enabled_emulation_dn,
ldap.SCOPE_BASE,
query, attrlist=DN_ONLY)
except ldap.NO_SUCH_OBJECT:
return False
else:
return bool(enabled_value)
def _add_enabled(self, object_id):
with self.get_connection() as conn:
if not self._get_enabled(object_id, conn):
modlist = [(ldap.MOD_ADD,
self.member_attribute,
[self._id_to_dn(object_id)])]
try:
conn.modify_s(self.enabled_emulation_dn, modlist)
except ldap.NO_SUCH_OBJECT:
attr_list = [('objectClass', [self.group_objectclass]),
(self.member_attribute,
[self._id_to_dn(object_id)]),
self.enabled_emulation_naming_attr]
conn.add_s(self.enabled_emulation_dn, attr_list)
def _remove_enabled(self, object_id):
modlist = [(ldap.MOD_DELETE,
self.member_attribute,
[self._id_to_dn(object_id)])]
with self.get_connection() as conn:
try:
conn.modify_s(self.enabled_emulation_dn, modlist)
except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE): # nosec
# It's already gone, good.
pass
def create(self, values):
if self.enabled_emulation:
enabled_value = values.pop('enabled', True)
ref = super(EnabledEmuMixIn, self).create(values)
if 'enabled' not in self.attribute_ignore:
if enabled_value:
self._add_enabled(ref['id'])
ref['enabled'] = enabled_value
return ref
else:
return super(EnabledEmuMixIn, self).create(values)
def get(self, object_id, ldap_filter=None):
with self.get_connection() as conn:
ref = super(EnabledEmuMixIn, self).get(object_id, ldap_filter)
if ('enabled' not in self.attribute_ignore and
self.enabled_emulation):
ref['enabled'] = self._get_enabled(object_id, conn)
return ref
def get_all(self, ldap_filter=None, hints=None):
hints = hints or driver_hints.Hints()
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
            # had to copy BaseLdap.get_all here in order to filter by DN
obj_list = [self._ldap_res_to_model(x)
for x in self._ldap_get_all(hints, ldap_filter)
if x[0] != self.enabled_emulation_dn]
with self.get_connection() as conn:
for obj_ref in obj_list:
obj_ref['enabled'] = self._get_enabled(
obj_ref['id'], conn)
return obj_list
else:
return super(EnabledEmuMixIn, self).get_all(ldap_filter, hints)
def update(self, object_id, values, old_obj=None):
if 'enabled' not in self.attribute_ignore and self.enabled_emulation:
data = values.copy()
enabled_value = data.pop('enabled', None)
ref = super(EnabledEmuMixIn, self).update(object_id, data, old_obj)
if enabled_value is not None:
if enabled_value:
self._add_enabled(object_id)
else:
self._remove_enabled(object_id)
ref['enabled'] = enabled_value
return ref
else:
return super(EnabledEmuMixIn, self).update(
object_id, values, old_obj)
|
apache-2.0
| 5,470,165,449,788,208,000 | 38.90985 | 79 | 0.570345 | false |
threatstream/snort_hpfeeds
|
src/tests.py
|
1
|
2791
|
import unittest
import json
from snort import Alert
class TestSnortParser(unittest.TestCase):
maxDiff = None
def test_parse_buffer_udp(self):
buf = """[**] [1:2003195:5] ET POLICY Unusual number of DNS No Such Name Responses [**]
[Classification: Potentially Bad Traffic] [Priority: 2]
05/04/14-11:49:27.431227 3.3.3.3:53 -> 1.1.1.1:50649
UDP TTL:40 TOS:0x0 ID:3969 IpLen:20 DgmLen:133
Len: 105
[Xref => http://doc.emergingthreats.net/2003195]"""
alert = json.loads(Alert.parse_buffer("12345", buf).to_json())
expected = json.loads('''{"destination_ip": "1.1.1.1", "date": "2014-05-04T11:49:27.431227", "classification": "Potentially Bad Traffic", "proto": "UDP", "source_ip": "3.3.3.3", "priority": "2", "header": "1:2003195:5", "signature": "ET POLICY Unusual number of DNS No Such Name Responses", "source_port": "53", "destination_port": "50649", "sensor": "12345"}''')
self.assertEqual(expected, alert)
def test_parse_buffer_icmp(self):
buf = """[**] [1:486:4] ICMP Destination Unreachable Communication with Destination Host is Administratively Prohibited [**]
[Classification: Misc activity] [Priority: 3]
05/04/14-11:30:01.347127 2.2.2.2 -> 1.1.1.1
ICMP TTL:51 TOS:0x0 ID:63425 IpLen:20 DgmLen:68
Type:3 Code:10 DESTINATION UNREACHABLE: ADMINISTRATIVELY PROHIBITED HOST FILTERED
** ORIGINAL DATAGRAM DUMP:
1.1.1.1:110 -> 2.2.2.2:46722
TCP TTL:49 TOS:0x0 ID:0 IpLen:20 DgmLen:40 DF
Seq: 0x0
(12 more bytes of original packet)
** END OF DUMP"""
alert = json.loads(Alert.parse_buffer("12345", buf).to_json())
expected = json.loads('''{"destination_ip": "1.1.1.1", "classification": "Misc activity", "proto": "ICMP", "source_ip": "2.2.2.2", "priority": "3", "header": "1:486:4", "signature": "ICMP Destination Unreachable Communication with Destination Host is Administratively Prohibited", "date": "2014-05-04T11:30:01.347127", "sensor": "12345"}''')
self.assertEqual(expected, alert)
def test_parse_buffer_tcp(self):
buf = """[**] [1:99999:1] test test [**]
[Priority: 0]
07/18/14-18:37:36.311624 10.254.254.1:58132 -> 10.254.254.100:22
TCP TTL:255 TOS:0x0 ID:12300 IpLen:20 DgmLen:52 DF
***A**** Seq: 0xC468E7DA Ack: 0x98D42D0C Win: 0x202B TcpLen: 32
TCP Options (3) => NOP NOP TS: 669698286 1044016
"""
alert = json.loads(Alert.parse_buffer("12345", buf).to_json())
expected = json.loads('''{"destination_ip": "10.254.254.100", "destination_port": "22", "classification": "", "proto": "TCP", "source_ip": "10.254.254.1", "source_port": "58132", "priority": "0", "header": "1:99999:1", "signature": "test test", "date": "2014-07-18T18:37:36.311624", "sensor": "12345"}''')
self.assertEqual(expected, alert)
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
| -1,745,970,661,202,109,700 | 57.145833 | 371 | 0.657829 | false |
gkc1000/pyscf
|
pyscf/geomopt/berny_solver.py
|
1
|
9010
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Interface to geometry optimizer pyberny https://github.com/jhrmnn/pyberny
'''
from __future__ import absolute_import
import pkg_resources
try:
dist = pkg_resources.get_distribution('pyberny')
except pkg_resources.DistributionNotFound:
dist = None
if dist is None or [int(x) for x in dist.version.split('.')] < [0, 6, 2]:
msg = ('Geometry optimizer Pyberny not found or outdated. Install or update '
'with:\n\n\tpip install -U pyberny')
raise ImportError(msg)
import time
import numpy
import logging
from pyscf import lib
from pyscf.geomopt.addons import (as_pyscf_method, dump_mol_geometry,
symmetrize)
from pyscf import __config__
from pyscf.grad.rhf import GradientsBasics
from berny import Berny, geomlib, coords
# Overwrite pyberny's atomic unit
coords.angstrom = 1./lib.param.BOHR
INCLUDE_GHOST = getattr(__config__, 'geomopt_berny_solver_optimize_include_ghost', True)
ASSERT_CONV = getattr(__config__, 'geomopt_berny_solver_optimize_assert_convergence', True)
def to_berny_geom(mol, include_ghost=INCLUDE_GHOST):
atom_charges = mol.atom_charges()
if include_ghost:
# Symbol Ghost is not supported in current version of pyberny
#species = [mol.atom_symbol(i) if z != 0 else 'Ghost'
# for i,z in enumerate(atom_charges)]
species = [mol.atom_symbol(i) if z != 0 else 'H'
for i,z in enumerate(atom_charges)]
coords = mol.atom_coords() * lib.param.BOHR
else:
atmlst = numpy.where(atom_charges != 0)[0] # Exclude ghost atoms
species = [mol.atom_symbol(i) for i in atmlst]
coords = mol.atom_coords()[atmlst] * lib.param.BOHR
# geomlib.Geometry is available in the new version of pyberny solver. (issue #212)
if getattr(geomlib, 'Geometry', None):
return geomlib.Geometry(species, coords)
else:
return geomlib.Molecule(species, coords)
def _geom_to_atom(mol, geom, include_ghost):
coords = geom.coords
if include_ghost:
atom_coords = coords / lib.param.BOHR
else:
atmlst = numpy.where(mol.atom_charges() != 0)[0]
atom_coords = mol.atom_coords()
atom_coords[atmlst] = coords / lib.param.BOHR
return atom_coords
def to_berny_log(pyscf_log):
'''Adapter to allow pyberny to use pyscf.logger
'''
class PyscfHandler(logging.Handler):
def emit(self, record):
pyscf_log.info(record.getMessage())
log = logging.getLogger('{}.{}'.format(__name__, id(pyscf_log)))
log.addHandler(PyscfHandler())
log.setLevel('INFO')
return log
def kernel(method, assert_convergence=ASSERT_CONV,
include_ghost=INCLUDE_GHOST, callback=None, **kwargs):
'''Optimize geometry with pyberny for the given method.
To adjust the convergence threshold, parameters can be set in kwargs as
below:
.. code-block:: python
conv_params = { # They are default settings
'gradientmax': 0.45e-3, # Eh/[Bohr|rad]
'gradientrms': 0.15e-3, # Eh/[Bohr|rad]
'stepmax': 1.8e-3, # [Bohr|rad]
'steprms': 1.2e-3, # [Bohr|rad]
}
from pyscf.geomopt import berny_solver
opt = berny_solver.GeometryOptimizer(method)
opt.params = conv_params
opt.kernel()
'''
t0 = time.clock(), time.time()
mol = method.mol.copy()
if 'log' in kwargs:
log = lib.logger.new_logger(method, kwargs['log'])
elif 'verbose' in kwargs:
log = lib.logger.new_logger(method, kwargs['verbose'])
else:
log = lib.logger.new_logger(method)
if isinstance(method, lib.GradScanner):
g_scanner = method
elif isinstance(method, GradientsBasics):
g_scanner = method.as_scanner()
elif getattr(method, 'nuc_grad_method', None):
g_scanner = method.nuc_grad_method().as_scanner()
else:
raise NotImplementedError('Nuclear gradients of %s not available' % method)
if not include_ghost:
g_scanner.atmlst = numpy.where(method.mol.atom_charges() != 0)[0]
# When symmetry is enabled, the molecule may be shifted or rotated to make
# the z-axis be the main axis. The transformation can cause inconsistency
    # between the optimization steps. The transformation is muted by setting
    # an explicit point group to the keyword mol.symmetry (see the symmetry
    # detection code in the Mole.build function).
if mol.symmetry:
mol.symmetry = mol.topgroup
# temporary interface, taken from berny.py optimize function
berny_log = to_berny_log(log)
geom = to_berny_geom(mol, include_ghost)
optimizer = Berny(geom, logger=berny_log, **kwargs)
t1 = t0
e_last = 0
for cycle, geom in enumerate(optimizer):
if log.verbose >= lib.logger.NOTE:
log.note('\nGeometry optimization cycle %d', cycle+1)
dump_mol_geometry(mol, geom.coords, log)
if mol.symmetry:
geom.coords = symmetrize(mol, geom.coords)
mol.set_geom_(_geom_to_atom(mol, geom, include_ghost), unit='Bohr')
energy, gradients = g_scanner(mol)
log.note('cycle %d: E = %.12g dE = %g norm(grad) = %g', cycle+1,
energy, energy - e_last, numpy.linalg.norm(gradients))
e_last = energy
if callable(callback):
callback(locals())
if assert_convergence and not g_scanner.converged:
raise RuntimeError('Nuclear gradients of %s not converged' % method)
optimizer.send((energy, gradients))
        t1 = log.timer('geometry optimization cycle %d' % cycle, *t1)
    t0 = log.timer('geometry optimization', *t0)
return optimizer._converged, mol
def optimize(method, assert_convergence=ASSERT_CONV,
include_ghost=INCLUDE_GHOST, callback=None, **kwargs):
'''Optimize geometry with pyberny for the given method.
To adjust the convergence threshold, parameters can be set in kwargs as
below:
.. code-block:: python
conv_params = { # They are default settings
'gradientmax': 0.45e-3, # Eh/[Bohr|rad]
'gradientrms': 0.15e-3, # Eh/[Bohr|rad]
'stepmax': 1.8e-3, # [Bohr|rad]
'steprms': 1.2e-3, # [Bohr|rad]
}
from pyscf.geomopt import berny_solver
newmol = berny_solver.optimize(method, **conv_params)
'''
return kernel(method, assert_convergence, include_ghost, callback,
**kwargs)[1]
class GeometryOptimizer(lib.StreamObject):
'''Optimize the molecular geometry for the input method.
    Note that method.mol will be changed after calling the .kernel() method.
'''
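    # Hypothetical usage (an RHF method object is assumed here):
    #   opt = GeometryOptimizer(scf.RHF(mol))
    #   opt.max_cycle = 50
    #   mol_eq = opt.kernel()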
def __init__(self, method):
self.method = method
self.callback = None
self.params = {}
self.converged = False
self.max_cycle = 100
@property
def mol(self):
return self.method.mol
@mol.setter
def mol(self, x):
self.method.mol = x
def kernel(self, params=None):
if params is not None:
self.params.update(params)
params = dict(self.params)
params['maxsteps'] = self.max_cycle
self.converged, self.mol = \
kernel(self.method, callback=self.callback, **params)
return self.mol
optimize = kernel
del(INCLUDE_GHOST, ASSERT_CONV)
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf, dft, cc, mp
mol = gto.M(atom='''
C 1.1879 -0.3829 0.0000
C 0.0000 0.5526 0.0000
O -1.1867 -0.2472 0.0000
H -1.9237 0.3850 0.0000
H 2.0985 0.2306 0.0000
H 1.1184 -1.0093 0.8869
H 1.1184 -1.0093 -0.8869
H -0.0227 1.1812 0.8852
H -0.0227 1.1812 -0.8852
''',
basis='3-21g')
mf = scf.RHF(mol)
conv_params = {
'gradientmax': 6e-3, # Eh/Bohr
'gradientrms': 2e-3, # Eh/Bohr
'stepmax': 2e-2, # Bohr
'steprms': 1.5e-2, # Bohr
}
mol1 = optimize(mf, **conv_params)
print(mf.kernel() - -153.219208484874)
print(scf.RHF(mol1).kernel() - -153.222680852335)
mf = dft.RKS(mol)
mf.xc = 'pbe,'
mf.conv_tol = 1e-7
mol1 = optimize(mf)
mymp2 = mp.MP2(scf.RHF(mol))
mol1 = optimize(mymp2)
mycc = cc.CCSD(scf.RHF(mol))
mol1 = optimize(mycc)
|
apache-2.0
| -6,266,812,189,044,078,000 | 33.922481 | 91 | 0.626415 | false |
ingenioustechie/cookiecutter-django-openshift
|
hooks/post_gen_project.py
|
1
|
8246
|
"""
Does the following:
1. Generates and saves random secret key
2. Removes the taskapp if celery isn't going to be used
3. Removes the .idea directory if PyCharm isn't going to be used
4. Copies files from /docs/ to {{ cookiecutter.project_slug }}/docs/
TODO: this might have to be moved to a pre_gen_hook
A portion of this code was adapted from Django's standard crypto functions and
utilities, specifically:
https://github.com/django/django/blob/master/django/utils/crypto.py
"""
from __future__ import print_function
import os
import random
import shutil
# Get the root project directory
PROJECT_DIRECTORY = os.path.realpath(os.path.curdir)
# Use the system PRNG if possible
try:
random = random.SystemRandom()
using_sysrandom = True
except NotImplementedError:
using_sysrandom = False
def get_random_string(
length=50,
allowed_chars='abcdefghijklmnopqrstuvwxyz0123456789!@#%^&*(-_=+)'):
"""
Returns a securely generated random string.
    The default length of 50 with the a-z, 0-9 and punctuation character set
    above returns a value with roughly 280 bits of entropy:
    log_2(49^50) =~ 280 bits
"""
if using_sysrandom:
return ''.join(random.choice(allowed_chars) for i in range(length))
print(
"Cookiecutter Django couldn't find a secure pseudo-random number generator on your system."
" Please change change your SECRET_KEY variables in conf/settings/local.py and env.example"
" manually."
)
return "CHANGEME!!"
def set_secret_key(setting_file_location):
    # Open the settings file
with open(setting_file_location) as f:
file_ = f.read()
# Generate a SECRET_KEY that matches the Django standard
SECRET_KEY = get_random_string()
# Replace "CHANGEME!!!" with SECRET_KEY
file_ = file_.replace('CHANGEME!!!', SECRET_KEY, 1)
    # Write the results back to the settings file
with open(setting_file_location, 'w') as f:
f.write(file_)
def make_secret_key(project_directory):
"""Generates and saves random secret key"""
# Determine the local_setting_file_location
local_setting = os.path.join(
project_directory,
'config/settings/local.py'
)
# local.py settings file
set_secret_key(local_setting)
env_file = os.path.join(
project_directory,
'env.example'
)
# env.example file
set_secret_key(env_file)
def remove_file(file_name):
if os.path.exists(file_name):
os.remove(file_name)
def remove_task_app(project_directory):
"""Removes the taskapp if celery isn't going to be used"""
# Determine the local_setting_file_location
task_app_location = os.path.join(
PROJECT_DIRECTORY,
'{{ cookiecutter.project_slug }}/taskapp'
)
shutil.rmtree(task_app_location)
def remove_pycharm_dir(project_directory):
"""
Removes directories related to PyCharm
if it isn't going to be used
"""
idea_dir_location = os.path.join(PROJECT_DIRECTORY, '.idea/')
if os.path.exists(idea_dir_location):
shutil.rmtree(idea_dir_location)
docs_dir_location = os.path.join(PROJECT_DIRECTORY, 'docs/pycharm/')
if os.path.exists(docs_dir_location):
shutil.rmtree(docs_dir_location)
def remove_heroku_files():
"""
Removes files needed for heroku if it isn't going to be used
"""
for filename in ["app.json", "Procfile", "requirements.txt", "runtime.txt"]:
file_name = os.path.join(PROJECT_DIRECTORY, filename)
remove_file(file_name)
def remove_docker_files():
"""
Removes files needed for docker if it isn't going to be used
"""
for filename in ["dev.yml", "docker-compose.yml", ".dockerignore"]:
os.remove(os.path.join(
PROJECT_DIRECTORY, filename
))
shutil.rmtree(os.path.join(
PROJECT_DIRECTORY, "compose"
))
def remove_grunt_files():
"""
Removes files needed for grunt if it isn't going to be used
"""
for filename in ["Gruntfile.js"]:
os.remove(os.path.join(
PROJECT_DIRECTORY, filename
))
def remove_gulp_files():
"""
    Removes files needed for gulp if it isn't going to be used
"""
for filename in ["gulpfile.js"]:
os.remove(os.path.join(
PROJECT_DIRECTORY, filename
))
def remove_packageJSON_file():
"""
    Removes package.json if a JS task runner isn't going to be used
"""
for filename in ["package.json"]:
os.remove(os.path.join(
PROJECT_DIRECTORY, filename
))
def remove_certbot_files():
"""
Removes files needed for certbot if it isn't going to be used
"""
nginx_dir_location = os.path.join(PROJECT_DIRECTORY, 'compose/nginx')
for filename in ["nginx-secure.conf", "start.sh", "dhparams.example.pem"]:
file_name = os.path.join(nginx_dir_location, filename)
remove_file(file_name)
# IN PROGRESS
# def copy_doc_files(project_directory):
# cookiecutters_dir = DEFAULT_CONFIG['cookiecutters_dir']
# cookiecutter_django_dir = os.path.join(
# cookiecutters_dir,
# 'cookiecutter-django',
# 'docs'
# )
# target_dir = os.path.join(
# project_directory,
# 'docs'
# )
# for name in os.listdir(cookiecutter_django_dir):
# if name.endswith('.rst') and not name.startswith('index'):
# src = os.path.join(cookiecutter_django_dir, name)
# dst = os.path.join(target_dir, name)
# shutil.copyfile(src, dst)
# 1. Generates and saves random secret key
make_secret_key(PROJECT_DIRECTORY)
# 2. Removes the taskapp if celery isn't going to be used
if '{{ cookiecutter.use_celery }}'.lower() == 'n':
remove_task_app(PROJECT_DIRECTORY)
# 3. Removes the .idea directory if PyCharm isn't going to be used
if '{{ cookiecutter.use_pycharm }}'.lower() != 'y':
remove_pycharm_dir(PROJECT_DIRECTORY)
# 4. Removes all heroku files if it isn't going to be used
if '{{ cookiecutter.use_heroku }}'.lower() != 'y':
remove_heroku_files()
# 5. Removes all docker files if it isn't going to be used
if '{{ cookiecutter.use_docker }}'.lower() != 'y':
remove_docker_files()
# 6. Removes all JS task manager files if it isn't going to be used
if '{{ cookiecutter.js_task_runner}}'.lower() == 'gulp':
remove_grunt_files()
elif '{{ cookiecutter.js_task_runner}}'.lower() == 'grunt':
remove_gulp_files()
else:
remove_gulp_files()
remove_grunt_files()
remove_packageJSON_file()
# 7. Removes all certbot/letsencrypt files if it isn't going to be used
if '{{ cookiecutter.use_lets_encrypt }}'.lower() != 'y':
remove_certbot_files()
# 8. Displays a warning if use_docker and a JS task runner are selected. JS
# task runners aren't supported by our docker config atm.
if '{{ cookiecutter.js_task_runner }}'.lower() in ['grunt', 'gulp'] and '{{ cookiecutter.use_docker }}'.lower() == 'y':
print(
"You selected to use docker and a JS task runner. This is NOT supported out of the box for now. You "
"can continue to use the project like you normally would, but you will need to add a "
"js task runner service to your docker configuration manually."
)
# 9. Removes the certbot/letsencrypt files and displays a warning if use_lets_encrypt is selected and use_docker isn't.
if '{{ cookiecutter.use_lets_encrypt }}'.lower() == 'y' and '{{ cookiecutter.use_docker }}'.lower() != 'y':
remove_certbot_files()
print(
"You selected to use Let's Encrypt and didn't select to use docker. This is NOT supported out of the box for now. You "
"can continue to use the project like you normally would, but Let's Encrypt files have been included."
)
# 10. Directs the user to the documentation if certbot and docker are selected.
if '{{ cookiecutter.use_lets_encrypt }}'.lower() == 'y' and '{{ cookiecutter.use_docker }}'.lower() == 'y':
print(
"You selected to use Let's Encrypt, please see the documentation for instructions on how to use this in production. "
"You must generate a dhparams.pem file before running docker-compose in a production environment."
)
# 4. Copy files from /docs/ to {{ cookiecutter.project_slug }}/docs/
# copy_doc_files(PROJECT_DIRECTORY)
|
mit
| -1,349,989,129,254,802,700 | 32.116466 | 127 | 0.657531 | false |
smuthunoori/ComicCreator
|
generaloptions.py
|
1
|
1035
|
__author__ = 'smuthunoori'
import kivy
kivy.require('1.7.0')
from kivy.uix.boxlayout import BoxLayout
from kivy.properties import ListProperty
class GeneralOptions(BoxLayout):
group_mode = False
translation = ListProperty(None)
def clear(self, instance):
self.drawing_space.clear_widgets()
def remove(self, instance):
if self.drawing_space.children:
self.drawing_space.remove_widget(self.drawing_space.children[0])
def group(self, instance, value):
if value == 'down':
self.group_mode = True
else:
self.group_mode = False
self.unselectall()
def color(self, instance):
pass
def gestures(self, instance, value):
pass
def unselectall(self):
for child in self.drawing_space.children:
child.unselect()
def on_translation(self, instance, value):
for child in self.drawing_space.children:
if child.selected:
child.translate(*self.translation)
|
mit
| -7,419,711,629,351,038,000 | 24.243902 | 76 | 0.628986 | false |
RusticiSoftware/TinCanPython
|
test/lrs_response_test.py
|
1
|
6200
|
# coding=utf-8
#
# Copyright 2014 Rustici Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless respuired by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import http.client
if __name__ == '__main__':
from test.main import setup_tincan_path
setup_tincan_path()
from tincan import LRSResponse, HTTPRequest
class LRSResponseTest(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_init_empty(self):
resp = LRSResponse()
self.assertIsInstance(resp, LRSResponse)
self.assertIsNone(resp.content)
self.assertTrue(hasattr(resp, "success"))
self.assertFalse(resp.success)
self.assertTrue(hasattr(resp, "request"))
self.assertIsNone(resp.request)
self.assertTrue(hasattr(resp, "response"))
self.assertIsNone(resp.response)
def test_init_kwarg_exception(self):
with self.assertRaises(AttributeError):
LRSResponse(bad_test="test")
def test_init_arg_exception_dict(self):
d = {"bad_test": "test", "content": "ok"}
with self.assertRaises(AttributeError):
LRSResponse(d)
def test_init_arg_exception_obj(self):
class Tester(object):
def __init__(self, success=True, bad_test="test"):
self.success = success
self.bad_test = bad_test
obj = Tester()
with self.assertRaises(AttributeError):
LRSResponse(obj)
def test_init_partial(self):
req = HTTPRequest(resource="test")
resp = LRSResponse(
success=True,
content="content test",
request=req,
)
self.assertIsInstance(resp, LRSResponse)
self.assertTrue(resp.success)
self.assertEqual(resp.content, "content test")
self.assertIsInstance(resp.request, HTTPRequest)
self.assertEqual(resp.request, req)
self.assertTrue(hasattr(resp, "response"))
self.assertIsNone(resp.response)
def test_init_all(self):
conn = http.client.HTTPConnection("tincanapi.com")
conn.request("GET", "")
web_resp = conn.getresponse()
req = HTTPRequest(resource="test")
resp = LRSResponse(
success=True,
content="content test",
request=req,
response=web_resp,
)
self.assertIsInstance(resp, LRSResponse)
self.assertTrue(resp.success)
self.assertEqual(resp.content, "content test")
self.assertIsInstance(resp.request, HTTPRequest)
self.assertEqual(resp.request, req)
self.assertIsInstance(resp.response, http.client.HTTPResponse)
self.assertEqual(resp.response, web_resp)
def test_setters(self):
conn = http.client.HTTPConnection("tincanapi.com")
conn.request("GET", "")
web_resp = conn.getresponse()
req = HTTPRequest(resource="test")
resp = LRSResponse()
resp.success = True
resp.content = "content test"
resp.request = req
resp.response = web_resp
self.assertIsInstance(resp, LRSResponse)
self.assertTrue(resp.success)
self.assertEqual(resp.content, "content test")
self.assertIsInstance(resp.request, HTTPRequest)
self.assertEqual(resp.request, req)
self.assertEqual(resp.request.resource, "test")
self.assertIsInstance(resp.response, http.client.HTTPResponse)
self.assertEqual(resp.response, web_resp)
def test_unicode(self):
resp = LRSResponse()
resp.data = b"\xce\xb4\xce\xbf\xce\xba\xce\xb9\xce\xbc\xce\xae " \
b"\xcf\x80\xce\xb5\xcf\x81\xce\xb9\xce\xb5\xcf\x87" \
b"\xce\xbf\xce\xbc\xce\xad\xce\xbd\xce\xbf\xcf\x85"
self.assertIsInstance(resp, LRSResponse)
self.assertIsInstance(resp.data, str)
self.assertEqual(resp.data, u"δοκιμή περιεχομένου")
def test_setters_none(self):
resp = LRSResponse()
resp.success = None
resp.content = None
resp.request = None
resp.response = None
self.assertIsInstance(resp, LRSResponse)
self.assertTrue(hasattr(resp, "content"))
self.assertIsNone(resp.content)
self.assertTrue(hasattr(resp, "success"))
self.assertFalse(resp.success)
self.assertTrue(hasattr(resp, "request"))
self.assertIsNone(resp.request)
self.assertTrue(hasattr(resp, "response"))
self.assertIsNone(resp.response)
def test_request_setter(self):
class Tester(object):
def __init__(self, resource="ok", headers=None):
if headers is None:
headers = {"test": "ok"}
self.resource = resource
self.headers = headers
obj = Tester()
resp = LRSResponse(request=obj)
self.assertIsInstance(resp, LRSResponse)
self.assertIsInstance(resp.request, HTTPRequest)
self.assertTrue(hasattr(resp.request, "resource"))
self.assertEqual(resp.request.resource, "ok")
self.assertTrue(hasattr(resp.request, "headers"))
self.assertEqual(resp.request.headers, {"test": "ok"})
def test_response_setter(self):
class Tester(object):
def __init__(self, msg="ok", version="test"):
self.msg = msg
self.version = version
obj = Tester()
with self.assertRaises(TypeError):
LRSResponse(response=obj)
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(LRSResponseTest)
unittest.TextTestRunner(verbosity=2).run(suite)
|
apache-2.0
| -417,415,810,103,817,200 | 30.065327 | 77 | 0.62407 | false |
ericchill/gnofract4d
|
fract4d/gradient.py
|
1
|
28489
|
#!/usr/bin/env python
import math
import re
import StringIO
import copy
import random
import types
import struct
import colorsys
#Class definition for Gradients
#These use the format defined by the GIMP
#The file format is:
# GIMP Gradient ; literal identifier
# Name: <utf8-name> ; optional, else get from filename
# 3 ; number of points N
# ; N lines like this
# 0.000000 0.166667 0.333333 0.000000 0.000000 1.000000 1.000000 0.000000 0.000000 1.000000 1.000000 0 0
# The format is
# start middle end [range 0...1]
# R G B A left endpoint
# R G B A right endpoint
# segment_type coloring_type
# segment-type is
# GIMP_GRADIENT_SEGMENT_LINEAR,
# GIMP_GRADIENT_SEGMENT_CURVED,
# GIMP_GRADIENT_SEGMENT_SINE,
# GIMP_GRADIENT_SEGMENT_SPHERE_INCREASING,
# GIMP_GRADIENT_SEGMENT_SPHERE_DECREASING
# color type is
# GIMP_GRADIENT_SEGMENT_RGB, /* normal RGB */
# GIMP_GRADIENT_SEGMENT_HSV_CCW, /* counterclockwise hue */
# GIMP_GRADIENT_SEGMENT_HSV_CW /* clockwise hue */
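# A hypothetical minimal file in this format (one linear RGB segment from red
# to blue; the 13 numbers follow the layout described above):
#   GIMP Gradient
#   Name: example
#   1
#   0.000000 0.500000 1.000000 1.0 0.0 0.0 1.0 0.0 0.0 1.0 1.0 0 0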
#gradientfile_re = re.compile(r'\s*(RGB|HSV)\s+(Linear|Sinusoidal|CurvedI|CurvedD)\s+(\d+\.?\d+)\s+(\d+)\s+(\d+)\s+(\d+\.?\d+)\s+(\d+)\s+(\d+)')
rgb_re = re.compile(r'\s*(\d+)\s+(\d+)\s+(\d+)')
class FileType:
MAP, GGR, CS, UGR = range(4)
@staticmethod
def guess(s):
s = s.lower()
if s.endswith(".map"):
return FileType.MAP
elif s.endswith(".cs"):
return FileType.CS
elif s.endswith(".ugr"):
return FileType.UGR
else:
# assume a GIMP gradient, those sometimes don't have extensions
return FileType.GGR
class Blend:
LINEAR, CURVED, SINE, SPHERE_INCREASING, SPHERE_DECREASING = range(5)
class ColorMode:
RGB, HSV_CCW, HSV_CW = range(3)
class Error(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
class HsvError(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
class Segment:
EPSILON=1.0E-7
def __init__(self, left, left_color, right, right_color, mid=None,
blend_mode=Blend.LINEAR,
color_mode=ColorMode.RGB):
self.cmode = color_mode
self.bmode = blend_mode
self.left = left
self.left_color = left_color
self.right = right
self.right_color = right_color
if mid == None:
self.center()
else:
self.mid = mid
def __copy__(self):
return Segment(
self.left, self.left_color[:],
self.right, self.right_color[:], self.mid,
            self.bmode, self.cmode)
def __eq__(self,other):
if other == None: return False
if not isinstance(other, Segment): return False
return self.cmode == other.cmode and \
self.bmode == other.bmode and \
self.close(self.left, other.left) and \
self.close(self.right, other.right) and \
self.close(self.mid, other.mid) and \
self.close(self.left_color, other.left_color) and \
self.close(self.right_color, other.right_color)
def __ne__(self, other):
return not self.__eq__(other)
def left_of(self,other):
# true if other.left == this.right
return other.left == self.right and \
other.left_color[0] == self.right_color[0] and \
other.left_color[1] == self.right_color[1] and \
other.left_color[2] == self.right_color[2] and \
other.left_color[3] == self.right_color[3]
def right_of(self,other):
# true if other.right == this.left
return other.right == self.left and \
other.right_color[0] == self.left_color[0] and \
other.right_color[1] == self.left_color[1] and \
other.right_color[2] == self.left_color[2] and \
other.right_color[3] == self.left_color[3]
def close(self, a, b):
# True if a is nearly == b
if isinstance(a, types.ListType):
for (ax,bx) in zip(a,b):
if abs(ax-bx) > 1.0E-5:
return False
return True
else:
return abs(a-b) < 1.0E-5
def center(self):
self.mid = (self.left + self.right) / 2.0
def get_linear_factor(self, pos, middle):
if pos <= middle:
if middle < Segment.EPSILON:
return 0.0
else:
return 0.5 * pos / middle
else:
            pos -= middle
middle = 1.0 - middle
if middle < Segment.EPSILON:
return 1.0
else:
return 0.5 + 0.5 * pos / middle
def get_curved_factor(self, pos, middle):
if middle < Segment.EPSILON:
middle = Segment.EPSILON
try:
return math.pow(pos, ( math.log(0.5) / math.log(middle) ))
except ZeroDivisionError:
# 0^negative number is NaN
return 0.0
def get_sine_factor(self, pos, middle):
pos = self.get_linear_factor(pos, middle)
return (math.sin ((-math.pi / 2.0) + math.pi * pos) + 1.0) / 2.0
def get_sphere_increasing_factor(self, pos, middle):
pos = self.get_linear_factor(pos, middle) - 1.0
return math.sqrt (1.0 - pos * pos)
def get_sphere_decreasing_factor(self, pos, middle):
pos = self.get_linear_factor(pos, middle)
return 1.0 - math.sqrt (1.0 - pos * pos)
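    # Informally, each *_factor method above maps pos in [0, 1] to a factor
    # in [0, 1] with the segment midpoint pulled to 0.5; for example
    # (assuming the implementations above):
    #   get_linear_factor(0.25, 0.5) == 0.25
    #   get_linear_factor(0.50, 0.5) == 0.5
    #   get_sine_factor(0.50, 0.5)   == 0.5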
def get_color_at(self, pos):
'compute the color value for a point in this segment'
lcol = self.left_color
rcol = self.right_color
if self.cmode == ColorMode.HSV_CCW or self.cmode == ColorMode.HSV_CW:
lcol = [v for v in colorsys.rgb_to_hsv(lcol[0],lcol[1],lcol[2])] + [lcol[3]]
rcol = [v for v in colorsys.rgb_to_hsv(rcol[0],rcol[1],rcol[2])] + [rcol[3]]
if self.cmode == ColorMode.HSV_CCW:
if lcol[0] >= rcol[0]: rcol[0] += 1.0
if self.cmode == ColorMode.HSV_CW:
if lcol[0] <= rcol[0]: lcol[0] += 1.0
len = self.right-self.left
if len < Segment.EPSILON:
# avoid division by zero
mpos = 0.5
pos = 0.5
else:
mpos = (self.mid - self.left) / len
pos = (pos- self.left) / len
if self.bmode == Blend.LINEAR:
factor = self.get_linear_factor(pos, mpos)
elif self.bmode == Blend.CURVED:
factor = self.get_curved_factor(pos, mpos)
elif self.bmode == Blend.SINE:
factor = self.get_sine_factor(pos, mpos)
elif self.bmode == Blend.SPHERE_INCREASING:
factor = self.get_sphere_increasing_factor(pos, mpos)
elif self.bmode == Blend.SPHERE_DECREASING:
factor = self.get_sphere_decreasing_factor(pos, mpos)
#Assume RGB mode, for the moment
RH = lcol[0] + (rcol[0] - lcol[0]) * factor
GS = lcol[1] + (rcol[1] - lcol[1]) * factor
BV = lcol[2] + (rcol[2] - lcol[2]) * factor
A = lcol[3] + (rcol[3] - lcol[3]) * factor
if self.cmode == ColorMode.RGB:
return [RH, GS, BV, A]
if self.cmode == ColorMode.HSV_CCW or self.cmode == ColorMode.HSV_CW:
if RH > 1: RH -= 1
return [v for v in colorsys.hsv_to_rgb(RH,GS,BV)] + [A]
def save(self,f,skip_left=False):
if skip_left:
# this segment's left end == previous right, so leave it out
print >>f, "+%6f %6f" % (self.mid, self.right),
for x in self.right_color:
print >>f, "%6f" % x,
else:
print >>f, "%6f %6f %6f" % (self.left, self.mid, self.right),
for x in self.left_color + self.right_color:
print >>f, "%6f" % x,
print >>f, "%d %d" % (self.bmode, self.cmode)
class Gradient:
def __init__(self):
self.segments=[
Segment(0,[0,0,0,1.0], 1.0, [1.0,1.0,1.0,1.0])]
self.name=None
self.alternate=0
self.offset=0
self.cobject=None
def __copy__(self):
c = Gradient()
c.name = self.name
c.alternate = self.alternate
c.offset = self.offset
c.segments = copy.deepcopy(self.segments)
return c
def __eq__(self, other):
if other == None: return False
if not isinstance(other, Gradient): return False
if self.name != other.name: return False
if self.segments != other.segments: return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def serialize(self):
s = StringIO.StringIO()
self.save(s,True)
return s.getvalue()
def save(self,f,compress=False):
print >>f, "GIMP Gradient"
if self.name:
print >>f, "Name:", self.name
print >>f, len(self.segments)
last = None
for seg in self.segments:
compress_seg = compress and last != None and seg.right_of(last)
seg.save(f, compress_seg)
last = seg
def load_cs(self, f):
"Load a ColorSchemer (.cs) palette file"
# this appears to work but file format was reverse engineered
# so there may be cases unaccounted for
(ncolors,) = struct.unpack("2xB5x",f.read(8))
list = []
for i in xrange(ncolors):
(r,g,b,skip) = struct.unpack("<BBBxI", f.read(8))
entry = (i/float(ncolors), r,g,b,255)
f.read(skip)
(r2,g2,b2,skip) = struct.unpack("BBBB", f.read(4))
f.read(skip+1)
list.append(entry)
self.load_list(list)
def load_ugr(self, f):
"Load an ir tree parsed by the translator"
prev_index = 0.0
index = 0.0
segments = []
prev_color = [0.0,0.0,0.0,0.0]
for s in f.sections["gradient"].children:
(name,val) = (s.children[0].name, s.children[1].value)
if name == "index":
index = float(val)/400.0
elif name == "color":
icolor = val
color = [
float(icolor & 0xFF) / 256.0,
float((icolor >> 8) & 0xFF) / 256.0,
float((icolor >> 16) & 0xFF) / 256.0,
1.0]
seg = Segment(
prev_index, prev_color,
index, color,
(prev_index + index)/2.0,
Blend.LINEAR, ColorMode.RGB)
segments.append(seg)
prev_index = index
prev_color = color
elif name == "smooth":
pass #self.smooth = val
elif name == "title":
self.name = val
# append a last chunk from the final value to 1.0
seg = Segment(
prev_index, prev_color,
1.0, prev_color,
(prev_index + 1.0)/2.0,
Blend.LINEAR, ColorMode.RGB)
segments.append(seg)
self.segments = segments
def load_gimp_gradient(self,f):
new_segments = []
name = None
line = f.readline()
if line.startswith("Name:"):
name = line[5:].strip()
line = f.readline()
num_vals = int(line)
for i in xrange(num_vals):
line = f.readline()
if line[:1] == "+":
# a compressed continuation, use last vals
left = right
lr,lg,lb,la = rr,rg,rb,ra
[mid,right,rr,rg,rb,ra,bmode,cmode] = line.split()
else:
list_elements = line.split()
[left, mid, right,
lr, lg, lb, la,
rr, rg, rb, ra,
bmode, cmode] = list_elements[0:13]
seg = Segment(
float(left), [float(lr), float(lg), float(lb), float(la)],
float(right),[float(rr), float(rg), float(rb), float(ra)],
float(mid),
int(bmode), int(cmode))
new_segments.append(seg)
self.segments = new_segments
self.name = name
def load(self,f):
if hasattr(f, "readline"):
# assume this is a file type
line = f.readline()
if line == "GIMP Gradient\n":
return self.load_gimp_gradient(f)
elif line[:2] == "\x03\x00":
# a .cs file, we suspect
f.seek(0)
return self.load_cs(f)
else:
f.seek(0)
return self.load_map_file(f)
else:
# assume it's a translated UGR file
return self.load_ugr(f)
def compare_colors(self, c1, c2, maxdiff=0):
# return true if floating-point colors c1 and c2 are close
# enough that they would be equal when truncated to 8 bits
for (a,b) in zip(c1, c2):
a8 = int(a * 255.0)
b8 = int(b * 255.0)
if abs(a8 - b8) > maxdiff:
return False
return True
def load_map_file(self,mapfile,maxdiff=0):
i = 0
colorlist = []
solid = (0,0,0,255)
for line in mapfile:
m = rgb_re.match(line)
if m != None:
(r,g,b) = (min(255, int(m.group(1))),
min(255, int(m.group(2))),
min(255, int(m.group(3))))
if i == 0:
# first color is inside solid color
solid = (r,g,b,255)
else:
colorlist.append(((i-1)/255.0,r,g,b,255))
i += 1
self.load_list(colorlist,maxdiff)
return solid
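    # A .map palette is just lines of "R G B" byte values; a minimal sketch
    # of the expected input (hypothetical colours) would be:
    #   0   0   0     <- first entry becomes the "solid" inside colour
    #   255 0   0     <- later entries land at index (i-1)/255.0
    #   255 255 0
    #   ...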
def load_list(self,l, maxdiff=0):
# a colorlist is a simplified gradient, of the form
# (index, r, g, b, a) (colors are 0-255 ints)
# each index is the left-hand end of the segment
# each colorlist entry is mapped to a segment endpoint
if len(l) == 0:
raise Error("No colors found")
new_segments = []
last_index = 0.0
last_color = [0.0,0.0,0.0,1.0]
before_last_color = [-1000.0, -1000.0 , -1000.0, -1000.0] # meaningless color
before_last_index = -1.0
for (index,r,g,b,a) in l:
color = [r/255.0, g/255.0, b/255.0, a/255.0]
if index != last_index:
test_segment = Segment(
before_last_index,
before_last_color,
index,
color)
if self.compare_colors(
test_segment.get_color_at(last_index), last_color, maxdiff):
# can compress, update in place
new_segments[-1].right_color = color
new_segments[-1].right = index
new_segments[-1].center()
else:
new_segments.append(
Segment(last_index, last_color, index, color))
before_last_index = last_index
before_last_color = last_color
last_color = color
last_index = index
# fix gradient by adding extra flat section if last index not 1.0
if new_segments[-1].right != 1.0:
new_segments.append(
Segment(new_segments[-1].right, last_color, 1.0, last_color))
self.segments = new_segments
def load_fractint(self,l):
# l is a list of colors from a Fractint .par file
# convert format to colorlist
i = 0
colors = []
for (r,g,b) in l:
colors.append((i/255.0,r*4,g*4,b*4,255))
i += 1
# load it
self.load_list(colors,-1.0)
def set_color(self,seg_id,is_left,r,g,b):
if seg_id < 0 or seg_id >= len(self.segments):
return False
seg = self.segments[seg_id]
if is_left:
seg.left_color = [r,g,b, seg.left_color[3]]
else:
seg.right_color = [r,g,b, seg.right_color[3]]
return True
def complementaries(self, base_color):
# return some other colors that "go" with this one
hsv = RGBtoHSV(base_color)
(h,s,v,a) = hsv
        # take 2 colors which are almost triads
        # (RGBtoHSV returns hue in degrees, so stay in degrees here)
        h = hsv[0]
        delta = random.gauss(0.0, 0.8) * 60.0
        h2 = math.fmod(h + 150.0 + delta, 360.0)
        h3 = math.fmod(h + 210.0 - delta, 360.0)
# take darker and lighter versions
v = hsv[2]
vlight = self.clamp(v * 1.5, 0.0, 1.0)
vdark = v * 0.5
colors = [
[h, s, vdark, a],
[h, s, v, a],
[h, s, vlight, a],
[h2, s, vlight, a],
[h2, s, v, a],
[h2, s, vdark, a],
[h3, s, vdark, a],
[h3, s, v, a],
[h3, s, vlight, a]]
colors = [ HSVtoRGB(x) for x in colors]
return colors
def randomize(self, length):
if random.random() < 0.5:
self.randomize_complementary(length)
else:
self.randomize_spheres((int(random.random() * 4)+3)*2)
def randomize_complementary(self,length):
base = [random.random(), random.random(), random.random(), 1.0]
colors = self.complementaries(base)
self.segments = []
prev_index = 0.0
prev_color = colors[0]
first_color = prev_color
for i in xrange(9-1):
index = float(i+1)/length
color = colors[i]
self.segments.append(
Segment(prev_index, prev_color, index, color))
prev_color = color
prev_index = index
self.segments.append(
Segment(prev_index, prev_color, 1.0, first_color)) # make it wrap
def random_bright_color(self):
return HSVtoRGB(
[ random.random() * 360.0,
random.random(),
random.random() * 0.6 + 0.4,
1.0])
def randomize_spheres(self, length):
self.segments = []
prev_index = 0.0
prev_color = self.random_bright_color()
first_color = prev_color
for i in xrange(length-1):
index = float(i+1)/length
if i % 2 == 1:
color = self.random_bright_color()
blend = Blend.SPHERE_INCREASING
else:
color = [0.0, 0.0, 0.0, 1.0]
blend = Blend.SPHERE_DECREASING
self.segments.append(
Segment(prev_index, prev_color, index, color, None, blend))
prev_color = color
prev_index = index
self.segments.append(
Segment(prev_index, prev_color, 1.0, first_color)) # make it wrap
def get_color_at(self, pos):
# returns the color at position x (0 <= x <= 1.0)
seg = self.get_segment_at(pos)
return seg.get_color_at(pos)
def get_segment_at(self, pos):
#Returns the segment in which pos resides.
if pos < 0.0:
raise IndexError("Must be between 0 and 1, is %s" % pos)
for seg in self.segments:
if pos <= seg.right:
return seg
# not found - must be > 1.0
raise IndexError("Must be between 0 and 1, is %s" % pos)
def get_index_at(self, pos):
# returns the index of the segment in which pos resides
if pos < 0.0:
raise IndexError("Must be between 0 and 1")
length = len(self.segments)
for i in xrange(length):
if pos <= self.segments[i].right:
return i
# not found - must be > 1.0
raise IndexError("Must be between 0 and 1")
def add(self, segindex):
# split the segment which contains point x in half
seg = self.segments[segindex]
if segindex+1 < len(self.segments):
# copy info from next segment to right
segright = self.segments[segindex+1]
right_index = segright.left
right_color = segright.left_color
else:
# adding at right-hand end
right_index = 1.0
right_color = seg.right_color
s_len = (seg.right-seg.left)
s_mid = seg.left + s_len*0.5
newcol= self.get_color_at(s_mid)
# update existing segment to occupy left half
seg.right = s_mid
seg.right_color = newcol
seg.center()
# add new segment to fill right half
self.segments.insert(
segindex+1,
Segment(s_mid, newcol,
right_index, right_color,
None,
seg.bmode, seg.cmode))
def remove(self, segindex, smooth=False):
# remove the segment which contains point x
# extend each of our neighbors so they get half our space each
if len(self.segments) < 2:
raise Error("Can't remove last segment")
seg = self.segments[segindex]
if segindex > 0:
# we have a previous segment
if segindex+1 < len(self.segments):
# and we have a next. Move them both to touch in the middle
self.segments[segindex-1].right=seg.mid
self.segments[segindex+1].left=seg.mid
self.segments[segindex-1].center()
self.segments[segindex+1].center()
if smooth:
midcolor = seg.get_color_at(seg.mid)
self.segments[segindex-1].right_color = copy.copy(midcolor)
self.segments[segindex+1].left_color = copy.copy(midcolor)
else:
# just a left-hand neighbor, let that take over
self.segments[segindex-1].right = 1.0
if smooth:
self.segments[segindex-1].right_color = \
copy.copy(self.segments[segindex].right_color)
self.segments[segindex-1].center()
else:
# we must have a later segment
self.segments[segindex+1].left=0.0
if smooth:
self.segments[segindex+1].left_color = \
copy.copy(self.segments[segindex].left_color)
self.segments[segindex+1].center()
self.segments.pop(segindex)
def clamp(self,a,min,max):
if a > max:
return max
elif a < min:
return min
else:
return a
def set_left(self,i,pos):
# set left end of segment i to pos, if possible
if i < 0 or i >= len(self.segments):
raise IndexError("No such segment")
if i == 0:
# can't move left-hand end of entire gradient
return 0.0
else:
pos = self.clamp(pos,
self.segments[i-1].mid + Segment.EPSILON,
self.segments[i].mid - Segment.EPSILON)
self.segments[i-1].right = self.segments[i].left = pos
return pos
def set_right(self,i,pos):
# set left end of segment i to pos, if possible
if i < 0 or i >= len(self.segments):
raise IndexError("No such segment")
max = len(self.segments)-1
if i == max:
# can't move right-hand end of entire gradient
return 1.0
else:
pos = self.clamp(pos,
self.segments[i].mid + Segment.EPSILON,
self.segments[i+1].mid - Segment.EPSILON)
self.segments[i+1].left = self.segments[i].right = pos
return pos
def set_middle(self,i,pos):
# set middle of segment i to pos, if possible
if i < 0 or i >= len(self.segments):
raise IndexError("No such segment")
pos = self.clamp(pos,
self.segments[i].left + Segment.EPSILON,
self.segments[i].right - Segment.EPSILON)
self.segments[i].mid = pos
return pos
def broken_move(self, handle, move):
seg, side = self.getSegFromHandle(handle)
segindex = self.segments.index(seg)
if (segindex > 0 or side == 'right') and (segindex < len(self.segments)-1 or side == 'left'):
if side == 'left':
self.segments[segindex-1].right.pos+=move
if self.segments[segindex-1].right.pos > 1:
self.segments[segindex-1].right.pos = 1
elif self.segments[segindex-1].right.pos < 0:
self.segments[segindex-1].right.pos = 0
seg.left.pos+=move
if seg.left.pos > 1:
seg.left.pos =1
elif seg.left.pos < 0:
seg.left.pos =0
if seg.left.pos > seg.right.pos:
seg.left.pos = seg.right.pos
self.segments[segindex-1].right.pos=seg.right.pos
elif self.segments[segindex-1].right.pos < self.segments[segindex-1].left.pos:
self.segments[segindex-1].right.pos=self.segments[segindex-1].left.pos
seg.left.pos=self.segments[segindex-1].left.pos
else:
self.segments[segindex+1].left.pos+=move
if self.segments[segindex+1].left.pos > 1:
self.segments[segindex+1].left.pos = 1
elif self.segments[segindex+1].left.pos < 0:
self.segments[segindex+1].left.pos = 0
seg.right.pos+=move
if seg.right.pos > 1:
seg.right.pos =1
elif seg.right.pos < 0:
seg.right.pos =0
if seg.left.pos > seg.right.pos:
seg.right.pos=seg.left.pos
self.segments[segindex+1].left.pos=seg.left.pos
elif self.segments[segindex+1].right.pos < self.segments[segindex+1].left.pos:
self.segments[segindex+1].left.pos=self.segments[segindex+1].right.pos
seg.right.pos=self.segments[segindex+1].right.pos
# These two are adapted from the algorithms at
# http://www.cs.rit.edu/~ncs/color/t_convert.html
def RGBtoHSV(rgb):
hsv = [0,0,0,rgb[3]]
trgb = rgb[0:3]
trgb.sort()
min = trgb[0]
max = trgb[2]
delta = float(max - min)
hsv[2] = max
if delta == 0:
            # r == g == b (grey): s = 0 and h is undefined
hsv[1] = 0
hsv[0] = -1
else:
hsv[1]=delta / max
if rgb[0] == max:
hsv[0] = (rgb[1] - rgb[2]) / delta # between yellow & magenta
elif rgb[1] == max:
hsv[0] = 2 + (rgb[2] - rgb[0] ) / delta # between cyan & yellow
else:
hsv[0] = 4 + (rgb[0] - rgb[1] ) / delta # between magenta & cyan
hsv[0] *= 60 # degrees
if hsv[0] < 0:
hsv[0] += 360
return hsv
def HSVtoRGB(hsv):
rgb=[0,0,0,hsv[3]] # pass through alpha channel
hsv[0]/=60
if hsv[1] == 0:
        return [hsv[2], hsv[2], hsv[2], hsv[3]]
i = int(hsv[0])
f = hsv[0] - i #Decimal bit of hue
p = hsv[2] * (1 - hsv[1])
q = hsv[2] * (1 - hsv[1] * f)
t = hsv[2] * (1 - hsv[1] * (1 - f))
if i == 0:
rgb[0] = hsv[2]
rgb[1] = t
rgb[2] = p
elif i == 1:
rgb[0] = q
rgb[1] = hsv[2]
rgb[2] = p
elif i == 2:
rgb[0] = p
rgb[1] = hsv[2]
rgb[2] = t
elif i == 3:
rgb[0] = p
rgb[1] = q
rgb[2] = hsv[2]
elif i == 4:
rgb[0] = t
rgb[1] = p
rgb[2] = hsv[2]
elif i == 5:
rgb[0] = hsv[2]
rgb[1] = p
rgb[2] = q
return rgb
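# Round-trip example (a sketch; hue is in degrees, channels in [0, 1]):
#   RGBtoHSV([1.0, 0.0, 0.0, 1.0])  -> [0.0, 1.0, 1.0, 1.0]   # pure red
#   HSVtoRGB([0.0, 1.0, 1.0, 1.0])  -> [1.0, 0.0, 0.0, 1.0]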
|
bsd-3-clause
| 3,981,176,068,436,644,000 | 33.241587 | 144 | 0.496753 | false |
CactusTribe/PyRobot
|
src/server/modules/CameraModule.py
|
1
|
1742
|
import threading, io, struct, time, traceback
from ThreadClient import ThreadClient
from devices.Devices import *
class CameraModule(ThreadClient):
def __init__(self, name):
ThreadClient.__init__(self, name)
self.camera_capture = False
# Execute command
def execute_cmd(self, cmd):
args = cmd.split(" ")
cmd = args[0]
if cmd == "cam":
self.Camera_commands(args)
# --------------------------------------------
# MODULE CAMERA
# --------------------------------------------
def Camera_commands(self, args):
try:
if args[1] == "start":
print(" -> Start camera capture")
self.camera_capture = True
ThreadCapture = threading.Thread(target = self.Camera_capture, args = [] )
ThreadCapture.start()
elif args[1] == "stop":
self.camera_capture = False
print(" -> Stop camera capture")
except Exception as e:
print(e)
traceback.print_exc()
def Camera_capture(self):
pipe = self.connection.makefile('wb')
i = 0
try:
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, 'jpeg', use_video_port=True):
# Write the length of the capture to the stream and flush to
# ensure it actually gets sent
pipe.write(struct.pack('<L', stream.tell()))
pipe.flush()
# Rewind the stream and send the image data over the wire
stream.seek(0)
pipe.write(stream.read())
if self.camera_capture == False:
break
# Reset the stream for the next capture
stream.seek(0)
stream.truncate()
i += 1
#print(" > Frame " + str(i) + " time " + str(elapsed)[0:6] + " ms")
# Write a length of zero to the stream to signal we're done
pipe.write(struct.pack('<L', 0))
            print(" (*) " + str(i) + " frames sent")
finally:
pipe.close()
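# The capture above writes a sequence of JPEG frames, each prefixed with a
# little-endian 32-bit length and terminated by a zero length.  The sketch
# below shows one way the receiving side could read that stream back; the
# function name and the `connection` argument are illustrative only.
def read_frames(connection):
    """Yield raw JPEG byte strings until the zero-length terminator."""
    pipe = connection.makefile('rb')
    try:
        header_size = struct.calcsize('<L')
        while True:
            # Each frame is prefixed with struct.pack('<L', length);
            # a zero length marks the end of the capture.
            header = pipe.read(header_size)
            if len(header) < header_size:
                break
            (length,) = struct.unpack('<L', header)
            if length == 0:
                break
            yield pipe.read(length)
    finally:
        pipe.close()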
|
gpl-3.0
| 3,101,360,533,350,989,000 | 24.632353 | 78 | 0.607348 | false |
bitmazk/django-document-library
|
document_library/templatetags/document_library_tags.py
|
1
|
1256
|
"""Templatetags for the ``document_library`` app."""
from django import template
from ..models import Document
register = template.Library()
@register.assignment_tag
def get_files_for_document(document):
"""
Returns the available files for all languages.
In case the file is already present in another language, it does not re-add
it again.
"""
files = []
for doc_trans in document.translations.all():
if doc_trans.filer_file is not None and \
doc_trans.filer_file not in files:
doc_trans.filer_file.language = doc_trans.language_code
files.append(doc_trans.filer_file)
return files
@register.assignment_tag(takes_context=True)
def get_frontpage_documents(context):
"""Returns the library favs that should be shown on the front page."""
req = context.get('request')
qs = Document.objects.published(req).filter(is_on_front_page=True)
return qs
@register.assignment_tag(takes_context=True)
def get_latest_documents(context, count=5):
"""
Returns the latest documents.
:param count: Number of documents to be returned. Defaults to 5.
"""
req = context.get('request')
qs = Document.objects.published(req)[:count]
return qs
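# Typical template usage (a sketch; the tag names are those registered
# above, the template variable names are illustrative only):
#   {% load document_library_tags %}
#   {% get_latest_documents 3 as latest_docs %}
#   {% get_frontpage_documents as frontpage_docs %}
#   {% get_files_for_document document as files %}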
|
mit
| -5,710,013,912,025,376,000 | 26.304348 | 79 | 0.680732 | false |
fabianopetroni/connector
|
connector/connector.py
|
1
|
1179
|
import os
import sys
# Flag indicating whether the connection to the ftrack server is available
FTRACK_CONNECTED = False
sys.path += ["D:/server/apps/3rdparty/ftrack-python"]
os.environ['FTRACK_SERVER'] = 'https://cas.ftrackapp.com'
os.environ['LOGNAME'] = 'fabianopetroni'
import ftrack
FTRACK_CONNECTED = True
class Connector(object):
"""Class for Connector
Args:
        user (Optional[str]): user name; defaults to the system username if None
"""
def __init__(self, user=None):
super(Connector, self).__init__()
self.user = user
self.userDetails = None
self.userTasks = None
if not self.user:
self.user = os.environ['USERNAME']
def getUser(self):
return self.user
def setUser(self, value):
self.user = value
def connect(self):
os.environ['LOGNAME'] = self.user
if FTRACK_CONNECTED is True:
            print 'Connection successful!'
def getUserDetails(self):
self.userDetails = ftrack.getUser(self.user)
return self.userDetails
def getUserTasks(self):
userDetail = self.getUserDetails()
self.userTasks = userDetail.getTasks()
return self.userTasks
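# A minimal usage sketch (assumes the ftrack server configured above is
# reachable and that the username exists there):
#   connector = Connector('some.username')
#   connector.connect()
#   tasks = connector.getUserTasks()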
|
mit
| 2,735,098,389,936,490,500 | 21.245283 | 73 | 0.628499 | false |
quodlibet/quodlibet
|
quodlibet/qltk/songlistcolumns.py
|
1
|
16188
|
# Copyright 2005 Joe Wreschnig
# 2012 Christoph Reiter
# 2011-2020 Nick Boultbee
# 2014 Jan Path
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from gi.repository import Gtk, Pango, GLib, Gio
from senf import fsnative, fsn2text
from quodlibet.util.string.date import format_date
from quodlibet import _
from quodlibet import util
from quodlibet import config
from quodlibet import app
from quodlibet.pattern import Pattern
from quodlibet.qltk.views import TreeViewColumnButton
from quodlibet.qltk import add_css
from quodlibet.util.path import unexpand
from quodlibet.formats._audio import FILESYSTEM_TAGS
from quodlibet.qltk.x import CellRendererPixbuf
def create_songlist_column(t):
"""Returns a SongListColumn instance for the given tag"""
if t in ["~#added", "~#mtime", "~#lastplayed", "~#laststarted"]:
return DateColumn(t)
elif t in ["~length", "~#length"]:
return LengthColumn()
elif t == "~#filesize":
return FilesizeColumn()
elif t in ["~rating"]:
return RatingColumn()
elif t.startswith("~#"):
return NumericColumn(t)
elif t in FILESYSTEM_TAGS:
return FSColumn(t)
elif "<" in t:
return PatternColumn(t)
elif "~" not in t and t != "title":
return NonSynthTextColumn(t)
else:
return WideTextColumn(t)
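# For example, the mapping above gives (a sketch):
#   create_songlist_column("~#added")   -> DateColumn
#   create_songlist_column("~#length")  -> LengthColumn
#   create_songlist_column("~rating")   -> RatingColumn
#   create_songlist_column("<artist>")  -> PatternColumn
#   create_songlist_column("artist")    -> NonSynthTextColumn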
def _highlight_current_cell(cr, background_area, cell_area, flags):
"""Draws a 'highlighting' background for the cell. Look depends on
the active theme.
"""
# Use drawing code/CSS for Entry (reason being that it looks best here)
dummy_widget = Gtk.Entry()
style_context = dummy_widget.get_style_context()
style_context.save()
# Make it less prominent
state = Gtk.StateFlags.INSENSITIVE | Gtk.StateFlags.BACKDROP
style_context.set_state(state)
color = style_context.get_border_color(state)
add_css(dummy_widget,
"* { border-color: rgba(%d, %d, %d, 0.3); }" % (
color.red * 255, color.green * 255, color.blue * 255))
ba = background_area
ca = cell_area
# Draw over the left and right border so we don't see the rounded corners
# and borders. Use height for the overshoot as rounded corners + border
# should never be larger than the height..
# Ideally we would draw over the whole background but the cell area only
# redraws the cell_area so we get leftover artifacts if we draw
# above/below.
draw_area = (ba.x - ca.height, ca.y,
ba.width + ca.height * 2, ca.height)
cr.save()
cr.new_path()
cr.rectangle(ba.x, ca.y, ba.width, ca.height)
cr.clip()
Gtk.render_background(style_context, cr, *draw_area)
Gtk.render_frame(style_context, cr, *draw_area)
cr.restore()
style_context.restore()
class SongListCellAreaBox(Gtk.CellAreaBox):
highlight = False
def do_render(self, context, widget, cr, background_area, cell_area,
flags, paint_focus):
if self.highlight and not flags & Gtk.CellRendererState.SELECTED:
_highlight_current_cell(cr, background_area, cell_area, flags)
return Gtk.CellAreaBox.do_render(
self, context, widget, cr, background_area, cell_area,
flags, paint_focus)
def do_apply_attributes(self, tree_model, iter_, is_expander, is_expanded):
self.highlight = tree_model.get_path(iter_) == tree_model.current_path
return Gtk.CellAreaBox.do_apply_attributes(
self, tree_model, iter_, is_expander, is_expanded)
class SongListColumn(TreeViewColumnButton):
def __init__(self, tag):
"""tag e.g. 'artist'"""
title = self._format_title(tag)
super().__init__(
title=title, cell_area=SongListCellAreaBox())
self.set_tooltip_text(title)
self.header_name = tag
self.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
self.set_visible(True)
self.set_sort_indicator(False)
self._last_rendered = None
def _format_title(self, tag):
"""Format the column title based on the tag"""
return util.tag(tag)
def _needs_update(self, value):
"""Call to check if the last passed value was the same.
This is used to reduce formatting if the input is the same
either because of redraws or all columns have the same value
"""
if self._last_rendered == value:
return False
self._last_rendered = value
return True
class TextColumn(SongListColumn):
"""Base text column"""
def __init__(self, tag):
super().__init__(tag)
self._render = Gtk.CellRendererText()
self.pack_start(self._render, True)
self.set_cell_data_func(self._render, self._cdf)
self.set_clickable(True)
# We check once in a while if the font size has changed. If it has
# we reset the min/fixed width and force at least one cell to update
# (which might trigger other column size changes..)
self._last_width = None
self._force_update = False
self._deferred_width_check = util.DeferredSignal(
self._check_width_update, timeout=500)
def on_tv_changed(column, old, new):
if new is None:
self._deferred_width_check.abort()
else:
self._deferred_width_check.call()
self.connect("tree-view-changed", on_tv_changed)
def _get_min_width(self):
return -1
def _cell_width(self, text):
"""Returns the column width needed for the passed text"""
widget = self.get_tree_view()
assert widget is not None
layout = widget.create_pango_layout(text)
text_width = layout.get_pixel_size()[0]
cell_pad = self._render.get_property('xpad')
return text_width + 8 + cell_pad
def _check_width_update(self):
width = self._cell_width(u"abc 123")
if self._last_width == width:
self._force_update = False
return
self._last_width = width
self._force_update = True
self.queue_resize()
def _needs_update(self, value):
return self._force_update or \
super()._needs_update(value)
def _cdf(self, column, cell, model, iter_, user_data):
self._deferred_width_check()
if self._force_update:
min_width = self._get_min_width()
self.set_min_width(min_width)
if not self.get_resizable():
self.set_fixed_width(min_width)
# calling it in the cell_data_func leads to broken drawing..
GLib.idle_add(self.queue_resize)
value = self._fetch_value(model, iter_)
if not self._needs_update(value):
return
self._apply_value(model, iter_, cell, value)
def _fetch_value(self, model, iter_):
"""Should return everything needed for formatting the final value"""
raise NotImplementedError
def _apply_value(self, model, iter_, cell, value):
"""Should format the value and set it on the cell renderer"""
raise NotImplementedError
class RatingColumn(TextColumn):
"""Render ~rating directly
(simplifies filtering, saves a function call).
"""
def __init__(self, *args, **kwargs):
super().__init__("~rating", *args, **kwargs)
self.set_expand(False)
self.set_resizable(False)
def _get_min_width(self):
return self._cell_width(util.format_rating(1.0))
def _fetch_value(self, model, iter_):
song = model.get_value(iter_)
rating = song.get("~#rating")
default = config.RATINGS.default
return (rating, default)
def _apply_value(self, model, iter_, cell, value):
rating, default = value
cell.set_sensitive(rating is not None)
value = rating if rating is not None else default
cell.set_property('text', util.format_rating(value))
class WideTextColumn(TextColumn):
"""Resizable and ellipsized at the end. Used for any key with
a '~' in it, and 'title'.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._render.set_property('ellipsize', Pango.EllipsizeMode.END)
self.set_resizable(True)
def _get_min_width(self):
return self._cell_width("000")
def _fetch_value(self, model, iter_):
return model.get_value(iter_).comma(self.header_name)
def _apply_value(self, model, iter_, cell, value):
cell.set_property('text', value)
class DateColumn(WideTextColumn):
"""The '~#' keys that are dates."""
def _fetch_value(self, model, iter_):
return model.get_value(iter_)(self.header_name)
def _apply_value(self, model, iter_, cell, stamp):
if not stamp:
cell.set_property('text', _("Never"))
else:
fmt = config.gettext("settings", "datecolumn_timestamp_format")
text = format_date(stamp, fmt)
cell.set_property('text', text)
class NonSynthTextColumn(WideTextColumn):
"""Optimize for non-synthesized keys by grabbing them directly.
Used for any tag without a '~' except 'title'.
"""
def _fetch_value(self, model, iter_):
return model.get_value(iter_).get(self.header_name, "")
def _apply_value(self, model, iter_, cell, value):
cell.set_property('text', value.replace("\n", ", "))
class FSColumn(WideTextColumn):
"""Contains text in the filesystem encoding, so needs to be
decoded safely (and also more slowly).
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._render.set_property('ellipsize', Pango.EllipsizeMode.MIDDLE)
def _fetch_value(self, model, iter_):
values = model.get_value(iter_).list(self.header_name)
return values[0] if values else fsnative(u"")
def _apply_value(self, model, iter_, cell, value):
cell.set_property('text', fsn2text(unexpand(value)))
class PatternColumn(WideTextColumn):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
try:
self._pattern = Pattern(self.header_name)
except ValueError:
self._pattern = None
def _format_title(self, tag):
return util.pattern(tag)
def _fetch_value(self, model, iter_):
song = model.get_value(iter_)
if self._pattern is not None:
return self._pattern % song
return u""
def _apply_value(self, model, iter_, cell, value):
cell.set_property('text', value)
class NumericColumn(TextColumn):
"""Any '~#' keys except dates."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._render.set_property('xalign', 1.0)
self.set_alignment(1.0)
self.set_expand(False)
self.set_resizable(False)
self._texts = {}
self._timeout = None
def on_tv_changed(column, old, new):
if new is None and self._timeout is not None:
GLib.source_remove(self._timeout)
self._timeout = None
self.connect("tree-view-changed", on_tv_changed)
def _get_min_width(self):
"""Give the initial and minimum width. override if needed"""
# Best efforts for the general minimum width case
# Allows well for >=1000 Kbps, -12.34 dB RG values, "Length" etc
return self._cell_width("-22.22")
def _fetch_value(self, model, iter_):
return model.get_value(iter_).comma(self.header_name)
def _apply_value(self, model, iter_, cell, value):
if isinstance(value, float):
text = u"%.2f" % round(value, 2)
else:
text = str(value)
cell.set_property('text', text)
self._recalc_width(model.get_path(iter_), text)
def _delayed_recalc(self):
self._timeout = None
tv = self.get_tree_view()
assert tv is not None
range_ = tv.get_visible_range()
if not range_:
return
start, end = range_
start = start[0]
end = end[0]
# compute the cell width for all drawn cells in range +/- 3
for key, value in list(self._texts.items()):
if not (start - 3) <= key <= (end + 3):
del self._texts[key]
elif isinstance(value, str):
self._texts[key] = self._cell_width(value)
# resize if too small or way too big and above the minimum
width = self.get_width()
needed_width = max(
[self._get_min_width()] + list(self._texts.values()))
if width < needed_width:
self._resize(needed_width)
elif width - needed_width >= self._cell_width("0"):
self._resize(needed_width)
def _resize(self, width):
# In case the treeview has no other expanding columns, setting the
# width will have no effect on the actual width. Calling queue_resize()
# in that case would result in an endless recalc loop. So stop here.
if width == self.get_fixed_width() and width == self.get_max_width():
return
self.set_fixed_width(width)
self.set_max_width(width)
self.queue_resize()
def _recalc_width(self, path, text):
self._texts[path[0]] = text
if self._timeout is not None:
GLib.source_remove(self._timeout)
self._timeout = None
self._timeout = GLib.idle_add(self._delayed_recalc,
priority=GLib.PRIORITY_LOW)
class LengthColumn(NumericColumn):
def __init__(self):
super().__init__("~#length")
def _get_min_width(self):
# 1:22:22, allows entire albums as files (< 75mins)
return self._cell_width(util.format_time_display(60 * 82 + 22))
def _fetch_value(self, model, iter_):
return model.get_value(iter_).get("~#length", 0)
def _apply_value(self, model, iter_, cell, value):
text = util.format_time_display(value)
cell.set_property('text', text)
self._recalc_width(model.get_path(iter_), text)
class FilesizeColumn(NumericColumn):
def __init__(self):
super().__init__("~#filesize")
def _get_min_width(self):
# e.g "2.22 MB"
return self._cell_width(util.format_size(2.22 * (1024 ** 2)))
def _fetch_value(self, model, iter_):
return model.get_value(iter_).get("~#filesize", 0)
def _apply_value(self, model, iter_, cell, value):
text = util.format_size(value)
cell.set_property('text', text)
self._recalc_width(model.get_path(iter_), text)
class CurrentColumn(SongListColumn):
"""Displays the current song indicator, either a play or pause icon."""
def __init__(self):
super().__init__("~current")
self._render = CellRendererPixbuf()
self.pack_start(self._render, True)
self._render.set_property('xalign', 0.5)
self.set_fixed_width(24)
self.set_expand(False)
self.set_cell_data_func(self._render, self._cdf)
def _format_title(self, tag):
return u""
def _cdf(self, column, cell, model, iter_, user_data):
PLAY = "media-playback-start"
PAUSE = "media-playback-pause"
STOP = "media-playback-stop"
ERROR = "dialog-error"
row = model[iter_]
if row.path == model.current_path:
player = app.player
if player.error:
name = ERROR
elif model.sourced:
name = [PLAY, PAUSE][player.paused]
else:
name = STOP
else:
name = None
if not self._needs_update(name):
return
if name is not None:
gicon = Gio.ThemedIcon.new_from_names(
[name + "-symbolic", name])
else:
gicon = None
cell.set_property('gicon', gicon)
|
gpl-2.0
| -8,249,796,910,159,764,000 | 31.376 | 79 | 0.602916 | false |
Pajn/RAXA-Django
|
common/models/Temp.py
|
1
|
1389
|
'''
Copyright (C) 2013 Rasmus Eneman <rasmus@eneman.eu>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from django.db import models
from django.forms import ModelForm, HiddenInput, ModelChoiceField
from django.utils.translation import ugettext_lazy as _
from backend.models import Thermometer, Floor
class Temp(models.Model):
floor = models.ForeignKey(Floor)
x = models.IntegerField()
y = models.IntegerField()
thermometer = models.ForeignKey(Thermometer)
class Meta:
app_label = 'common'
class TempForm(ModelForm):
thermometer = ModelChoiceField(Thermometer.objects, label=_('Thermometer'))
class Meta:
model = Temp
widgets = {
'floor': HiddenInput(),
'x': HiddenInput(),
'y': HiddenInput(),
}
|
agpl-3.0
| 3,343,948,646,732,781,000 | 31.302326 | 79 | 0.722102 | false |
geomf/omf-fork
|
omf/model/user.py
|
1
|
2023
|
#
# Open Modeling Framework (OMF) Software for simulating power systems behavior
# Copyright (c) 2015, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
from passlib.hash import pbkdf2_sha512
from omf.model.dbo import db
from omf.common.userRole import Role
from flask_login import UserMixin
class User(db.Model, UserMixin):
__tablename__ = "users"
id = db.Column(db.Integer, nullable=False, primary_key=True)
username = db.Column(db.String(80), nullable=False)
    reg_key = db.Column(db.String(80), nullable=True)
    timestamp = db.Column(db.TIMESTAMP(timezone=True), nullable=True)
    registered = db.Column(db.Boolean, nullable=True)
    csrf = db.Column(db.String(80), nullable=True)
    password_digest = db.Column(db.String(200), nullable=True)
role = db.Column(db.Enum('admin', 'user', 'public', name='role'))
def __init__(self, username, reg_key = None, timestamp = None, registered = None, csrf = None, password_digest = None, role = Role.USER):
self.username = username
self.reg_key = reg_key
self.timestamp = timestamp
self.registered = registered
self.csrf = csrf
self.password_digest = password_digest
if isinstance(role, Role):
self.role = role.value
else:
self.role = role
def get_id(self): return self.username
def get_user_id(self):
return self.id
def verify_password(self, password):
return pbkdf2_sha512.verify(password, self.password_digest)
@staticmethod
def hash_password(password):
return pbkdf2_sha512.encrypt(password)
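# A minimal usage sketch (illustrative values only):
#   digest = User.hash_password('s3cret')
#   user = User('alice', password_digest=digest, role=Role.USER)
#   user.verify_password('s3cret')   # -> True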
|
gpl-2.0
| 2,636,837,996,714,867,700 | 37.169811 | 141 | 0.694513 | false |
sohyongsheng/kaggle-carvana
|
plot_learning_curves.py
|
1
|
2337
|
import numpy
import matplotlib.pyplot
import pylab
import sys
def plot_learning_curves(experiment, epochs, train_losses, cross_validation_losses, dice_scores, x_limits = None, y_limits = None):
axes = matplotlib.pyplot.figure().gca()
x_axis = axes.get_xaxis()
x_axis.set_major_locator(pylab.MaxNLocator(integer = True))
matplotlib.pyplot.plot(epochs, train_losses)
matplotlib.pyplot.plot(epochs, cross_validation_losses)
matplotlib.pyplot.plot(epochs, dice_scores)
matplotlib.pyplot.legend(['Training loss', 'Cross validation loss', 'Dice scores'])
matplotlib.pyplot.xlabel('Epochs')
matplotlib.pyplot.ylabel('Loss or Dice score')
matplotlib.pyplot.title(experiment)
if x_limits is not None: matplotlib.pyplot.xlim(x_limits)
if y_limits is not None: matplotlib.pyplot.ylim(y_limits)
output_directory = './results/' + experiment + '/learningCurves/'
image_file = output_directory + 'learning_curves.png'
matplotlib.pyplot.tight_layout()
matplotlib.pyplot.savefig(image_file)
def process_results(experiment, x_limits, y_limits):
output_directory = './results/' + experiment + '/learningCurves/'
train_losses = numpy.load(output_directory + 'train_losses.npy')
cross_validation_losses = numpy.load(output_directory + 'cross_validation_losses.npy')
dice_scores = numpy.load(output_directory + 'dice_scores.npy')
epochs = numpy.arange(1, len(train_losses) + 1)
plot_learning_curves(experiment, epochs, train_losses, cross_validation_losses, dice_scores, x_limits, y_limits)
training_curves = numpy.column_stack((epochs, train_losses, cross_validation_losses, dice_scores))
numpy.savetxt(
output_directory + 'training_curves.csv',
training_curves,
fmt = '%d, %.5f, %.5f, %.5f',
header = 'Epochs, Train loss, Cross validation loss, Dice scores'
)
if __name__ == '__main__':
dice_score_limits = [0.995, 0.997]
loss_limits = [0.02, 0.08]
x_limits = [1, 150]
# Assign either dice_score_limits or loss_limits depending on what you want to focus on.
y_limits = loss_limits
# experiments = ['experiment' + str(i) for i in [53, 60, 61]]
experiments = ['my_solution']
for experiment in experiments:
process_results(experiment, x_limits, y_limits)
|
gpl-3.0
| 1,077,009,801,315,857,200 | 43.942308 | 131 | 0.685494 | false |
czcorpus/kontext
|
lib/plugins/default_query_history/__init__.py
|
1
|
13623
|
# Copyright (c) 2013 Charles University in Prague, Faculty of Arts,
# Institute of the Czech National Corpus
# Copyright (c) 2013 Tomas Machalek <tomas.machalek@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# dated June, 1991.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
"""
A plugin providing a history of users' queries for features such as 'query history'.
Required config.xml/plugins entries: please see config.rng
"""
from datetime import datetime
import time
import random
import logging
from plugins.abstract.query_history import AbstractQueryHistory
from plugins import inject
import plugins
from manatee import Corpus
from corplib.fallback import EmptyCorpus
import strings
class CorpusCache:
def __init__(self, corpus_manager):
self._cm = corpus_manager
self._corpora = {}
def corpus(self, cname: str) -> Corpus:
if not cname:
return EmptyCorpus()
if cname not in self._corpora:
self._corpora[cname] = self._cm.get_corpus(cname)
return self._corpora[cname]
class QueryHistory(AbstractQueryHistory):
# we define a 10% chance that on write there will be a check for old records
PROB_DELETE_OLD_RECORDS = 0.1
DEFAULT_TTL_DAYS = 10
def __init__(self, conf, db, query_persistence, auth):
"""
arguments:
conf -- the 'settings' module (or some compatible object)
db -- default_db history backend
"""
tmp = conf.get('plugins', 'query_history').get('ttl_days', None)
if tmp:
self.ttl_days = int(tmp)
else:
self.ttl_days = self.DEFAULT_TTL_DAYS
logging.getLogger(__name__).warning(
'QueryHistory - ttl_days not set, using default value {0} day(s) for query history records'.format(
self.ttl_days))
self.db = db
self._query_persistence = query_persistence
self._auth = auth
self._page_num_records = int(conf.get('plugins', 'query_history')['page_num_records'])
def _current_timestamp(self):
return int(time.time())
def _mk_key(self, user_id):
return 'query_history:user:%d' % user_id
def _mk_tmp_key(self, user_id):
return 'query_history:user:%d:new' % user_id
def store(self, user_id, query_id, q_supertype):
"""
stores information about a query; from time
to time also check remove too old records
arguments:
see the super class
"""
item = dict(created=self._current_timestamp(), query_id=query_id,
name=None, q_supertype=q_supertype)
self.db.list_append(self._mk_key(user_id), item)
if random.random() < QueryHistory.PROB_DELETE_OLD_RECORDS:
self.delete_old_records(user_id)
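    # For example, a record stored under the key "query_history:user:42"
    # has the shape (illustrative values):
    #   {'created': 1633024800, 'query_id': 'abc123',
    #    'name': None, 'q_supertype': 'conc'}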
def make_persistent(self, user_id, query_id, name):
k = self._mk_key(user_id)
data = self.db.list_get(k)
for i, item in enumerate(data):
if item.get('query_id', None) == query_id:
item['name'] = name
self.db.list_set(k, i, item)
self._query_persistence.archive(user_id, query_id)
return True
return False
def make_transient(self, user_id, query_id, name):
k = self._mk_key(user_id)
data = self.db.list_get(k)
for i, item in enumerate(data):
if item.get('query_id', None) == query_id:
item['name'] = None
self.db.list_set(k, i, item)
return True
return False
def delete(self, user_id, query_id, created):
k = self._mk_key(user_id)
data = self.db.list_get(k)
self.db.remove(k)
deleted = 0
for item in data:
if item.get('query_id') != query_id or item.get('created', 0) != created:
self.db.list_append(k, item)
else:
deleted += 1
return deleted
def _is_paired_with_conc(self, data):
q_id = data['query_id']
edata = self._query_persistence.open(q_id)
return edata and 'lastop_form' in edata
def _merge_conc_data(self, data):
q_id = data['query_id']
edata = self._query_persistence.open(q_id)
def get_ac_val(data, name, corp): return data[name][corp] if name in data else None
if edata and 'lastop_form' in edata:
ans = {}
ans.update(data)
form_data = edata['lastop_form']
main_corp = edata['corpora'][0]
if form_data['form_type'] == 'query':
ans['query_type'] = form_data['curr_query_types'][main_corp]
ans['query'] = form_data['curr_queries'][main_corp]
ans['corpname'] = main_corp
ans['subcorpname'] = edata['usesubcorp']
ans['default_attr'] = form_data['curr_default_attr_values'][main_corp]
ans['lpos'] = form_data['curr_lpos_values'][main_corp]
ans['qmcase'] = form_data['curr_qmcase_values'][main_corp]
ans['pcq_pos_neg'] = form_data['curr_pcq_pos_neg_values'][main_corp]
ans['selected_text_types'] = form_data.get('selected_text_types', {})
ans['aligned'] = []
for aitem in edata['corpora'][1:]:
ans['aligned'].append(dict(corpname=aitem,
query=form_data['curr_queries'].get(aitem),
query_type=form_data['curr_query_types'].get(aitem),
default_attr=form_data['curr_default_attr_values'].get(
aitem),
lpos=form_data['curr_lpos_values'].get(aitem),
qmcase=form_data['curr_qmcase_values'].get(aitem),
pcq_pos_neg=form_data['curr_pcq_pos_neg_values'].get(
aitem),
include_empty=get_ac_val(form_data, 'curr_include_empty_values', aitem)))
elif form_data['form_type'] == 'filter':
ans.update(form_data)
ans['corpname'] = main_corp
ans['subcorpname'] = edata['usesubcorp']
ans['aligned'] = []
ans['selected_text_types'] = {}
return ans
else:
return None # persistent result not available
def get_user_queries(self, user_id, corpus_manager, from_date=None, to_date=None, q_supertype=None, corpname=None,
archived_only=False, offset=0, limit=None):
"""
Returns list of queries of a specific user.
arguments:
see the super-class
"""
def matches_corp_prop(data, prop_name, value):
if data.get(prop_name, None) == value:
return True
for aligned in data.get('aligned', []):
if aligned[prop_name] == value:
return True
return False
corpora = CorpusCache(corpus_manager)
data = self.db.list_get(self._mk_key(user_id))
if limit is None:
limit = len(data)
data = [v for v in reversed(data)][offset:(offset + limit)]
full_data = []
for item in data:
if 'query_id' in item:
item_qs = item.get('q_supertype', item.get('qtype'))
item['q_supertype'] = item_qs # upgrade possible deprecated qtype
if item_qs is None or item_qs == 'conc':
tmp = self._merge_conc_data(item)
if not tmp:
continue
tmp['human_corpname'] = corpora.corpus(tmp['corpname']).get_conf('NAME')
for ac in tmp['aligned']:
ac['human_corpname'] = corpora.corpus(ac['corpname']).get_conf('NAME')
full_data.append(tmp)
elif item_qs == 'pquery':
stored = self._query_persistence.open(item['query_id'])
if not stored:
continue
tmp = {'corpname': stored['corpora'][0], 'aligned': []}
tmp['human_corpname'] = corpora.corpus(tmp['corpname']).get_conf('NAME')
q_join = []
for q in stored.get('form', {}).get('conc_ids', []):
stored_q = self._query_persistence.open(q)
if stored_q is None:
logging.getLogger(__name__).warning(
'Missing conc for pquery: {}'.format(q))
else:
for qs in stored_q.get('lastop_form', {}).get('curr_queries', {}).values():
q_join.append(f'{{ {qs} }}')
tmp['query'] = ' && '.join(q_join)
tmp.update(item)
tmp.update(stored)
full_data.append(tmp)
elif item_qs == 'wlist':
stored = self._query_persistence.open(item['query_id'])
if not stored:
continue
query = [stored.get('form', {}).get('wlpat')]
pfw = stored['form']['pfilter_words']
nfw = stored['form']['nfilter_words']
if len(pfw) > 0:
query.append(f'{{{", ".join(pfw)}}}')
if len(nfw) > 0:
query.append(f'NOT {{{", ".join(nfw)}}}')
tmp = dict(corpname=stored['corpora'][0],
aligned=[],
human_corpname=corpora.corpus(stored['corpora'][0]).get_conf('NAME'),
query=' AND '.join(q for q in query if q.strip() != ''))
tmp.update(item)
tmp.update(stored)
full_data.append(tmp)
else:
# deprecated type of record (this will vanish soon as there
# are no persistent history records based on the old format)
tmp = {}
tmp.update(item)
tmp['default_attr'] = None
tmp['lpos'] = None
tmp['qmcase'] = None
tmp['pcq_pos_neg'] = None
tmp['include_empty'] = None
tmp['selected_text_types'] = {}
tmp['aligned'] = []
tmp['name'] = None
full_data.append(tmp)
if from_date:
from_date = [int(d) for d in from_date.split('-')]
from_date = time.mktime(
datetime(from_date[0], from_date[1], from_date[2], 0, 0, 0).timetuple())
full_data = [x for x in full_data if x['created'] >= from_date]
if to_date:
to_date = [int(d) for d in to_date.split('-')]
to_date = time.mktime(
datetime(to_date[0], to_date[1], to_date[2], 23, 59, 59).timetuple())
full_data = [x for x in full_data if x['created'] <= to_date]
if q_supertype:
full_data = [x for x in full_data if x.get('q_supertype') == q_supertype]
if corpname:
full_data = [x for x in full_data if matches_corp_prop(
x, 'corpname', corpname)]
if archived_only:
full_data = [x for x in full_data if x.get('name', None) is not None]
for i, item in enumerate(full_data):
item['idx'] = offset + i
return full_data
def delete_old_records(self, user_id):
"""
Deletes records older than ttl_days. Named records are
kept intact.
"""
data_key = self._mk_key(user_id)
curr_data = self.db.list_get(data_key)
tmp_key = self._mk_tmp_key(user_id)
self.db.remove(tmp_key)
curr_time = time.time()
new_list = []
for item in curr_data:
if item.get('name', None) is not None:
if self._is_paired_with_conc(item):
new_list.append(item)
else:
logging.getLogger(__name__).warning(
'Removed unpaired named query {0} of concordance {1}.'.format(item['name'],
item['query_id']))
elif int(curr_time - item.get('created', 0)) / 86400 < self.ttl_days:
new_list.append(item)
for item in new_list:
self.db.list_append(tmp_key, item)
self.db.rename(tmp_key, data_key)
def export(self, plugin_ctx):
return {'page_num_records': self._page_num_records}
@inject(plugins.runtime.DB, plugins.runtime.QUERY_PERSISTENCE, plugins.runtime.AUTH)
def create_instance(settings, db, query_persistence, auth):
"""
arguments:
settings -- the settings.py module
db -- a 'db' plugin implementation
"""
return QueryHistory(settings, db, query_persistence, auth)
|
gpl-2.0
| 2,395,338,510,441,172,000 | 40.407295 | 120 | 0.51369 | false |
MikeTheGreat/GLT
|
glt/MyClasses/CourseInfo.py
|
1
|
21912
|
"""Contains the CourseInfo object"""
import datetime
import collections
import csv
import os
import shutil
import sys
#import git
import subprocess
import tempfile
import gitlab
from colorama import Fore, Style, init # Back,
init()
from glt.MyClasses.Student import Student
from glt.MyClasses.StudentCollection import StudentCollection
from glt.Parsers.ParserCSV import read_student_list_csv, CsvMode
from glt.Constants import EnvOptions
from glt.PrintUtils import print_color, print_error
from glt.GitLocalUtils import call_shell, git_clone_repo, run_command_capture_output
from glt.PrintUtils import get_logger
logger = get_logger(__name__)
# """This exists to hold an assignment & its GitLab ID"""
HomeworkDesc = collections.namedtuple('HomeworkDesc', \
['name', 'internal_name', 'id'])
# """This exists to hold the info relevant to a recently
# downloaded/updated student project"""
StudentHomeworkUpdateDesc = collections.namedtuple('StudentHomeworkUpdateDesc', \
['student', 'student_dest_dir', 'project', 'timestamp'])
def rmtree_remove_readonly_files(func, path, exc_info):
"""
Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
Usage : ``shutil.rmtree(path, onerror=rmtree_remove_readonly_files)``
This code was copied from
http://stackoverflow.com/questions/2656322/shutil-rmtree-fails-on-windows-with-access-is-denied
"""
import stat
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
class CourseInfo(object):
"""Contains information about the course, and (de)serialize to a file
Information includes:
* a StudentCollection for the roster (in the 'no errors' pile)
* a list of assignments
"""
def __init__(self, section, input_file=None):
""" Copy the information into this object
section: string e.g., "bit142". Will be lower-cased"""
# init all instance vars:
self.section = section.lower()
self.roster = StudentCollection()
self.assignments = []
if input_file is not None:
self.read_course_info_file(input_file)
def merge_students(self, student_list):
""" Copies all students from the 'no error' pile into our roster"""
        # It's weird to have a method that does just this one line
# It's also weird to have other code manipulating the internal
# fields of an object
self.roster.students_no_errors.extend(student_list.students_no_errors)
def read_course_info_file(self, input_file):
"""Read file of course info, return a CourseInfo object
input_file: file object, already open for reading
== Basic file format: ==
Assignments
<A1>,<A2>,<A3><or blank line>
Roster
<CSV file of info>
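        == Example (illustrative values only): ==
        Assignments
        Assign 1,bit142_assign1,12
        Roster
        Ada,Lovelace,ada@example.com,42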
"""
next_line = input_file.readline()
while next_line != "":
next_line = next_line.strip()
if next_line == "Assignments":
sz_assignment = input_file.readline().strip()
if sz_assignment != "":
chunks = sz_assignment.split(",")
i = 0
while i < len(chunks):
self.assignments.append( \
HomeworkDesc(chunks[i],chunks[i+1],int(chunks[i+2])))
i += 3
elif next_line == "Roster":
self.roster = read_student_list_csv(input_file, \
CsvMode.Internal)
break # read_student_list_csv will read to end of file
next_line = input_file.readline()
def write_data_file(self, output_file_path):
"""Write our course info file information out
output_file_path: path to file file"""
with open(output_file_path, "w") as f_out:
            f_out.write("Assignments\n")
            f_out.write(",".join(
                assign.name + "," + assign.internal_name + "," + str(assign.id)
                for assign in self.assignments))
            f_out.write("\n")
f_out.write("Roster\n")
csvwriter = csv.writer(f_out, quoting=csv.QUOTE_MINIMAL)
for student in self.roster.students_no_errors:
csvwriter.writerow([student.first_name,\
student.last_name,\
student.email,\
student.glid])
def add_student_to_project(self, glc, project_id, student):
"""Tells the GitLab server to give the student 'reporter'
level permissions on the project"""
user_permission = {'project_id': project_id, \
'access_level': 20, \
'user_id': student.glid}
# access_level:
# 10 = guest, 20 = reporter, 30 = developer, 40 = master
# Anything else causes an error (e.g., 31 != developer)
try:
#membership = glc.project_members.create(user_permission)
glc.project_members.create(user_permission)
except gitlab.exceptions.GitlabCreateError as exc:
print_error("ERROR: unable to add " + student.first_name + " " \
+ student.last_name + " to the project!")
print_error(str(exc.response_code)+": "+ exc.error_message)
return False
return True
def homework_to_project_name(self, hw_name):
"""Given hw_name (e.g., "Assign_1"), produce the GitLab project name.
        Project name is {section}_{hw_name with spaces removed, lower-cased}"""
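        # For example (names reused from the docstrings above): with
        # self.section == "bit142", homework_to_project_name("Assign_1")
        # returns "bit142_assign_1".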
proj_name = hw_name.replace(" ", "").lower()
proj_name = self.section + "_" + proj_name
return proj_name
def create_homework(self, glc, env):
""" Attempt to create a homework assignment for a course
by creating a project in the GitLab server,
adding a record of the assignment to the course data file,
and giving all current students access to the project"""
proj_name = self.homework_to_project_name( \
env[EnvOptions.HOMEWORK_NAME])
project_data = {'name': proj_name, \
'issues_enabled': False, \
'wall_enabled': False, \
'merge_requests_enabled': False, \
'wiki_enabled': False, \
'snippets_enabled': False, \
'visibility_level': False, \
'builds_enabled': False, \
'public_builds': False, \
'public': False, \
}
# print(project_data)
        # If the project name has already been taken we'll get an error here:
try:
project = glc.projects.create(project_data)
print "Created : " + project.name_with_namespace
# Remember that we created the assignment
# This will be serialized to disk (in the section's data file)
# at the end of this command
self.assignments.append(HomeworkDesc(env[EnvOptions.HOMEWORK_NAME], \
proj_name, project.id))
self.assignments.sort()
except gitlab.exceptions.GitlabCreateError as exc:
            # If the project already exists, look up its info
# and then proceed to add students to it
if exc.error_message['name'][0].find("already been taken") >= 0:
proj_path = env[EnvOptions.USERNAME]+"/"+ proj_name
project = glc.projects.get(proj_path)
logger.debug( "Found existing project " + project.name_with_namespace )
else:
# For anything else, just exit here
print_error("Unable to create project " + proj_name)
print_error(str(exc.response_code)+": "+ str(exc.error_message))
exit()
#print project
# add each student to the project as a reporter
for student in self.roster.students_no_errors:
if self.add_student_to_project(glc, project.id, student):
print 'Added ' + student.first_name + " " + student.last_name
else:
print_error('ERROR: Unable to add ' + student.first_name + \
" " + student.last_name)
cwd_prev = os.getcwd()
dest_dir = os.path.join(env[EnvOptions.TEMP_DIR], project.name)
try:
# This is the part where we add the local, 'starter' repo to the
# GitLab repo.
# This should be idempotent:
# If you already have a GitLab repo, and...
# 1) If the local, starter repo and the GitLab repo are the
# same then no changes will be made to the GitLab repo
# 2) If the local, starter repo is different from the GitLab
# repo then we'll update the existing GitLab repo
git_clone_repo(env[EnvOptions.SERVER_IP_ADDR],\
project, env[EnvOptions.TEMP_DIR])
starter_project_name = "STARTER"
# next, move into the directory
# (so that subsequent commands affect the new repo)
os.chdir(dest_dir)
# TODO: Above line should be os.path.join
# Next, add a 'remote' reference in the newly-cloned repo
# to the starter project on our local machine:
call_shell("git remote add "+starter_project_name+" "\
+env[EnvOptions.HOMEWORK_DIR])
# Get all the files from the starter project:
call_shell("git fetch " + starter_project_name)
# Merge the starter files into our new project:
call_shell("git merge "+starter_project_name+"/master")
            # Clean up (remove) the remote reference
# TODO: Do we actually need to do this? Refs don't get pushed,
# and we delete the whole thing in the next step...
call_shell("git remote remove "+starter_project_name)
# Push the changes back up to GitLab
call_shell("git push")
finally:
# Clear the temporary directory
os.chdir(cwd_prev)
shutil.rmtree(dest_dir, onerror=rmtree_remove_readonly_files)
def download_homework(self, glc, env):
if not self.assignments:
            print_error(self.section + " doesn't have any assignments to download")
exit()
new_student_projects = list()
updated_student_projects = list()
unchanged_student_projects = list()
# After this, one of three things is true:
# 1) hw_to_download is a list of all the homework project
# 2) hw_to_download is a list of exactly one homework project
# 3) EnvOptions.HOMEWORK_NAME didn't match anything and we exit
#
        # 'homework project' is a copy of the HomeworkDesc named tuple (name, internal_name, id)
if env[EnvOptions.HOMEWORK_NAME].lower() == 'all':
# we're going to make a new list with all the projects
hw_name = 'all'
hw_to_download = list(self.assignments)
else:
# we're going to make a new list that should match just one
# homework assignment
hw_name = self.homework_to_project_name(env[EnvOptions.HOMEWORK_NAME])
hw_to_download = [item for item in self.assignments \
if item.name == env[EnvOptions.HOMEWORK_NAME]]
if not hw_to_download:# if list is empty
print_error( env[EnvOptions.HOMEWORK_NAME] + " (internal name: " +\
hw_name + " ) doesn't match any of the have any assignments"\
" in section " + env[EnvOptions.SECTION])
exit()
# First make sure that we can at least create the 'root'
# directory for saving all the homework projects:
dest_dir = env[EnvOptions.STUDENT_WORK_DIR]
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir) # throws an exception on fail
# Next, go through all the projects and see which
# (if any) were forked from the target project
foundAny = False
whichPage = 1
cProject = 0
try:
# If we need to clone (download) any projects we'll put them
# in the temp_dir, then move the dir containing .git (and
# all subdirs) to the place where we want them to end up
temp_dir_root = tempfile.mkdtemp() # this is an absolute path
projects = True
while projects:
logger.debug("About to retrieve page " + str(whichPage) )
projects = glc.projects.all(page=whichPage, per_page=10)
whichPage = whichPage + 1
if projects: foundAny = True
if not foundAny and not projects: # if projects list is empty
print "There are no projects present on the GitLab server"
return
logger.info( "Found the following projects:\n" )
for project in projects:
# For testing, to selectively download a single project:
#if project.id != 61:
# continue
logger.info( project.name_with_namespace + " ID: " + str(project.id) )
# See if the project matches any of the
# 1/many projects that we're trying to download
forked_project = None
if hasattr(project, 'forked_from_project'):
for hw in hw_to_download:
if project.forked_from_project['id'] == hw.id:
forked_project = hw
break
if forked_project is None:
logger.debug( "\tNOT a forked project (or not forked from "\
+ env[EnvOptions.HOMEWORK_NAME] + ")\n" )
continue
logger.info( "\tProject was forked from " + forked_project.name \
+ " (ID:"+str(forked_project.id)+")" )
owner_name = project.path_with_namespace.split('/')[0]
student = Student(username=owner_name, id=project.owner.id)
sys.stdout.write( '.' ) # This will slowly update the screen, so the
# user doesn't just wait till we're all done :)
# figure out the dir (path) for this project
student_dest_dir = student.generate_hw_dir( env, \
forked_project.name)
#cProject = cProject + 1
#if cProject == 4:
# return # exit early for testing purposes
if os.path.isdir(student_dest_dir):
# if there's already a .git repo there then refresh (pull) it
# instead of cloning it
repo_exists = False
for root, dirs, files in os.walk(student_dest_dir):
for dir in dirs:
if dir == '.git':
git_dir = os.path.join( root, dir)
logger.debug( "Found an existing repo at " + git_dir )
cwd_prev = os.getcwd()
os.chdir(root)
# in order to know if the pull actually
# changes anything we'll need to compare
# the SHA ID's of the HEAD commit before & after
sz_std_out, sz_std_err, ret_code = run_command_capture_output("git show-ref --head --heads HEAD"\
, False)
sha_pre_pull = sz_std_out.strip().split()[0].strip()
# Update the repo
call_shell("git pull")
sz_std_out, sz_std_err, ret_code = run_command_capture_output("git show-ref --head --heads HEAD"\
, False)
sha_post_pull = sz_std_out.strip().split()[0].strip()
os.chdir(cwd_prev)
hw_info = StudentHomeworkUpdateDesc(student, \
student_dest_dir, project, \
datetime.datetime.now())
if sha_pre_pull == sha_post_pull:
unchanged_student_projects.append( hw_info )
else:
updated_student_projects.append( hw_info )
# remember that we've updated it:
repo_exists = True
if repo_exists: break
if repo_exists: break
if repo_exists:
continue # don't try to clone it again
else:
logger.debug("local copy doesn't exist (yet), so clone it")
# clone the repo into the project
# The ssh connection string should look like:
# git@ubuntu:root/bit142_assign_1.git
temp_dir = os.path.join(temp_dir_root, "TEMP")
os.makedirs(temp_dir)
git_clone_repo(env[EnvOptions.SERVER_IP_ADDR], \
project, temp_dir)
# next, go find the .git dir:
                        found_git_dir = False
for root, dirs, files in os.walk(temp_dir):
for dir in dirs:
if dir == '.git':
new_git_dir = root
logger.debug( "Found the git dir inside " + new_git_dir )
found_git_dir = True
if found_git_dir: break
if found_git_dir: break
if not found_git_dir:
raise Exception("Despite cloning new repo, couldn't find git dir!")
shutil.copytree(new_git_dir, student_dest_dir)
shutil.rmtree(temp_dir, onerror=rmtree_remove_readonly_files)
# add the repo into the list of updated projects
new_student_projects.append( \
StudentHomeworkUpdateDesc(student, \
student_dest_dir, project, \
datetime.datetime.now()) )
# return the list of updated projects
sys.stdout.flush()
# make sure we actually get rid of our temp directory:
finally:
shutil.rmtree(temp_dir_root, onerror=rmtree_remove_readonly_files)
return new_student_projects, updated_student_projects, unchanged_student_projects
def git_do_core(self, root_dir, git_cmds):
"""Searches for git repos in & under <root_dir>, and then
invokes each of the commands listed in <git_cmds> on the repo."""
logger.info( "Walking through " + root_dir)
cwd_prev = os.getcwd()
for current_dir, dirs, files in os.walk(root_dir):
for dir in dirs:
if dir == ".git":
os.chdir(current_dir)
# make the directory more readable by truncating
# the common root
local_dir = current_dir.replace(root_dir, "")
logger.info( Fore.LIGHTGREEN_EX + "Found repo at "\
+ local_dir + Style.RESET_ALL + "\n")
for git_cmd in git_cmds:
if callable(git_cmd):
if not git_cmd():
break # we're done here
elif isinstance(git_cmd, basestring):
call_shell(git_cmd, exit_on_fail=False)
else:
print_error( "This is neither a string nor a function:\n\t"\
+str(git_cmd))
#print "="*20 + "\n"
# note that we don't restore the current
# working dir between git commands!
os.chdir(cwd_prev)
def git_do(self, env):
""""Search through the directory rooted at ev[STUDENT_WORK_DIR]
for any git repos in/under that directory. For each one,
invoke the command on every single git repo"""
cmd_line = list()
for part in env[EnvOptions.GIT_COMMAND]:
# stuff that was originally quoted on the command line
# will show up here as a single string but WITHOUT
# the quotes.
# So we'll put them back in if they're needed
if part.find(" ") >= 0:
part = "\"" + part + "\""
cmd_line.append( part )
git_cmd = " ".join(cmd_line)
#git_cmd = "git " + git_cmd
root_dir = env[EnvOptions.STUDENT_WORK_DIR]
print "Searching " + root_dir + "\n"
self.git_do_core(root_dir, [git_cmd] )
|
gpl-3.0
| -5,202,144,037,664,315,000 | 41.798828 | 133 | 0.522453 | false |
tengqm/senlin
|
senlin/tests/apiv1/shared.py
|
1
|
4578
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from oslo_config import cfg
from oslo_log import log
from oslo_messaging._drivers import common as rpc_common
from senlin.common import consts
from senlin.common import wsgi
from senlin.tests.common import utils
def request_with_middleware(middleware, func, req, *args, **kwargs):
@webob.dec.wsgify
def _app(req):
return func(req, *args, **kwargs)
resp = middleware(_app).process_request(req)
return resp
def to_remote_error(error):
"""Converts the given exception to the one with the _Remote suffix.
"""
exc_info = (type(error), error, None)
serialized = rpc_common.serialize_remote_exception(exc_info)
remote_error = rpc_common.deserialize_remote_exception(
serialized, ["senlin.common.exception"])
return remote_error
class ControllerTest(object):
'''Common utilities for testing API Controllers.'''
def __init__(self, *args, **kwargs):
super(ControllerTest, self).__init__(*args, **kwargs)
cfg.CONF.set_default('host', 'server.test')
self.topic = consts.ENGINE_TOPIC
self.api_version = '1.0'
self.tenant = 't'
self.mock_enforce = None
log.register_options(cfg.CONF)
def _environ(self, path):
return {
'SERVER_NAME': 'server.test',
'SERVER_PORT': 8004,
'SCRIPT_NAME': '/v1',
'PATH_INFO': '/%s' % self.tenant + path,
'wsgi.url_scheme': 'http',
}
def _simple_request(self, path, params=None, method='GET'):
environ = self._environ(path)
environ['REQUEST_METHOD'] = method
if params:
qs = "&".join(["=".join([k, str(params[k])]) for k in params])
environ['QUERY_STRING'] = qs
req = wsgi.Request(environ)
req.context = utils.dummy_context('api_test_user', self.tenant)
self.context = req.context
return req
def _get(self, path, params=None):
return self._simple_request(path, params=params)
def _delete(self, path):
return self._simple_request(path, method='DELETE')
def _abandon(self, path):
return self._simple_request(path, method='DELETE')
def _data_request(self, path, data, content_type='application/json',
method='POST'):
environ = self._environ(path)
environ['REQUEST_METHOD'] = method
req = wsgi.Request(environ)
req.context = utils.dummy_context('api_test_user', self.tenant)
self.context = req.context
req.body = data
return req
def _post(self, path, data, content_type='application/json'):
return self._data_request(path, data, content_type)
def _put(self, path, data, content_type='application/json'):
return self._data_request(path, data, content_type, method='PUT')
def _patch(self, path, data, content_type='application/json'):
return self._data_request(path, data, content_type, method='PATCH')
def _url(self, cid):
host = 'server.test:8778'
path = ('/v1/%(tenant)s/clusters/%(cluster_id)s%(path)s') % cid
return 'http://%s%s' % (host, path)
def tearDown(self):
# Common tearDown to assert that policy enforcement happens for all
# controller actions
if self.mock_enforce:
self.mock_enforce.assert_called_with(
action=self.action,
context=self.context,
scope=self.controller.REQUEST_SCOPE,
target={'project_id': self.context.tenant_id})
self.assertEqual(self.expected_request_count,
len(self.mock_enforce.call_args_list))
super(ControllerTest, self).tearDown()
def _mock_enforce_setup(self, mocker, action, allowed=True,
expected_request_count=1):
self.mock_enforce = mocker
self.action = action
self.mock_enforce.return_value = allowed
self.expected_request_count = expected_request_count
|
apache-2.0
| 4,580,353,874,016,427,000 | 33.946565 | 75 | 0.627785 | false |
h3biomed/eggo
|
bin/toaster.py
|
1
|
1048
|
#! /usr/bin/env python
# Licensed to Big Data Genomics (BDG) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The BDG licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exposes top-level Luigi Tasks associated with ETLing the datasets."""
# Only top-level "user-facing" Luigi DAGs should be imported here
from eggo.dag import VCF2ADAMTask, BAM2ADAMTask
if __name__ == '__main__':
from luigi import run
run()
|
apache-2.0
| 6,761,270,908,058,689,000 | 39.307692 | 74 | 0.752863 | false |
PyMamba/mamba-storm
|
storm/schema/schema.py
|
1
|
4673
|
#
# Copyright (c) 2006, 2007 Canonical
#
# Written by Gustavo Niemeyer <gustavo@niemeyer.net>
#
# This file is part of Storm Object Relational Mapper.
#
# Storm is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# Storm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Manage database shemas.
The L{Schema} class can be used to create, drop, clean and upgrade database
schemas.
A database L{Schema} is defined by the series of SQL statements that should be
used to create, drop and clear the schema, respectively, and by a patch module
used to upgrade it (see also L{PatchApplier}).
For example:
>>> store = Store(create_database('sqlite:'))
>>> creates = ['CREATE TABLE person (id INTEGER, name TEXT)']
>>> drops = ['DROP TABLE person']
>>> deletes = ['DELETE FROM person']
>>> import patch_module
>>> schema = Schema(creates, drops, deletes, patch_module)
>>> schema.create(store)
where patch_module is a Python module containing database patches used to
upgrade the schema over time.
"""
from storm.locals import StormError
from storm.schema.patch import PatchApplier
class Schema(object):
"""Create, drop, clean and patch table schemas.
@param creates: A list of C{CREATE TABLE} statements.
@param drops: A list of C{DROP TABLE} statements.
@param deletes: A list of C{DELETE FROM} statements.
@param patch_package: The Python package containing patch modules to apply.
@param committer: Optionally a committer to pass to the L{PatchApplier}.
@see: L{PatchApplier}.
"""
_create_patch = "CREATE TABLE patch (version INTEGER NOT NULL PRIMARY KEY)"
_drop_patch = "DROP TABLE IF EXISTS patch"
_autocommit = True
def __init__(self, creates, drops, deletes, patch_package, committer=None):
self._creates = creates
self._drops = drops
self._deletes = deletes
self._patch_package = patch_package
self._committer = committer
def _execute_statements(self, store, statements):
"""Execute the given statements in the given store."""
for statement in statements:
try:
store.execute(statement)
except Exception:
print "Error running %s" % statement
raise
if self._autocommit:
store.commit()
def autocommit(self, flag):
"""Control whether to automatically commit/rollback schema changes.
The default is C{True}, if set to C{False} it's up to the calling code
to handle commits and rollbacks.
@note: In case of rollback the exception will just be propagated, and
no rollback on the store will be performed.
"""
self._autocommit = flag
def create(self, store):
"""Run C{CREATE TABLE} SQL statements with C{store}."""
self._execute_statements(store, [self._create_patch])
self._execute_statements(store, self._creates)
def drop(self, store):
"""Run C{DROP TABLE} SQL statements with C{store}."""
self._execute_statements(store, self._drops)
self._execute_statements(store, [self._drop_patch])
def delete(self, store):
"""Run C{DELETE FROM} SQL statements with C{store}."""
self._execute_statements(store, self._deletes)
def upgrade(self, store):
"""Upgrade C{store} to have the latest schema.
If a schema isn't present a new one will be created. Unapplied
patches will be applied to an existing schema.
"""
class NoopCommitter(object):
commit = lambda _: None
rollback = lambda _: None
committer = self._committer if self._autocommit else NoopCommitter()
patch_applier = PatchApplier(store, self._patch_package, committer)
try:
store.execute("SELECT * FROM patch WHERE version=0")
except StormError:
            # No schema at all. Create it from the ground up.
store.rollback()
self.create(store)
patch_applier.mark_applied_all()
if self._autocommit:
store.commit()
else:
patch_applier.apply_all()
|
lgpl-2.1
| -5,923,921,601,448,790,000 | 35.224806 | 79 | 0.661459 | false |
texastribune/tx_election_scrapers
|
tx_elections_scrapers/sos/test_utils.py
|
1
|
1962
|
from __future__ import unicode_literals
import unittest
from .utils import slugify, corrected
from . import utils
class correctedTest(unittest.TestCase):
def test_it_works(self):
utils._corrections_cache = {
'all': {
'a0': 'a1',
},
'foo': {
'a0': 'a2',
},
'bar': {
'a0': 'a3',
},
}
self.assertEqual(corrected('a0'), 'a1')
self.assertEqual(corrected('missing'), 'missing')
self.assertEqual(corrected('a0', corrections=['foo']), 'a2')
self.assertEqual(corrected('a0', corrections=('foo', 'bar',)), 'a3')
        # assert uses latest slug
self.assertEqual(
corrected('a0', corrections=['foo', 'bar', 'xyzzy']), 'a3')
# assert ignores nonexistent ones in general
self.assertEqual(
corrected('a0', corrections=['foo', 'poop', 'bar', 'xyzzy']), 'a3')
def test_always_uses_original_slug(self):
utils._corrections_cache = {
'all': {
'a0': 'a1',
},
'foo': {
'a0': 'a2',
'a1': 'b1',
},
'bar': {
'a0': 'a3',
'b1': 'c1',
},
}
self.assertEqual(corrected('a0'), 'a1')
self.assertEqual(corrected('a0', corrections=('foo', )), 'a2')
self.assertEqual(corrected('a0', corrections=['foo', 'bar']), 'a3')
class SlugifyTest(unittest.TestCase):
def test_it_works(self):
self.assertEqual(slugify(''), '')
self.assertEqual(slugify('a'), 'a')
self.assertEqual(slugify('a b'), 'a-b')
self.assertEqual(slugify('Ronald McDonald'), 'ronald-mcdonald')
self.assertEqual(slugify('M.C. Chris'), 'mc-chris')
self.assertEqual(slugify('foo', corrections=['poop']), 'foo')
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
| 4,407,651,845,405,037,600 | 29.65625 | 79 | 0.494903 | false |
jbinary/django-database-email-backend
|
database_email_backend/admin.py
|
1
|
5693
|
#-*- coding: utf-8 -*-
from functools import update_wrapper
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect
from django.contrib import admin
from django import forms
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.core.mail import message
from django.db.models import Count
from django.utils.translation import ugettext as _
from database_email_backend.models import Email, Attachment
WIDE_INPUT_SIZE = '80'
###################
# view sent email #
###################
class AttachmentInlineAdmin(admin.TabularInline):
model = Attachment
extra = 0
can_delete = False
max_num = 0
readonly_fields = ('filename', 'mimetype', 'content', 'file_link',)
fields = ('file_link', 'mimetype',)
def file_link(self, obj):
url_name = '%s:%s_email_attachment' % (self.admin_site.name, self.model._meta.app_label,)
kwargs={
'email_id': str(obj.email_id),
'attachment_id': str(obj.id),
'filename': str(obj.filename)}
url = reverse(url_name, kwargs=kwargs)
return u'<a href="%(url)s">%(filename)s</a>' % {'filename': obj.filename, 'url': url}
file_link.allow_tags = True
class EmailAdmin(admin.ModelAdmin):
list_display = ('from_email', 'to_emails', 'subject', 'body_stripped', 'sent_at', 'attachment_count')
date_hierarchy = 'sent_at'
search_fields = ('from_email', 'to_emails', 'subject', 'body',)
exclude = ('raw', 'body')
readonly_fields = list_display + ('cc_emails', 'bcc_emails', 'all_recipients', 'headers', 'body_br',)
inlines = (AttachmentInlineAdmin,)
def queryset(self, request):
queryset = super(EmailAdmin, self).queryset(request)
return queryset.annotate(attachment_count_cache=Count('attachments'))
def attachment_count(self, obj):
return obj.attachment_count
attachment_count.admin_order_field = 'attachment_count_cache'
def body_stripped(self, obj):
return obj.body
body_stripped.short_description = 'body'
body_stripped.admin_order_field = 'body'
def get_urls(self):
urlpatterns = super(EmailAdmin, self).get_urls()
from django.conf.urls import patterns, url
def wrap(view):
def wrapper(*args, **kwargs):
return self.admin_site.admin_view(view)(*args, **kwargs)
return update_wrapper(wrapper, view)
appname = self.model._meta.app_label
urlpatterns = patterns('',
url(r'^(?P<email_id>\d+)/attachments/(?P<attachment_id>\d+)/(?P<filename>[\w.]+)$',
wrap(self.serve_attachment),
name='%s_email_attachment' % appname)
) + urlpatterns
return urlpatterns
def serve_attachment(self, request, email_id, attachment_id, filename, extra_context=None):
if not self.has_change_permission(request, None):
raise PermissionDenied
attachment = Attachment.objects.get(email__id=email_id, id=attachment_id, filename=filename)
response = HttpResponse(attachment.content, mimetype=attachment.mimetype or 'application/octet-stream')
response["Content-Length"] = len(attachment.content)
return response
def body_br(self, obj):
return obj.body
body_br.allow_tags = True
body_br.short_description = 'body'
body_br.admin_order_field = 'body'
admin.site.register(Email, EmailAdmin)
##############
# send email #
##############
class SendEmail(Email):
class Meta:
proxy = True
class SendEmailForm(forms.ModelForm):
class Meta:
model = SendEmail
widgets = {
'from_email': forms.TextInput(attrs={'size': '30'}),
'to_emails': forms.TextInput(attrs={'size': WIDE_INPUT_SIZE}),
'cc_emails': forms.TextInput(attrs={'size': WIDE_INPUT_SIZE}),
'bcc_emails': forms.TextInput(attrs={'size': WIDE_INPUT_SIZE}),
'subject': forms.TextInput(attrs={'size': WIDE_INPUT_SIZE}),
}
fields = ['from_email', 'to_emails', 'cc_emails', 'bcc_emails',
'all_recipients', 'headers', 'subject', 'body', 'raw']
class SendEmailAdmin(admin.ModelAdmin):
form = SendEmailForm
fieldsets = (
(None, {'fields':('from_email', 'to_emails')}),
(_('cc and bcc'), {
'fields': ('cc_emails', 'bcc_emails'),
'classes': ('collapse',)}),
(None, {'fields': ('subject', 'body')}),
)
def save_model(self, request, obj, form, change):
"""
sends the email and does not save it
"""
email = message.EmailMessage(
subject=obj.subject,
body=obj.body,
from_email=obj.from_email,
to=[t.strip() for t in obj.to_emails.split(',')],
bcc=[t.strip() for t in obj.bcc_emails.split(',')],
cc=[t.strip() for t in obj.cc_emails.split(',')]
)
email.send()
def response_add(self, request, obj, post_url_continue=None):
msg = _('The Email was sent successfully.')
self.message_user(request, msg)
if "_addanother" in request.POST:
return HttpResponseRedirect(request.path)
return HttpResponseRedirect('../../')
def has_delete_permission(self, request, obj=None):
return False
def has_change_permission(self, request, obj=None):
return False
def get_model_perms(self, request):
return {
'add': self.has_add_permission(request),
'change': False,
'delete': False
}
admin.site.register(SendEmail, SendEmailAdmin)
|
mit
| 2,261,844,363,294,276,000 | 33.50303 | 111 | 0.607764 | false |
troismph/matasano-challenges
|
src/dh.py
|
1
|
1134
|
from math_g4z3 import mod_exp
from converts import big_int_to_bin_str
import random
DH_p = 0xffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746c08ca237327ffffffffffffffff
DH_g = 2
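# Note: DH_p above appears to be the 1536-bit MODP prime from RFC 3526 that
# the cryptopals Diffie-Hellman challenges use, with DH_g = 2 as generator.
# Each DHClient below draws a random private key, publishes g^priv mod p,
# and derives the shared secret as (peer_pub)^priv mod p.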
class DHClient:
def __init__(self, p=DH_p, g=DH_g):
self.priv_key = random.randrange(p)
self.pub_key = mod_exp(g, self.priv_key, p)
def get_shared_key(self, pub_key):
return big_int_to_bin_str(mod_exp(pub_key, self.priv_key, DH_p))
def test_dh():
rounds = 10
test_pass = True
for x in range(rounds):
dha = DHClient()
dhb = DHClient()
sa = dha.get_shared_key(dhb.pub_key)
sb = dhb.get_shared_key(dha.pub_key)
if sa != sb:
print "Test fail"
test_pass = False
if test_pass:
print "Test pass with {n} cases".format(n=rounds)
|
gpl-3.0
| 4,353,548,245,432,018,400 | 35.612903 | 393 | 0.72134 | false |
chronicle/api-samples-python
|
detect/v2/get_error_test.py
|
1
|
2456
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the "get_error" module."""
import unittest
from unittest import mock
from google.auth.transport import requests
from . import get_error
class GetErrorTest(unittest.TestCase):
@mock.patch.object(requests, "AuthorizedSession", autospec=True)
@mock.patch.object(requests.requests, "Response", autospec=True)
def test_http_error(self, mock_response, mock_session):
mock_session.request.return_value = mock_response
type(mock_response).status_code = mock.PropertyMock(return_value=400)
mock_response.raise_for_status.side_effect = (
requests.requests.exceptions.HTTPError())
with self.assertRaises(requests.requests.exceptions.HTTPError):
get_error.get_error(mock_session, "")
@mock.patch.object(requests, "AuthorizedSession", autospec=True)
@mock.patch.object(requests.requests, "Response", autospec=True)
def test_happy_path(self, mock_response, mock_session):
mock_session.request.return_value = mock_response
type(mock_response).status_code = mock.PropertyMock(return_value=200)
error_id = "ed_12345678-1234-1234-1234-1234567890ab"
version_id = "ru_12345678-1234-1234-1234-1234567890ab@v_100000_000000"
expected_error = {
"errorId": error_id,
"text": "something went wrong",
"category": "RULES_EXECUTION_ERROR",
"errorTime": "2020-11-05T00:00:00Z",
"metadata": {
"ruleExecution": {
"windowStartTime": "2020-11-05T00:00:00Z",
"windowEndTime": "2020-11-05T01:00:00Z",
"ruleId": "ru_12345678-1234-1234-1234-1234567890ab",
"versionId": version_id,
},
},
}
mock_response.json.return_value = expected_error
error = get_error.get_error(mock_session, error_id)
self.assertEqual(error, expected_error)
if __name__ == "__main__":
unittest.main()
|
apache-2.0
| 6,060,797,037,974,437,000 | 36.212121 | 74 | 0.69259 | false |
marcua/qurk_experiments
|
qurkexp/hitlayer/models.py
|
1
|
10554
|
import sys,os,base64,time,math,hashlib,traceback
from datetime import datetime, timedelta
from qurkexp.hitlayer import settings
from django.db import models
from django.db import transaction
from boto.mturk import connection, question, price, qualification
class HIT(models.Model):
hid = models.CharField(max_length=128)
autoapprove = models.BooleanField(default=True)
cbname = models.CharField(max_length=128, null=True)
done = models.BooleanField(default=False)
start_tstamp = models.DateTimeField(auto_now_add=True)
url = models.TextField(null=True)
def _cbargs(self):
return [arg.val for arg in self.cbarg_set.order_by('idx')]
cbargs = property(_cbargs)
def kill(self):
if self.done: return
try:
HitLayer.get_instance().kill_job(self.hid)
self.done = True
self.save()
except:
pass
class CBArg(models.Model):
hit = models.ForeignKey(HIT)
idx = models.IntegerField()
val = models.CharField(max_length=128)
class HitLayer(object):
"""
continuously keeps HITs on MTurk until they're completed
if a HIT expires, it extends the lifetime
approves all results seen
"""
myinstance = None
def __init__(self):
self.conn = self.get_conn()
self.generators = dict()
HitLayer.myinstance = self
@staticmethod
def get_instance():
if HitLayer.myinstance == None:
HitLayer.myinstance = HitLayer()
return HitLayer.myinstance
def register_cb_generator(self, gname, generator):
"""
The generator creates and returns a callback function that takes the
hitid and answers (list of dictionaries) as input
The generator itself can take a number of arguments that help setup
the context for the callback
The generator is of the format:
def generator(arg1, arg2,...):
# setup code, based on arg1, arg2...
def cb(hitid, answers):
pass
return cb
"""
if gname in self.generators:
print "Warning: overwriting existing generator with name: ", gname
self.generators[gname] = generator
def get_conn(self,sandbox=settings.SANDBOX):
"""
returns a connection.
requires that settings defines the following variables:
settings.SANDBOX: True|False
settings.aws_id: your aws id
settings.aws_secret: your aws secret key
"""
if sandbox:
host="mechanicalturk.sandbox.amazonaws.com"
else:
host="mechanicalturk.amazonaws.com"
return connection.MTurkConnection(
aws_access_key_id=settings.aws_id,
aws_secret_access_key=settings.aws_secret,
host=host)
def persist_hit(self, hitid, autoapprove, callback, url):
"""
Save HIT information in database
"""
hit = HIT(hid = hitid, autoapprove=autoapprove, url=url)
hit.save()
if callback:
hit.cbname = callback[0]
for i, arg in enumerate(callback[1]):
cbarg = CBArg(hit=hit, idx=i, val=str(arg))
cbarg.save()
hit.save()
def create_job(self, url, callback, nassignments = 1, title='', desc='', timeout=60*5, price=0.02, autoapprove=True):
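        # Note (inferred from persist_hit/get_callback): callback is expected
        # to be a (generator_name, generator_args) pair, where generator_name
        # was registered via register_cb_generator and generator_args is the
        # list of arguments passed to that generator when results come back.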
if not url: return None
c = self.conn
balance = c.get_account_balance()
if balance < price:
return None
if balance < price * 10:
print "Warning: you are running low on funds: ", balance
price = connection.MTurkConnection.get_price_as_price(price)
q = question.ExternalQuestion('%s%s' % (settings.URLROOT, url), 750)
print url, title, price, timeout
rs = c.create_hit(hit_type=None,
question=q,
lifetime=timedelta(5),
max_assignments=nassignments,
title=title,
duration=timedelta(seconds=timeout),
approval_delay=60*60,
description=desc,
reward = price)
hitids = [r.HITId for r in rs if hasattr(r, 'HITId')]
if len(hitids) == 0: return None
hitid = hitids[0]
self.persist_hit(hitid, autoapprove, callback, url)
return hitid
def kill_job(self, hitid, msg='Thank you for doing this HIT'):
"""
Approve finished assignments, then expire, disable, and dispose the HIT
"""
try:
c = self.conn
print 'kill_job ', hitid
rs = c.get_assignments(hitid, status='Submitted')
for ass in rs:
c.approve_assignment(ass.AssignmentId, msg)
killed = False
try:
c.expire_hit(hitid)
killed = True
except Exception as ee:
pass
try:
c.disable_hit(hitid)
killed = True
except Exception as ee:
pass
try:
c.dispose_hit(hitid)
killed = True
except Exception as ee:
pass
print 'killed?', killed
except Exception as e:
print e
@transaction.commit_manually
def check_hits(self):
"""
Iterates through all the HITs in the database where done=False
For each HIT that is in the Reviewable state (all assignments completed)
1) extracts all assignment results
2) if autoapprove is true, approved the assignments and disposes the HIT
3) executes the HIT's callback function
4) sets the HIT database object to done=True
"""
try:
for hitobj in HIT.objects.filter(done=False):
try:
self.check_hit(hitobj)
except Exception as e:
print 'check_hits: problem processing', hitobj.hid
print e
print traceback.print_exc()
transaction.rollback()
else:
transaction.commit()
finally:
# django transactions are fucked.
# this is the dumbest piece of code i've ever written.
transaction.rollback()
def check_hit(self, hitobj):
hitid = hitobj.hid
mthit = self.get_mthit(hitid)
if not mthit:
print 'check_hits: didnt find hit for ', hitid
return
status = mthit.HITStatus
print 'check_hits', hitid, status, hitobj.url
if status == 'Reviewable':
if self.process_reviewable(hitid, hitobj.autoapprove):
hitobj.done = True
hitobj.save()
else:
print 'check_hits: problem processing', hitid
# the previous block should be atomic
print 'approved', hitobj.hid
elif status in ['Assignable','Disposed','Unassignable','Reviewing']:
pass
def process_reviewable(self, hitid, autoapprove):
allans = self.get_hit_answers(hitid)
if autoapprove:
for ans in allans:
if ans['purk_status'] == 'Submitted':
aid = ans['purk_aid']
print "approving assignment ", aid, ans['purk_status']
self.approve_assignment(aid)
cb = self.get_callback(hitid)
cb(hitid, allans)
# should remember that callback was executed, so we
# don't execute it again!
self.conn.dispose_hit(hitid)
print 'hit disposed', hitid
return True
def get_hit_answers(self, hitid):
"""
retrieves all the answers for all assignments for the HIT
returns an array of dictionaries.
Each dictionary has several purk-related entries:
purk_aid assignment id
purk_wid worker id
purk_atime accept time (datetime object)
purk_stime submit time (datetime object)
purk_length submit time - accept time (timedelta object)
"""
allans =[]
for a in self.get_assignments(hitid):
aid = a.AssignmentId
wid = a.WorkerId
accepttime = datetime.strptime(a.AcceptTime, '%Y-%m-%dT%H:%M:%SZ')
submittime = datetime.strptime(a.SubmitTime, '%Y-%m-%dT%H:%M:%SZ')
d = {}
d['purk_status'] = a.AssignmentStatus
d['purk_aid'] = aid
d['purk_wid'] = wid
d['purk_atime'] = accepttime
d['purk_stime'] = submittime
d['purk_length'] = submittime - accepttime
for answers in a.answers:
for answer in answers:
d.update((k,v) for k,v in answer.fields)
allans.append(d)
return allans
def get_mthit(self, hitid):
"""
Return the mechanical turk (boto) HIT object, or None if HIT is not found
"""
try:
return self.conn.get_hit(hitid)[0]
except:
return None
def get_hit(self, hitid):
"""
Return the HIT database object
"""
try:
return HIT.objects.get(hid=hitid)
except:
return None
def get_assignments(self, hitid):
"""
Return the boto assignment objects from mechanical turk
"""
return self.conn.get_assignments(hitid)
def approve_assignment(self, aid):
self.conn.approve_assignment(aid, "Thank you!")
def reject_assignment(self, aid, msg=""):
self.conn.reject_assignment(aid, msg)
def get_callback(self, hitid):
def dummycb(hitid, answers):
pass
hit = self.get_hit(hitid)
try:
# if not hit or not hit.cbname:
# return None
# if hit.cbname not in self.generators:
# return None
ret = self.generators[hit.cbname](*hit.cbargs)
if ret:
return ret
return dummycb
except Exception as e:
print e
print traceback.print_exc()
return dummycb
|
bsd-3-clause
| -3,170,994,810,895,449,600 | 31.776398 | 121 | 0.544722 | false |
WarriorIng64/type-fight
|
gamelog.py
|
1
|
1492
|
#!/bin/python
import datetime, logging, platform, pygame, os
from gameglobals import *
# Logging setup
if platform.system() == 'Windows':
# Logging on Windows
logdir = os.path.join(os.getenv('APPDATA'), 'typefight')
else:
# Assume Linux
logdir = os.path.join(os.path.expanduser('~'), '.typefight')
try:
os.makedirs(logdir)
except OSError:
if not os.path.isdir(logdir):
raise
logname = os.path.join(logdir, 'typefight.log')
logging.basicConfig(filename=logname, filemode='w',
format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.DEBUG)
# Write initial info to log
logging.info('TypeFight! version ' + GAME_VERSION)
logging.info('Pygame version: ' + pygame.version.ver)
logging.info('Platform: ' + platform.system())
logging.info('FPS target: ' + str(FPS_TARGET))
logging.info('Logging further messages at log level ' + \
str(logging.getLogger().getEffectiveLevel()))
def log_display_info():
'''Records some display information to the log, which could be useful if
graphics issues arise.'''
logging.info('Display driver: ' + pygame.display.get_driver())
logging.info('Display info: ' + str(pygame.display.Info()))
wm_info = pygame.display.get_wm_info()
wm_info_string = ''
for key in wm_info:
wm_info_string += '\n\t' + key + ':\t' + str(wm_info[key])
logging.info('Window manager info:' + wm_info_string)
|
gpl-3.0
| 5,761,751,310,062,124,000 | 35.390244 | 76 | 0.643432 | false |