Dataset schema (column statistics as exported by the dataset viewer):

column     stats          min   max
repo_name  stringlengths  5     100
path       stringlengths  4     231
language   stringclasses  1 value
license    stringclasses  15 values
size       int64          6     947k
score      float64        0     0.34
prefix     stringlengths  0     8.16k
middle     stringlengths  3     512
suffix     stringlengths  0     8.17k
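The prefix/middle/suffix columns suggest a fill-in-the-middle (FIM) code corpus. A minimal loading sketch with the Hugging Face datasets library; the dataset id below is a placeholder, not this corpus's real name:

# Hypothetical loader for a FIM corpus with the schema above.
from datasets import load_dataset

ds = load_dataset("user/python-fim-corpus", split="train", streaming=True)  # placeholder id

for row in ds:
    # Reassemble the original source file around the FIM split point.
    source = row["prefix"] + row["middle"] + row["suffix"]
    print(row["repo_name"], row["path"], row["license"], row["size"], row["score"])
    break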
Ernestyj/PyStudy
DataScience/python/tools/test/test_example.py
Python
apache-2.0
577
0.003466
# -*- coding: utf-8 -*-
import unittest


class TestExample(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        print("**************************************** setUpClass ****************************************")

    @classmethod
    def tearDownClass(cls):
        print("************************************** tearDownClass ***************************************")

    def setUp(self):
        print("****** setUp *******")

    def tearDown(self):
        print("***** tearDown *****")

    def _example(self):
        print("This is a test example.")
CZ-NIC/knot
tests-extra/tests/modules/onlinesign_rollovers/test.py
Python
gpl-3.0
7,474
0.006422
#!/usr/bin/env python3

"""
Check of automatic algorithm rollover scenario.
"""

import collections
import os
import shutil
import datetime
import random
import subprocess
from subprocess import check_call

from dnstest.utils import *
from dnstest.keys import Keymgr
from dnstest.test import Test
from dnstest.module import ModOnlineSign

def pregenerate_key(server, zone, alg):
    class a_class_with_name:
        def __init__(self, name):
            self.name = name

    server.gen_key(a_class_with_name("nonexistent.zone."), ksk=True, alg=alg, addtopolicy="blahblah")

# check zone if keys are present and used for signing
def check_zone(server, zone, dnskeys, dnskey_rrsigs, cdnskeys, soa_rrsigs, msg):
    qdnskeys = server.dig("example.com", "DNSKEY", bufsize=4096)
    found_dnskeys = qdnskeys.count("DNSKEY")

    qdnskeyrrsig = server.dig("example.com", "DNSKEY", dnssec=True, bufsize=4096)
    found_rrsigs = qdnskeyrrsig.count("RRSIG")

    qcdnskey = server.dig("example.com", "CDNSKEY", bufsize=4096)
    found_cdnskeys = qcdnskey.count("CDNSKEY")

    qsoa = server.dig("example.com", "SOA", dnssec=True, bufsize=4096)
    found_soa_rrsigs = qsoa.count("RRSIG")

    check_log("DNSKEYs: %d (expected %d)" % (found_dnskeys, dnskeys))
    check_log("RRSIGs: %d (expected %d)" % (found_soa_rrsigs, soa_rrsigs))
    check_log("DNSKEY-RRSIGs: %d (expected %d)" % (found_rrsigs, dnskey_rrsigs))
    check_log("CDNSKEYs: %d (expected %d)" % (found_cdnskeys, cdnskeys))

    if found_dnskeys != dnskeys:
        set_err("BAD DNSKEY COUNT: " + msg)
        detail_log("!DNSKEYs not published and activated as expected: " + msg)

    if found_soa_rrsigs != soa_rrsigs:
        set_err("BAD RRSIG COUNT: " + msg)
        detail_log("!RRSIGs not published and activated as expected: " + msg)

    if found_rrsigs != dnskey_rrsigs:
        set_err("BAD DNSKEY RRSIG COUNT: " + msg)
        detail_log("!RRSIGs not published and activated as expected: " + msg)

    if found_cdnskeys != cdnskeys:
        set_err("BAD CDNSKEY COUNT: " + msg)
        detail_log("!CDNSKEYs not published and activated as expected: " + msg)

    detail_log(SEP)

def wait_for_rrsig_count(t, server, rrtype, rrsig_count, timeout):
    rtime = 0
    while True:
        qdnskeyrrsig = server.dig("example.com", rrtype, dnssec=True, bufsize=4096)
        found_rrsigs = qdnskeyrrsig.count("RRSIG")
        if found_rrsigs == rrsig_count:
            break
        rtime = rtime + 1
        t.sleep(1)
        if rtime > timeout:
            break

def wait_for_dnskey_count(t, server, dnskey_count, timeout):
    rtime = 0
    while True:
        qdnskeyrrsig = server.dig("example.com", "DNSKEY", dnssec=True, bufsize=4096)
        found_dnskeys = qdnskeyrrsig.count("DNSKEY")
        if found_dnskeys == dnskey_count:
            break
        rtime = rtime + 1
        t.sleep(1)
        if rtime > timeout:
            break

def wait_for_cds_change(t, server, timeout):
    rtime = 0
    CDS1 = str(server.dig(ZONE, "CDS").resp.answer[0].to_rdataset())
    while True:
        CDS2 = str(server.dig(ZONE, "CDS").resp.answer[0].to_rdataset())
        if CDS1 != CDS2:
            break
        rtime = rtime + 1
        t.sleep(1)
        if rtime > timeout:
            break

def watch_alg_rollover(t, server, zone, before_keys, after_keys, desc, set_alg, key_len, submission_cb):
    check_zone(server, zone, before_keys, 1, 1, 1, desc + ": initial keys")

    z = server.zones[zone[0].name]
    z.get_module("onlinesign").algorithm = set_alg
    z.get_module("onlinesign").key_size = key_len
    server.gen_confile()
    server.reload()

    wait_for_rrsig_count(t, server, "SOA", 2, 20)
    check_zone(server, zone, before_keys, 1, 1, 2, desc + ": pre active")

    wait_for_dnskey_count(t, server, before_keys + after_keys, 20)
    check_zone(server, zone, before_keys + after_keys, 2, 1, 2, desc + ": both algorithms active")

    # wait for any change in CDS records
    CDS1 = str(server.dig(ZONE, "CDS").resp.answer[0].to_rdataset())
    t.sleep(3)
    while CDS1 == str(server.dig(ZONE, "CDS").resp.answer[0].to_rdataset()):
        t.sleep(1)
    check_zone(server, zone, before_keys + after_keys, 2, 1, 2, desc + ": new KSK ready")

    submission_cb()
    t.sleep(4)
    check_zone(server, zone, before_keys + after_keys, 2, 1, 2, desc + ": both still active")

    wait_for_dnskey_count(t, server, after_keys, 20)
    check_zone(server, zone, after_keys, 1, 1, 2, desc + ": post active")

    wait_for_rrsig_count(t, server, "SOA", 1, 20)
    check_zone(server, zone, after_keys, 1, 1, 1, desc + ": old alg removed")

def watch_ksk_rollover(t, server, zone, before_keys, after_keys, total_keys, desc, set_ksk_lifetime, submission_cb):
    check_zone(server, zone, before_keys, 1, 1, 1, desc + ": initial keys")

    z = server.zones[zone[0].name]
    orig_ksk_lifetime = z.get_module("onlinesign").ksk_life
    z.get_module("onlinesign").ksk_life = set_ksk_lifetime if set_ksk_lifetime > 0 else orig_ksk_lifetime
    server.gen_confile()
    server.reload()

    wait_for_dnskey_count(t, server, total_keys, 20)
    t.sleep(3)
    check_zone(server, zone, total_keys, 2, 1, 1, desc + ": published new")
    z.get_module("onlinesign").ksk_life = orig_ksk_lifetime
    server.gen_confile()
    server.reload()

    wait_for_cds_change(t, server, 20)
    # there is an exception for CSK->KZSK rollover that we have double signatures for the zone. Sorry, we don't care...
    expect_zone_rrsigs = (2 if before_keys == 1 and after_keys > 1 else 1)
    check_zone(server, zone, total_keys, 2, 1, expect_zone_rrsigs, desc + ": new KSK ready")

    submission_cb()
    t.sleep(4)
    if before_keys < 2 or after_keys > 1:
        check_zone(server, zone, total_keys, 2, 1, 1, desc + ": both still active")
    # else skip the test as we have no control on KSK and ZSK retiring asynchronously

    wait_for_dnskey_count(t, server, after_keys, 28)
    check_zone(server, zone, after_keys, 1, 1, 1, desc + ": old key removed")

t = Test(stress=False)

ModOnlineSign.check()

parent = t.server("knot")
parent_zone = t.zone("com.", storage=".")
t.link(parent_zone, parent)
parent.dnssec(parent_zone).enable = True

child = t.server("knot")
child_zone = t.zone("example.com.", storage=".")
t.link(child_zone, child)

def cds_submission():
    cds = child.dig(ZONE, "CDS")
    cds_rdata = cds.resp.answer[0].to_rdataset()[0].to_text()
    up = parent.update(parent_zone)
    up.delete(ZONE, "DS")
    up.add(ZONE, 7, "DS", cds_rdata)
    up.send("NOERROR")

child.zonefile_sync = 24 * 60 * 60
child.dnssec(child_zone).ksk_sbm_check = [parent]
child.add_module(child_zone, ModOnlineSign("ECDSAP384SHA384", key_size="384", prop_delay=11,
                                           ksc=[parent], ksci=2, ksk_shared=True,
                                           cds_publish="always",
                                           cds_digesttype=random.choice(["sha256", "sha384"])))

# parameters
ZONE = "example.com."

t.start()
child.zone_wait(child_zone)

cds_submission()  # pass initially generated key to active state
t.sleep(4)  # let the server accept the submission before forced reload

pregenerate_key(child, child_zone, "ECDSAP384SHA384")
watch_ksk_rollover(t, child, child_zone, 1, 1, 2, "CSK rollover", 22, cds_submission)

pregenerate_key(child, child_zone, "ECDSAP256SHA256")
watch_alg_rollover(t, child, child_zone, 1, 1, "CSK to CSK alg", "ECDSAP256SHA256", 256, cds_submission)

t.end()
MSeifert04/astropy
astropy/timeseries/periodograms/lombscargle/tests/test_statistics.py
Python
bsd-3-clause
7,519
0.000532
import numpy as np
import pytest
from numpy.testing import assert_allclose

try:
    import scipy
except ImportError:
    HAS_SCIPY = False
else:
    HAS_SCIPY = True

import astropy.units as u
from astropy.timeseries.periodograms.lombscargle import LombScargle
from astropy.timeseries.periodograms.lombscargle._statistics import (fap_single, inv_fap_single,
                                                                     METHODS)
from astropy.timeseries.periodograms.lombscargle.utils import convert_normalization, compute_chi2_ref

METHOD_KWDS = dict(bootstrap={'n_bootstraps': 20, 'random_seed': 42})
NORMALIZATIONS = ['standard', 'psd', 'log', 'model']


def make_data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0, units=False):
    """Generate some data for testing"""
    rng = np.random.RandomState(rseed)
    t = 5 * period * rng.rand(N)
    omega = 2 * np.pi / period
    y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
    dy = dy * (0.5 + rng.rand(N))
    y += dy * rng.randn(N)
    fmax = 5

    if units:
        return t * u.day, y * u.mag, dy * u.mag, fmax / u.day
    else:
        return t, y, dy, fmax


def null_data(N=1000, dy=1, rseed=0, units=False):
    """Generate null hypothesis data"""
    rng = np.random.RandomState(rseed)
    t = 100 * rng.rand(N)
    dy = 0.5 * dy * (1 + rng.rand(N))
    y = dy * rng.randn(N)
    fmax = 40

    if units:
        return t * u.day, y * u.mag, dy * u.mag, fmax / u.day
    else:
        return t, y, dy, fmax


@pytest.mark.parametrize('normalization', NORMALIZATIONS)
@pytest.mark.parametrize('with_errors', [True, False])
@pytest.mark.parametrize('units', [False, True])
def test_distribution(normalization, with_errors, units):
    t, y, dy, fmax = null_data(units=units)

    if not with_errors:
        dy = None

    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    z = np.linspace(0, power.max(), 1000)

    # Test that pdf and cdf are consistent
    dz = z[1] - z[0]
    z_mid = z[:-1] + 0.5 * dz
    pdf = ls.distribution(z_mid)
    cdf = ls.distribution(z, cumulative=True)
    if isinstance(dz, u.Quantity):
        dz = dz.value
    assert_allclose(pdf, np.diff(cdf) / dz, rtol=1E-5, atol=1E-8)

    # psd normalization without specified errors produces bad results
    if not (normalization == 'psd' and not with_errors):
        # Test that observed power is distributed according to the theoretical pdf
        hist, bins = np.histogram(power, 30, density=True)
        midpoints = 0.5 * (bins[1:] + bins[:-1])
        pdf = ls.distribution(midpoints)
        assert_allclose(hist, pdf, rtol=0.05, atol=0.05 * pdf[0])


@pytest.mark.parametrize('N', [10, 100, 1000])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_inverse_single(N, normalization):
    fap = np.linspace(0, 1, 11)
    z = inv_fap_single(fap, N, normalization)
    fap_out = fap_single(z, N, normalization)
    assert_allclose(fap, fap_out)


@pytest.mark.parametrize('normalization', NORMALIZATIONS)
@pytest.mark.parametrize('use_errs', [True, False])
@pytest.mark.parametrize('units', [False, True])
def test_inverse_bootstrap(normalization, use_errs, units):
    t, y, dy, fmax = null_data(units=units)
    if not use_errs:
        dy = None

    fap = np.linspace(0, 1, 11)
    method = 'bootstrap'
    method_kwds = METHOD_KWDS['bootstrap']

    ls = LombScargle(t, y, dy, normalization=normalization)

    z = ls.false_alarm_level(fap, maximum_frequency=fmax,
                             method=method, method_kwds=method_kwds)
    fap_out = ls.false_alarm_probability(z, maximum_frequency=fmax,
                                         method=method, method_kwds=method_kwds)

    # atol = 1 / n_bootstraps
    assert_allclose(fap, fap_out, atol=0.05)


@pytest.mark.parametrize('method', sorted(set(METHODS) - {'bootstrap'}))
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
@pytest.mark.parametrize('use_errs', [True, False])
@pytest.mark.parametrize('N', [10, 100, 1000])
@pytest.mark.parametrize('units', [False, True])
def test_inverses(method, normalization, use_errs, N, units, T=5):
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    t, y, dy, fmax = make_data(N, rseed=543, units=units)
    if not use_errs:
        dy = None
    method_kwds = METHOD_KWDS.get(method, None)

    fap = np.logspace(-10, 0, 11)

    ls = LombScargle(t, y, dy, normalization=normalization)
    z = ls.false_alarm_level(fap, maximum_frequency=fmax,
                             method=method, method_kwds=method_kwds)
    fap_out = ls.false_alarm_probability(z, maximum_frequency=fmax,
                                         method=method, method_kwds=method_kwds)

    assert_allclose(fap, fap_out)


@pytest.mark.parametrize('method', sorted(METHODS))
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
@pytest.mark.parametrize('units', [False, True])
def test_false_alarm_smoketest(method, normalization, units):
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    kwds = METHOD_KWDS.get(method, None)
    t, y, dy, fmax = make_data(units=units)
    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    Z = np.linspace(power.min(), power.max(), 30)

    fap = ls.false_alarm_probability(Z, maximum_frequency=fmax,
                                     method=method, method_kwds=kwds)

    assert len(fap) == len(Z)
    if method != 'davies':
        assert np.all(fap <= 1)
        assert np.all(fap[:-1] >= fap[1:])  # monotonically decreasing


@pytest.mark.parametrize('method', sorted(METHODS))
@pytest.mark.parametrize('use_errs', [True, False])
@pytest.mark.parametrize('normalization', sorted(set(NORMALIZATIONS) - {'psd'}))
@pytest.mark.parametrize('units', [False, True])
def test_false_alarm_equivalence(method, normalization, use_errs, units):
    # Note: the PSD normalization is not equivalent to the others, in that it
    # depends on the absolute errors rather than relative errors. Because the
    # scaling contributes to the distribution, it cannot be converted directly
    # from any of the three normalized versions.
    if not HAS_SCIPY and method in ['baluev', 'davies']:
        pytest.skip("SciPy required")

    kwds = METHOD_KWDS.get(method, None)
    t, y, dy, fmax = make_data(units=units)
    if not use_errs:
        dy = None

    ls = LombScargle(t, y, dy, normalization=normalization)
    freq, power = ls.autopower(maximum_frequency=fmax)
    Z = np.linspace(power.min(), power.max(), 30)
    fap = ls.false_alarm_probability(Z, maximum_frequency=fmax,
                                     method=method, method_kwds=kwds)

    # Compute the equivalent Z values in the standard normalization
    # and check that the FAP is consistent
    Z_std = convert_normalization(Z, len(t),
                                  from_normalization=normalization,
                                  to_normalization='standard',
                                  chi2_ref=compute_chi2_ref(y, dy))
    ls = LombScargle(t, y, dy, normalization='standard')
    fap_std = ls.false_alarm_probability(Z_std, maximum_frequency=fmax,
                                         method=method, method_kwds=kwds)

    assert_allclose(fap, fap_std, rtol=0.1)
ludwiktrammer/tikn
apps/books/migrations/0008_auto_20170821_2100.py
Python
gpl-3.0
534
0.001873
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-21 19:00
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('books', '0007_auto_20170821_2052'),
    ]

    operations = [
        migrations.AlterField(
            model_name='book',
            name='slug',
            field=models.SlugField(help_text='wykorzystywane w adresie strony', max_length=100,
                                   unique=True, verbose_name='identyfikator'),
        ),
    ]
drix00/pymcxray
pymcxray/FileFormat/Results/Phirhoz.py
Python
apache-2.0
2,697
0.002595
#!/usr/bin/env python
"""
.. py:currentmodule:: FileFormat.Results.Phirhoz
.. moduleauthor:: Hendrix Demers <hendrix.demers@mail.mcgill.ca>

MCXRay phirhoz result file.
"""

# Script information for the file.
__author__ = "Hendrix Demers (hendrix.demers@mail.mcgill.ca)"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2012 Hendrix Demers"
__license__ = ""

# Subversion information for the file.
__svnRevision__ = "$Revision$"
__svnDate__ = "$Date$"
__svnId__ = "$Id$"

# Standard library modules.

# Third party modules.

# Local modules.

# Project modules

# Globals and constants variables.
KEY_SYMBOL = "symbol"
KEY_SHELL = "shell"
KEY_INTENSITY = "Intensity"
KEY_DEPTHS_A = "depths_A"
KEY_VALUES = "values"


class Phirhoz(object):
    def __init__(self, symbol, shell):
        self._parameters = {}

        self.symbol = symbol
        self.shell = shell

        self._label = "%s [Shell %s]," % (symbol, shell)

    def readFromLines(self, lines):
        indexLine = 0

        # Scan for the intensity line matching this element/shell label.
        for line in lines[indexLine:]:
            indexLine += 1
            if line.startswith(self._label):
                items = line.split('=')
                self.intensity = float(items[-1])
                break

        # Read whitespace-separated depth/value pairs until a line fails to parse.
        self.depths_A = []
        self.values = []
        for _index in range(len(lines[indexLine:])):
            line = lines[indexLine]
            indexLine += 1

            try:
                items = line.split()
                depth_A = float(items[0])
                value = float(items[1])

                self.depths_A.append(depth_A)
                self.values.append(value)
            except (ValueError, IndexError):
                # narrowed from a bare `except:` so that only parse failures end the loop
                break

        return indexLine

    @property
    def symbol(self):
        return self._parameters[KEY_SYMBOL]

    @symbol.setter
    def symbol(self, symbol):
        self._parameters[KEY_SYMBOL] = symbol

    @property
    def shell(self):
        return self._parameters[KEY_SHELL]

    @shell.setter
    def shell(self, shell):
        self._parameters[KEY_SHELL] = shell

    @property
    def intensity(self):
        return self._parameters[KEY_INTENSITY]

    @intensity.setter
    def intensity(self, intensity):
        self._parameters[KEY_INTENSITY] = intensity

    @property
    def depths_A(self):
        return self._parameters[KEY_DEPTHS_A]

    @depths_A.setter
    def depths_A(self, depths_A):
        self._parameters[KEY_DEPTHS_A] = depths_A

    @property
    def values(self):
        return self._parameters[KEY_VALUES]

    @values.setter
    def values(self, values):
        self._parameters[KEY_VALUES] = values
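A small usage sketch, assuming a result file shaped the way readFromLines expects (an intensity line starting with the "%s [Shell %s]," label, then whitespace-separated depth/value pairs); the sample lines are invented for illustration:

# Hypothetical input fragment; real MCXRay result files may differ.
lines = [
    "Cu [Shell K], Intensity = 1.234",
    "0.0 0.95",
    "10.0 0.87",
    "20.0 0.72",
    "end of section",   # non-numeric line terminates the depth/value loop
]

phirhoz = Phirhoz("Cu", "K")
consumed = phirhoz.readFromLines(lines)
print(phirhoz.intensity)   # 1.234
print(phirhoz.depths_A)    # [0.0, 10.0, 20.0]
print(consumed)            # number of lines consumed, here 5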
adelinesharla/CtrlVET
cadastro/models.py
Python
gpl-3.0
12,567
0.05463
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models
from django.forms import ModelForm
from django.views.generic.list import ListView, View
import abc
import datetime
from django.utils import timezone
from django.core.validators import RegexValidator
from django.core.urlresolvers import reverse

# Translation support and pt-BR state choices
from django.utils.translation import ugettext_lazy as _
#from django.core.exceptions import ValidationError

# These classes implement the Tutor fields of the Secretaria subsystem
# and their respective business rules.

STATE_CHOICES = (
    ('', '----'),
    ('AC', 'Acre'),
    ('AL', 'Alagoas'),
    ('AP', 'Amapá'),
    ('AM', 'Amazonas'),
    ('BA', 'Bahia'),
    ('CE', 'Ceará'),
    ('DF', 'Distrito Federal'),
    ('ES', 'Espírito Santo'),
    ('GO', 'Goiás'),
    ('MA', 'Maranhão'),
    ('MT', 'Mato Grosso'),
    ('MS', 'Mato Grosso do Sul'),
    ('MG', 'Minas Gerais'),
    ('PA', 'Pará'),
    ('PB', 'Paraíba'),
    ('PR', 'Paraná'),
    ('PE', 'Pernambuco'),
    ('PI', 'Piauí'),
    ('RJ', 'Rio de Janeiro'),
    ('RN', 'Rio Grande do Norte'),
    ('RS', 'Rio Grande do Sul'),
    ('RO', 'Rondônia'),
    ('RR', 'Roraima'),
    ('SC', 'Santa Catarina'),
    ('SP', 'São Paulo'),
    ('SE', 'Sergipe'),
    ('TO', 'Tocantins')
)

GENERO_CHOICES = (
    ('', '----'),
    ('FE', 'Feminino'),
    ('MA', 'Masculino'),
)


class EnderecoAbs(models.Model):
    _logradouro = models.CharField(verbose_name='Logradouro', max_length=200)
    _numero = models.PositiveSmallIntegerField(verbose_name='Número')
    _bairro = models.CharField(verbose_name='Bairro', max_length=20)
    _cidade = models.CharField(verbose_name='Cidade', max_length=200)
    _cep = models.CharField(verbose_name='CEP', max_length=15)
    _uf = models.CharField(verbose_name='UF', max_length=10, choices=STATE_CHOICES)

    class Meta:
        abstract = True


class AcoesEndereco(EnderecoAbs):
    def __unicode__(self):
        return u'%s %s' % (self.logradouro, self.numero)

    def __str__(self):
        return u'%s %s' % (self.logradouro, self.numero)

    class Meta:
        verbose_name_plural = "Endereços"
        abstract = True


class Endereco(AcoesEndereco):
    def _get_logradouro(self):
        return self._logradouro

    def _get_numero(self):
        return self._numero

    def _get_bairro(self):
        return self._bairro

    def _get_cidade(self):
        return self._cidade

    def _get_cep(self):
        return self._cep

    def _get_uf(self):
        return self._uf

    def _set_logradouro(self, logradouro):
        self._logradouro = logradouro

    def _set_numero(self, numero):
        self._numero = numero

    def _set_bairro(self, bairro):
        self._bairro = bairro

    def _set_cidade(self, cidade):
        self._cidade = cidade

    def _set_cep(self, cep):
        self._cep = cep

    def _set_uf(self, uf):
        self._uf = uf

    logradouro = property(_get_logradouro, _set_logradouro)
    numero = property(_get_numero, _set_numero)
    bairro = property(_get_bairro, _set_bairro)
    cidade = property(_get_cidade, _set_cidade)
    cep = property(_get_cep, _set_cep)
    uf = property(_get_uf, _set_uf)

    class Meta:
        abstract = True


class TelefoneAbs(models.Model):
    telefone_fixo_regex = RegexValidator(
        regex=r'^\+?1?\d{9,15}$',
        message="O formato do número de telefone deve ser: '+999999999'. São Permitidos até 15 dígitos.")
    _telefone1 = models.CharField(validators=[telefone_fixo_regex], verbose_name='Telefone de Contato 1',
                                  max_length=15, blank=True)
    _telefone2 = models.CharField(validators=[telefone_fixo_regex], null=True,
                                  verbose_name='Telefone de Contato 2', max_length=15, blank=True)

    class Meta:
        abstract = True


class AcoesTelefone(TelefoneAbs):
    def __unicode__(self):
        return u'%s' % (self.telefone)

    class Meta:
        verbose_name_plural = "Telefones"
        abstract = True


class Telefone(AcoesTelefone):
    def _get_telefone1(self):
        return self._telefone1

    def _set_telefone1(self, telefone):
        # fixed: the original assigned the undefined name `telefone1`
        self._telefone1 = telefone

    def _get_telefone2(self):
        return self._telefone2

    def _set_telefone2(self, telefone):
        # fixed: the original assigned the undefined name `telefone2`
        self._telefone2 = telefone

    telefone1 = property(_get_telefone1, _set_telefone1)
    telefone2 = property(_get_telefone2, _set_telefone2)

    class Meta:
        abstract = True


class PessoaAbs(models.Model):
    _nome = models.CharField(verbose_name='Nome', max_length=50)
    _email = models.EmailField(verbose_name='E-Mail')
    _cpf = models.CharField(verbose_name='CPF', max_length=11)

    def __unicode__(self):
        return u'%s' % (self.nome)

    def __str__(self):
        return u'%s' % (self.nome)

    def _get_nome(self):
        return self._nome

    def _get_email(self):
        return self._email

    def _get_cpf(self):
        return self._cpf

    def _set_nome(self, nome):
        self._nome = nome

    def _set_email(self, email):
        self._email = email

    def _set_cpf(self, cpf):
        self._cpf = cpf

    nome = property(_get_nome, _set_nome)
    email = property(_get_email, _set_email)
    cpf = property(_get_cpf, _set_cpf)

    class Meta:
        abstract = True


class AcoesTutor(PessoaAbs):
    def __unicode__(self):
        return u'%s' % (self.nome)

    class Meta:
        verbose_name_plural = "Tutores"
        abstract = True


class Tutor(AcoesTutor):
    class Meta:
        abstract = True


# rename to tutor_detalhe, tutordetalhe, or tutordetalhes
class TutorEndTel(Tutor, Endereco, Telefone):
    def get_absolute_url(self):
        return reverse('tutorendtel_detail', kwargs={'pk': self.pk})


class AnimalAbs(models.Model):
    _nome = models.CharField(verbose_name='Nome', max_length=50)
    _rg = models.PositiveSmallIntegerField(verbose_name='RG', unique=True, blank=True)
    _especie = models.CharField(verbose_name='Espécie', max_length=50)
    _raca = models.CharField(verbose_name='Raça', max_length=50)
    sexo = models.CharField(verbose_name='Sexo', max_length=15, choices=GENERO_CHOICES)
    _nascimento = models.DateField(verbose_name='Data de Nascimento')
    _obito = models.DateField(verbose_name='Data de Óbito', null=True, blank=True)
    _idade = models.PositiveSmallIntegerField(verbose_name='Idade')
    tutor = models.ForeignKey(TutorEndTel, on_delete=models.CASCADE, related_name='animais')

    class Meta:
        verbose_name_plural = "Animais"
        abstract = True

    def get_absolute_url(self):
        return reverse('animal_detalhes', kwargs={'pk': self.pk})


class AcoesAnimal(AnimalAbs):
    def __unicode__(self):
        return u'%s' % (self.nome)

    def __str__(self):
        return u'%s' % (self.nome)

    class Meta:
        abstract = True


class Animal(AcoesAnimal):
    def get_absolute_url(self):
        return reverse('animal_detail', kwargs={'pk': self.pk})

    def _get_nome(self):
        return self._nome

    def _get_rg(self):
        return self._rg

    def _get_especie(self):
        return self._especie

    def _get_raca(self):
        return self._raca

    def _get_nascimento(self):
        return self._nascimento

    def _get_obito(self):
        return self._obito

    def _get_idade(self):
        return self._idade

    def _set_nome(self, nome):
        self._nome = nome

    def _set_rg(self, rg):
        self._rg = rg

    def _set_especie(self, especie):
        self._especie = especie

    def _set_raca(self, raca):
        self._raca = raca

    def _set_nascimento(self, nascimento):
        self._nascimento = nascimento

    def _set_obito(self, obito):
        self._obito = obito

    def _set_idade(self, idade):
        self._idade = idade

    nome = property(_get_nome, _set_nome)
    rg = property(_get_rg, _set_rg)
    especie = property(_get_especie, _set_especie)
    raca = property(_get_raca, _set_raca)
    nascimento = property(_get_nascimento, _set_nascimento)
    idade = property(_get_idade, _set_idade)
    obito = property(_get_obito, _set_obito)


# veterinarian-related
class AcoesVeterinario(PessoaAbs):
    class Meta:
        verbose_name_plural = "Veterinarios"
        abstract = True


class Veterinario(AcoesVeterinario):
    _crmv = models.CharField(verbose_name='CRMV', max_length=10)

    def __unicode__(self):
        return u'%s' % (self.nome)

    def __str__(self):
        return u'%s' % (self.nome)

    def _get_crmv(self):
        return self._crmv

    def _set_crmv(self, crmv):
        self._crmv = crmv

    crmv = property(_get_crmv, _set_crmv)


# technician-related
class AcoesTecnico(PessoaAbs):
    class Meta:
        verbose_name_plural = "Tecnicos"
        abstract = True


class Tecnico(AcoesTecnico):
    _crf = models.CharField(verbose_name='
kartoza/geonode
geonode/qgis_server/migrations/0001_initial.py
Python
gpl-3.0
597
0.00335
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('layers', '24_to_26'),
    ]

    operations = [
        migrations.CreateModel(
            name='QGISServerLayer',
            fields=[
                ('layer', models.OneToOneField(primary_key=True, serialize=False, to='layers.Layer')),
                ('base_layer_path', models.CharField(help_text=b'Location of the base layer.',
                                                     max_length=100, verbose_name=b'Base Layer Path')),
            ],
        ),
    ]
lduarte1991/edx-platform
openedx/core/djangoapps/heartbeat/default_checks.py
Python
agpl-3.0
4,002
0.004248
""" A set of built-in default checks for the platform heartbeat endpoint Other checks should be included in their respective modules/djangoapps """ from datetime import datetime, timedelta from time import sleep, time from django.conf import settings from django.core.cache import cache from django.db import connection from django.db.utils import DatabaseError from xmodule.modulestore.django import modulestore from xmodule.exceptions import HeartbeatFailure from .defaults import HEARTBEAT_CELERY_TIMEOUT from .tasks import sample_task # DEFAULT SYSTEM CHECKS # Modulestore def check_modulestore(): """ Check the modulestore connection Returns: (string, Boolean, unicode): A tuple containing the name of the check, whether it succeeded, and a unicode string of either "OK" or the failure message """ # This refactoring merely delegates to the default modulestore (which if it's mixed modulestore will # delegate to all configured modulestores) and a quick test of sql. A later refactoring may allow # any service to register itself as participating in the heartbeat. It's important that all implementation # do as little as possible but give a sound determination that they are ready. try: #@TODO Do we want to parse the output for split and mongo detail and return it? modulestore().heartbeat() return 'modulestore', True, u'OK' except HeartbeatFailure as fail: return 'modulestore', False, unicode(fail) def check_database(): """ Check the database connection by attempting a no-op query Returns: (string, Boolean, unicode): A tuple containing the name of the check, whether it succeeded, and a unicode string of either "OK" or the failure message """ cursor = connection.cursor() try: cursor.execute("SELECT 1") cursor.fetchone() return 'sql', True, u'OK' except DatabaseError as fail: return 'sql', False, unicode(fail) # Caching CACHE_KEY = 'heartbeat-test' CACHE_VALUE = 'abc123' def check_cache_set(): """ Check setting a cache value Returns: (string, Boolean, unicode): A tuple containing the name of the check, whether it succeeded, and a unicode string of either "OK" or the failure message """ try: cache.set(CACHE_KEY, CACHE_VALUE, 30) return 'cache_set', True, u'OK' except Exception as fail: return 'cache_set', False, unicode(fail) def check_cache_get(): """ Check getting a cache value Returns: (string, Boolean, unicode): A tuple containing the name of the check, whether it succeeded, and a unicode string of either "OK" or the failure message """ try: data = cache.get(CACHE_KEY) if data == CACHE_VALUE: return 'cache_get', True, u'OK' else: return 'cache_get', False, u'value check failed' except Exception as fail: return 'cache_get', False, unicode(fail) # Celery def check_celery(): """ Check running a simple asynchronous celery task Returns: (string, Boolean, unicode): A tuple containing the name of the check, whether it succeeded, and a unicode string of either "OK" or the failure message """ now = time() datetimenow = datetime.now() expires = datetimenow + timedelta(seconds=getattr(settings, 'HEARTBEAT_CELERY_TIMEOUT', H
EARTBEAT_CELERY_TIMEOUT)) try: task = sample_task.apply_async(expires=expires)
while expires > datetime.now(): if task.ready() and task.result: finished = str(time() - now) return 'celery', True, unicode({'time': finished}) sleep(0.25) return 'celery', False, "expired" except Exception as fail: return 'celery', False, unicode(fail)
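A sketch of how such (name, ok, message) checks might be aggregated into a single heartbeat payload; the run_checks helper below is hypothetical, not part of this module:

# Hypothetical aggregator over the check functions defined above.
def run_checks(checks):
    results = {}
    all_ok = True
    for check in checks:
        name, ok, message = check()
        results[name] = {'status': ok, 'message': message}
        all_ok = all_ok and ok
    return all_ok, results

# e.g. run_checks([check_modulestore, check_database, check_cache_set, check_cache_get, check_celery])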
zsdonghao/tensorlayer
docker/pypi_list.py
Python
apache-2.0
1,987
0.003523
import argparse
import logging

import requests
import pip._internal

if __name__ == "__main__":

    parser = argparse.ArgumentParser(description='Get the nth version of a given package')

    parser.add_argument('--package', type=str, required=True, help='The PyPI package you want to inspect')
    parser.add_argument('--nth_last_version', type=int, default=1, help='The nth last package will be retrieved')
    parser.add_argument('--prerelease', help='Get PreRelease Package Version', action='store_true')
    parser.add_argument('--debug', help='Print debug information', action='store_true')

    args = parser.parse_args()

    logger = logging.getLogger("PyPI_CLI")
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)
    logger.addHandler(ch)

    if args.debug:
        logger.setLevel(logging.DEBUG)

    logger.debug("Package: %s" % args.package)
    logger.debug("nth_last_version: %s" % args.nth_last_version)
    logger.debug("prerelease: %s" % args.prerelease)
    logger.debug("debug: %s" % args.debug)

    finder = pip._internal.index.PackageFinder([], ['https://pypi.python.org/simple'],
                                               session=requests.Session())
    results = finder.find_all_candidates(args.package)
    tmp_versions = [str(p.version) for p in results]

    logger.debug("%s" % tmp_versions)

    # remove duplicates while preserving order
    versions = list()
    for el in tmp_versions:
        if el not in versions:
            versions.append(el)

    pos = -1
    nth_version = 1

    while True:
        fetched_version = versions[pos]
        logger.debug("Version: %s" % fetched_version)

        if nth_version == args.nth_last_version:
            if args.prerelease or not ("rc" in fetched_version or "a" in fetched_version
                                       or "b" in fetched_version):
                break
            else:
                pos -= 1
                continue

        pos -= 1
        nth_version += 1

    print(fetched_version)
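Since pip._internal is a private API whose PackageFinder signature has changed repeatedly across pip releases, the same lookup can be done against PyPI's public JSON API (the documented https://pypi.org/pypi/<package>/json endpoint); a sketch:

# Alternative that avoids pip internals: query PyPI's JSON API directly.
import requests

def pypi_versions(package):
    resp = requests.get("https://pypi.org/pypi/{0}/json".format(package))
    resp.raise_for_status()
    # 'releases' maps version string -> list of release files; key order is
    # as returned by the API, so sort with packaging.version for reliability.
    return list(resp.json()["releases"].keys())

print(pypi_versions("tensorlayer"))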
HugoKuo/keystone-essex3
keystone/backends/sqlalchemy/migrate_repo/versions/005_add_tenants_uid.py
Python
apache-2.0
996
0.002008
# pylint: disable=C0103,R0801

import sqlalchemy
import migrate

meta = sqlalchemy.MetaData()

# define the previous state of tenants

tenant = {}
tenant['id'] = sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, autoincrement=True)
tenant['name'] = sqlalchemy.Column('name', sqlalchemy.String(255), unique=True)
tenant['desc'] = sqlalchemy.Column('desc', sqlalchemy.String(255))
tenant['enabled'] = sqlalchemy.Column('enabled', sqlalchemy.Integer)
tenants = sqlalchemy.Table('tenants', meta, *tenant.values())

# this column will become unique/non-nullable after populating it
tenant_uid = sqlalchemy.Column('uid', sqlalchemy.String(255), unique=False, nullable=True)


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    migrate.create_column(tenant_uid, tenants)
    assert tenants.c.uid is tenant_uid


def downgrade(migrate_engine):
    meta.bind = migrate_engine
    migrate.drop_column(tenant_uid, tenants)
    assert not hasattr(tenants.c, 'uid')
erikjuhani/thefortressheart
cursor.py
Python
gpl-3.0
2,603
0.006915
from pygame import *
from key_dict import *

'''
The player cursor class
'''


class Cursor:
    def __init__(self, x, y, size):
        self.x = int(x)
        self.y = int(y)
        self.size = size
        self.speed = 1
        self.cooldown = 0
        self.block = 0
        self.menu_switch = {'Build': True}
        self.menu_block = {
            0: 'Wall',
            1: 'Heavy tower',
            2: 'Light tower',
            3: 'Torch',
            4: 'Farm'}

    def check_border(self, level, location):
        if location < 0 or location >= level.map_size:
            return False
        return True

    def update(self, keys, level, dt):
        self.cooldown -= 1 * dt
        if self.cooldown < 0:
            self.cooldown = 0

        tile = level.terrain_map[self.x + self.y * level.map_size]

        for key in KEY_DICT:
            if keys[key] and self.cooldown == 0:
                if KEY_DICT[key] == 'left' and self.check_border(level, self.x - self.speed):
                    self.x -= self.speed
                if KEY_DICT[key] == 'right' and self.check_border(level, self.x + self.speed):
                    self.x += self.speed
                if KEY_DICT[key] == 'up' and self.check_border(level, self.y - self.speed):
                    self.y -= self.speed
                if KEY_DICT[key] == 'down' and self.check_border(level, self.y + self.speed):
                    self.y += self.speed

                # Toggles between building / building removal
                #if KEY_DICT[key] == 'switch':
                #    self.menu_switch['Build'] = not self.menu_switch['Build']

                if KEY_DICT[key] == 'block':
                    self.block += 1
                    if self.block >= len(self.menu_block):
                        self.block = 0

                if KEY_DICT[key] == 'action':
                    if self.menu_switch['Build'] and level.gold > 0:
                        if tile.passable:
                            level.create_tile(self.x, self.y, self.menu_block[self.block])
                    elif not self.menu_switch['Build']:
                        if not tile.passable:
                            level.break_tile(self.x, self.y)
                            level.gold += tile.tile_price

                self.cooldown = 0.2

    def draw(self, screen, xoff, yoff):
        draw.rect(screen, (255, 255, 255),
                  ((self.x + xoff) * self.size, (self.y + yoff) * self.size, self.size, self.size),
                  int(self.size / (self.size / 3)))
diplomacy/research
diplomacy_research/models/policy/token_based/v004_language_model/tests/test_model.py
Python
mit
4,186
0.005256
# ==============================================================================
# Copyright 2019 - Philip Paquette
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Runs tests for the current model and adapter """
from diplomacy_research.models.policy.tests.policy_adapter_test_setup import PolicyAdapterTestSetup
from diplomacy_research.models.policy.token_based import PolicyAdapter, BaseDatasetBuilder
from diplomacy_research.models.policy.token_based.v004_language_model import PolicyModel, load_args
from diplomacy_research.models.value.v001_val_relu_7 import ValueModel, load_args as load_value_args
from diplomacy_research.models.self_play.algorithms.a2c import Algorithm as A2CAlgo, load_args as a2c_args
from diplomacy_research.models.self_play.algorithms.ppo import Algorithm as PPOAlgo, load_args as ppo_args
from diplomacy_research.models.self_play.algorithms.reinforce import Algorithm as ReinforceAlgo,\
    load_args as reinforce_args
from diplomacy_research.models.self_play.algorithms.tests.algorithm_test_setup import AlgorithmSetup
from diplomacy_research.utils.process import run_in_separate_process


# ----------- Testable Class --------------
class BaseTestClass(AlgorithmSetup):
    """ Tests the algorithm """

    def __init__(self, algorithm_ctor, algo_load_args):
        """ Constructor """
        AlgorithmSetup.__init__(self, algorithm_ctor, algo_load_args, 'token_based')

    def get_policy_model(self):
        """ Returns the PolicyModel """
        return PolicyModel

    def get_policy_builder(self):
        """ Returns the Policy's BaseDatasetBuilder """
        return BaseDatasetBuilder

    def get_policy_adapter(self):
        """ Returns the PolicyAdapter """
        return PolicyAdapter

    def get_policy_load_args(self):
        """ Returns the policy args """
        return load_args()


# ----------- Launch Scripts --------------
def launch_a2c():
    """ Launches tests for a2c """
    test_object = BaseTestClass(A2CAlgo, a2c_args)
    test_object.run_tests()

def launch_ppo():
    """ Launches tests for ppo """
    test_object = BaseTestClass(PPOAlgo, ppo_args)
    test_object.run_tests()

def launch_reinforce():
    """ Launches tests for reinforce """
    test_object = BaseTestClass(ReinforceAlgo, reinforce_args)
    test_object.run_tests()

def launch_adapter():
    """ Launches the tests """
    testable_class = PolicyAdapterTestSetup(policy_model_ctor=PolicyModel,
                                            value_model_ctor=ValueModel,
                                            draw_model_ctor=None,
                                            dataset_builder=BaseDatasetBuilder(),
                                            policy_adapter_ctor=PolicyAdapter,
                                            load_policy_args=load_args,
                                            load_value_args=load_value_args,
                                            load_draw_args=None,
                                            strict=False)
    testable_class.run_tests()


# ----------- Tests --------------
def test_run_a2c():
    """ Runs the a2c test """
    run_in_separate_process(target=launch_a2c, timeout=240)

def test_run_ppo():
    """ Runs the ppo test """
    run_in_separate_process(target=launch_ppo, timeout=240)

def test_run_reinforce():
    """ Runs the reinforce test """
    run_in_separate_process(target=launch_reinforce, timeout=240)

def test_run_adapter():
    """ Runs the adapter test """
    run_in_separate_process(target=launch_adapter, timeout=240)
letsencrypt/letsencrypt
tools/venv.py
Python
apache-2.0
8,641
0.002315
#!/usr/bin/env python3
# Developer virtualenv setup for Certbot client
"""Aids in creating a developer virtual environment for Certbot.

When this module is run as a script, it takes the arguments that should
be passed to pip to install the Certbot packages as command line
arguments. If no arguments are provided, all Certbot packages and their
development dependencies are installed. The virtual environment will be
created with the name "venv" in the current working directory. You can
change the name of the virtual environment by setting the environment
variable VENV_NAME.
"""

from __future__ import print_function

import glob
import os
import re
import shutil
import subprocess
import sys
import time

REQUIREMENTS = [
    '-e acme[test]',
    '-e certbot[all]',
    '-e certbot-apache',
    '-e certbot-dns-cloudflare',
    '-e certbot-dns-cloudxns',
    '-e certbot-dns-digitalocean',
    '-e certbot-dns-dnsimple',
    '-e certbot-dns-dnsmadeeasy',
    '-e certbot-dns-gehirn',
    '-e certbot-dns-google',
    '-e certbot-dns-linode',
    '-e certbot-dns-luadns',
    '-e certbot-dns-nsone',
    '-e certbot-dns-ovh',
    '-e certbot-dns-rfc2136',
    '-e certbot-dns-route53',
    '-e certbot-dns-sakuracloud',
    '-e certbot-nginx',
    '-e certbot-compatibility-test',
    '-e certbot-ci',
    '-e letstest',
]

if sys.platform == 'win32':
    REQUIREMENTS.append('-e windows-installer')
    REQUIREMENTS.remove('-e certbot-apache')
    REQUIREMENTS.remove('-e certbot-compatibility-test')

VERSION_PATTERN = re.compile(r'^(\d+)\.(\d+).*$')


class PythonExecutableNotFoundError(Exception):
    pass


def find_python_executable() -> str:
    """
    Find a Python executable compatible with Certbot.
    Will test, in decreasing priority order:

    * the current Python interpreter
    * 'python3' executable in PATH if available
    * 'python' executable in PATH if available
    * Windows Python launcher 'py' executable in PATH if available

    Incompatible python versions for Certbot will be evicted (e.g. Python 3
    versions less than 3.6).

    :rtype: str
    :return: the relevant python executable path
    :raise RuntimeError: if no relevant python executable path could be found
    """
    python_executable_path = None

    # First try, current python executable
    if _check_version('{0}.{1}.{2}'.format(
            sys.version_info[0], sys.version_info[1], sys.version_info[2])):
        return sys.executable

    # Second try, with python executables in path
    for one_version in ('3', '',):
        try:
            one_python = 'python{0}'.format(one_version)
            output = subprocess.check_output([one_python, '--version'],
                                             universal_newlines=True, stderr=subprocess.STDOUT)
            if _check_version(output.strip().split()[1]):
                return subprocess.check_output([one_python, '-c',
                                                'import sys; sys.stdout.write(sys.executable);'],
                                               universal_newlines=True)
        except (subprocess.CalledProcessError, OSError):
            pass

    # Last try, with Windows Python launcher
    try:
        output_version = subprocess.check_output(['py', '-3', '--version'],
                                                 universal_newlines=True, stderr=subprocess.STDOUT)
        if _check_version(output_version.strip().split()[1]):
            # NB: '-3' here replaces an undefined `env_arg` in the source,
            # mirroring the version flag used in the probe call above.
            return subprocess.check_output(['py', '-3', '-c',
                                            'import sys; sys.stdout.write(sys.executable);'],
                                           universal_newlines=True)
    except (subprocess.CalledProcessError, OSError):
        pass

    if not python_executable_path:
        raise RuntimeError('Error, no compatible Python executable for Certbot could be found.')


def _check_version(version_str):
    search = VERSION_PATTERN.search(version_str)

    if not search:
        return False

    version = (int(search.group(1)), int(search.group(2)))

    if version >= (3, 6):
        return True

    print('Incompatible python version for Certbot found: {0}'.format(version_str))
    return False


def subprocess_with_print(cmd, env=None, shell=False):
    if env is None:
        env = os.environ
    print('+ {0}'.format(subprocess.list2cmdline(cmd)) if isinstance(cmd, list) else cmd)
    subprocess.check_call(cmd, env=env, shell=shell)


def subprocess_output_with_print(cmd, env=None, shell=False):
    if env is None:
        env = os.environ
    print('+ {0}'.format(subprocess.list2cmdline(cmd)) if isinstance(cmd, list) else cmd)
    return subprocess.check_output(cmd, env=env, shell=shell)


def get_venv_python_path(venv_path):
    python_linux = os.path.join(venv_path, 'bin/python')
    if os.path.isfile(python_linux):
        return os.path.abspath(python_linux)
    python_windows = os.path.join(venv_path, 'Scripts\\python.exe')
    if os.path.isfile(python_windows):
        return os.path.abspath(python_windows)
    raise ValueError((
        'Error, could not find python executable in venv path {0}: is it a valid venv ?'
        .format(venv_path)))


def prepare_venv_path(venv_name):
    """Determines the venv path and prepares it for use.

    This function cleans up any Python eggs in the current working directory
    and ensures the venv path is available for use. The path used is the
    VENV_NAME environment variable if it is set and venv_name otherwise. If
    there is already a directory at the desired path, the existing directory
    is renamed by appending a timestamp to the directory name.

    :param str venv_name: The name or path at where the virtual environment
        should be created if VENV_NAME isn't set.

    :returns: path where the virtual environment should be created
    :rtype: str

    """
    for path in glob.glob('*.egg-info'):
        if os.path.isdir(path):
            shutil.rmtree(path)
        else:
            os.remove(path)

    env_venv_name = os.environ.get('VENV_NAME')
    if env_venv_name:
        print('Creating venv at {0}'
              ' as specified in VENV_NAME'.format(env_venv_name))
        venv_name = env_venv_name

    if os.path.isdir(venv_name):
        os.rename(venv_name, '{0}.{1}.bak'.format(venv_name, int(time.time())))

    return venv_name


def install_packages(venv_name, pip_args):
    """Installs packages in the given venv.

    :param str venv_name: The name or path at where the virtual
        environment should be created.
    :param pip_args: Command line arguments that should be given to
        pip to install packages
    :type pip_args: `list` of `str`

    """
    # Using the python executable from venv, we ensure to execute following commands in this venv.
    py_venv = get_venv_python_path(venv_name)
    subprocess_with_print([py_venv, os.path.abspath('tools/pipstrap.py')])
    command = [py_venv, os.path.abspath('tools/pip_install.py')]
    command.extend(pip_args)
    subprocess_with_print(command)

    if os.path.isdir(os.path.join(venv_name, 'bin')):
        # Linux/OSX specific
        print('-------------------------------------------------------------------')
        print('Please run the following command to activate developer environment:')
        print('source {0}/bin/activate'.format(venv_name))
        print('-------------------------------------------------------------------')
    elif os.path.isdir(os.path.join(venv_name, 'Scripts')):
        # Windows specific
        print('---------------------------------------------------------------------------')
        print('Please run one of the following commands to activate developer environment:')
        print('{0}\\Scripts\\activate.bat (for Batch)'.format(venv_name))
        print('.\\{0}\\Scripts\\Activate.ps1 (for Powershell)'.format(venv_name))
        print('---------------------------------------------------------------------------')
    else:
        raise ValueError('Error, directory {0} is not a valid venv.'.format(venv_name))


def create_venv(venv_path):
    """Create a Python virtual envir
ging/horizon
openstack_dashboard/enabled/_35_help_about.py
Python
apache-2.0
250
0
# The name of the dashboard to be added to HORIZON['dashboards']. Required.
DASHBOARD = 'help_about'

DISABLED = False

# A list of applications to be added to INSTALLED_APPS.
ADD_INSTALLED_APPS = [
    'openstack_dashboard.dashboards.help_about',
]
acuriel/nahuatilli
core/urls.py
Python
gpl-3.0
840
0.007143
from django.conf.urls import url

from core.views.generic import get_dashboard, delete
from users.views.individuals import RegisterView
from users.views.base import LoginView, logout_user
from core.views.display import IndexView

urlpatterns = [
    #url(r'^$', LoginView.as_view(), name='index'),
    url(r'^$', IndexView.as_view(), name='index'),
    url(r'^login$', LoginView.as_view(), name='login'),
    url(r'^logout$', logout_user, name='logout'),
    url(r'^register$', RegisterView.as_view(), name='register'),
    #url(r'^delete/(?P<content_type_id>\d+)/(?P<object_id>\d+)$', delete, name='delete'),
    url(r'^delete$', delete, name='delete'),
    url(r'^dashboard$', get_dashboard, name='dashboard'),
]
tweiss1234/Cras
db_actions.py
Python
apache-2.0
5,095
0.005299
from requests import HTTPError
from database import Database
import simplejson as json

db = Database.getDatabaseConnection()["cras"]

from log_session import LogSession
import datetime


class DB:
    def __init__(self):
        pass

    @staticmethod
    def add_user(user_id, user_name, mail, picture, fcm_token):
        print ("DEBUG: adding user with data: " + user_id + " " + user_name + " " + mail + " " + fcm_token)
        data = {
            "_id": user_id,
            "fcm_token": fcm_token,
            "name": user_name,
            "mail": mail,
            "picture": picture,
            "supervise": [],
            "supervised_by": [],
            "currently_monitoring": [],
            "currently_monitored_by": "",
            "log_sessions": {}
        }
        try:
            db.create_document(data)
        except HTTPError:
            print "CloudantException: user already exists"
        return data

    @staticmethod
    def get_user_by_ID(user_ID):
        try:
            return db[user_ID]
        except Exception:
            print "DB exception : User does not exist"
            return None

    @staticmethod
    def add_supervisor(user_id, other_id):
        user = get_user_by_id(user_id)
        other_user = get_user_by_id(other_id)
        user["supervised_by"].append(other_id)
        other_user["supervise"].append(user_id)
        user.save()
        other_user.save()

    @staticmethod
    def get_user_supervise(user_id):
        currently_monitoring = db[user_id]["currently_monitoring"]
        user_arr = []
        for id in db[user_id]["supervise"]:
            current = False
            if id in currently_monitoring:
                current = True
            # user_arr.append({"user": get_user_by_id(id),
            #                  "status": current})
            user = get_user_by_id(id).copy()
            user.update({"status": current})
            user_arr.append(user)
        return json.dumps(user_arr)

    @staticmethod
    def get_user_supervised_by(user_id):
        user_arr = []
        for id in db[user_id]["supervised_by"]:
            user_arr.append(get_user_by_id(id))
        return json.dumps(user_arr)

    @staticmethod
    def get_user_name(id):
        return db[id]["name"]

    @staticmethod
    def update_monitoring_status(user_id, sup_id, monitoring, is_sup):
        user = db[user_id]
        sup = db[sup_id]
        if monitoring:
            user["currently_monitored_by"] = sup_id
            sup["currently_monitoring"].append(user_id)
        else:
            if is_sup:
                if sup_id in user["log_sessions"]:
                    num_of_logs = len(user["log_sessions"][sup_id])
                    user["log_sessions"][sup_id][num_of_logs - 1].update({"end_time": str(datetime.datetime.now())})
                    user.save()
                user["currently_monitoring"].remove(sup_id)
                sup["currently_monitored_by"] = ""
            else:
                if user_id in sup["log_sessions"]:
                    num_of_logs = len(sup["log_sessions"][user_id])
                    sup["log_sessions"][user_id][num_of_logs - 1].update({"end_time": str(datetime.datetime.now())})
                    sup.save()
                sup["currently_monitoring"].remove(user_id)
                user["currently_monitored_by"] = ""
        user.save()
        sup.save()

    @staticmethod
    def add_log_session(user_id, to_monitor_id):
        user = db[user_id]
        if to_monitor_id not in user["log_sessions"]:
            user["log_sessions"].update({to_monitor_id: []})
        user["log_sessions"][to_monitor_id].append(
            json.loads(LogSession(datetime.datetime.now(), to_monitor_id).toJSON()))
        try:
            user.save()
        except Exception as e:
            print e

    @staticmethod
    def get_currently_monitored_by(user_id):
        return db[user_id]["currently_monitored_by"]

    @staticmethod
    def get_logs(user_id, sup_id):
        log_sessions = db[user_id]["log_sessions"]
        if sup_id in log_sessions:
            return log_sessions[sup_id]
        else:
            print "There are no logs available for: " + sup_id
            return None

    @staticmethod
    def add_log_event(user_id, sup_id, event):
        user = get_user_by_id(user_id)
        log_sessions = db[user_id]["log_sessions"]
        if sup_id in log_sessions:
            try:
                last_session = db[user_id]["log_sessions"][sup_id][len(db[user_id]["log_sessions"][sup_id]) - 1]
                last_session["events"].append(event)
                db[user_id]["log_sessions"][sup_id][len(db[user_id]["log_sessions"][sup_id]) - 1] = last_session
                user.save()
            except Exception as e:
                print e


def get_user_by_id(user_id):
    user = db[user_id]
    return user


def db_exists(user_id):
    try:
        user = db[user_id]
    except Exception:
        return False
    print "DEBUG: the name is : " + user["name"]
    return user.json()


def get_fcm_token(user_id):
    return db[user_id]["fcm_token"]
GCStokum/ECS-Game
ecs-0.1/docs/source/conf.py
Python
mit
8,695
0.00483
# -*- coding: utf-8 -*-
#
# This file is based upon the file generated by sphinx-quickstart. However,
# where sphinx-quickstart hardcodes values in this file that you input, this
# file has been changed to pull from your module's metadata module.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import os
import sys

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))

# Import project metadata
from ecs import metadata

# -- General configuration ----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo',
              'sphinx.ext.coverage', 'sphinx.ext.viewcode']

# show todos
todo_include_todos = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = metadata.project
copyright = metadata.copyright

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = metadata.version
# The full version, including alpha/beta/rc tags.
release = metadata.version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output --------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'nature'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = metadata.project_no_spaces + 'doc'


# -- Options for LaTeX output -------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
#  documentclass [howto/manual]).
latex_documents = [
    ('index', metadata.project_no_spaces + '.tex',
     metadata.project + ' Documentation',
     metadata.authors_string, 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output -------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', metadata.package,
     metadata.project + ' Documentation',
     metadata.authors_string, 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output -----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', metadata.project_no_spaces,
     metadata.project + ' Documentation',
     metadata.authors_string, metadata.project_no_spaces,
     metadata.description, 'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'f
artwr/airflow
airflow/contrib/operators/gcs_operator.py
Python
apache-2.0
5,140
0.000778
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.version import version


class GoogleCloudStorageCreateBucketOperator(BaseOperator):
    """
    Creates a new bucket. Google Cloud Storage uses a flat namespace,
    so you can't create a bucket with a name that is already in use.

    .. seealso::
        For more information, see Bucket Naming Guidelines:
        https://cloud.google.com/storage/docs/bucketnaming.html#requirements

    :param bucket_name: The name of the bucket. (templated)
    :type bucket_name: str
    :param resource: An optional dict with parameters for creating the bucket.
        For information on available parameters, see Cloud Storage API doc:
        https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
    :type resource: dict
    :param storage_class: This defines how objects in the bucket are stored
        and determines the SLA and the cost of storage (templated). Values include

        - ``MULTI_REGIONAL``
        - ``REGIONAL``
        - ``STANDARD``
        - ``NEARLINE``
        - ``COLDLINE``.

        If this value is not specified when the bucket is created, it will
        default to STANDARD.
    :type storage_class: str
    :param location: The location of the bucket. (templated)
        Object data for objects in the bucket resides in physical storage
        within this region. Defaults to US.

        .. seealso:: https://developers.google.com/storage/docs/bucket-locations

    :type location: str
    :param project_id: The ID of the GCP Project. (templated)
    :type project_id: str
    :param labels: User-provided labels, in key/value pairs.
    :type labels: dict
    :param google_cloud_storage_conn_id: The connection ID to use when
        connecting to Google cloud storage.
    :type google_cloud_storage_conn_id: str
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have
        domain-wide delegation enabled.
    :type delegate_to: str

    :Example::
        The following Operator would create a new bucket ``test-bucket``
        with ``MULTI_REGIONAL`` storage class in ``EU`` region ::

            CreateBucket = GoogleCloudStorageCreateBucketOperator(
                task_id='CreateNewBucket',
                bucket_name='test-bucket',
                storage_class='MULTI_REGIONAL',
                location='EU',
                labels={'env': 'dev', 'team': 'airflow'},
                google_cloud_storage_conn_id='airflow-service-account'
            )
    """

    template_fields = ('bucket_name', 'storage_class', 'location', 'project_id')
    ui_color = '#f0eee4'

    @apply_defaults
    def __init__(self,
                 bucket_name,
                 resource=None,
                 storage_class='MULTI_REGIONAL',
                 location='US',
                 project_id=None,
                 labels=None,
                 google_cloud_storage_conn_id='google_cloud_default',
                 delegate_to=None,
                 *args,
                 **kwargs):
        super(GoogleCloudStorageCreateBucketOperator, self).__init__(*args, **kwargs)
        self.bucket_name = bucket_name
        self.resource = resource
        self.storage_class = storage_class
        self.location = location
        self.project_id = project_id
        self.labels = labels
        self.google_cloud_storage_conn_id = google_cloud_storage_conn_id
        self.delegate_to = delegate_to

    def execute(self, context):
        if self.labels is not None:
            self.labels.update(
                {'airflow-version': 'v' + version.replace('.', '-').replace('+', '-')}
            )

        hook = GoogleCloudStorageHook(
            google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,
            delegate_to=self.delegate_to
        )

        hook.create_bucket(bucket_name=self.bucket_name,
                           resource=self.resource,
                           storage_class=self.storage_class,
                           location=self.location,
                           project_id=self.project_id,
                           labels=self.labels)
dumoulinj/ers
ers_backend/emotion_annotator/serializers.py
Python
mit
267
0.007491
from rest_framework import serializers

from emotion_annotator.models import FrameEmotions


class FrameEmotionsSerializer(serializers.HyperlinkedModelSerializer):
    class Meta:
        model = FrameEmotions
        fields = ('video', 'frameTime', 'emotionType')
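As a quick, hedged illustration of how a serializer like this is usually exposed over HTTP (the viewset, router prefix, and module layout below are assumptions, not code from the ers repo):

# Minimal sketch wiring the serializer above into a DRF endpoint.
from rest_framework import routers, viewsets

from emotion_annotator.models import FrameEmotions
from emotion_annotator.serializers import FrameEmotionsSerializer


class FrameEmotionsViewSet(viewsets.ModelViewSet):
    # Exposes list/create/retrieve/update/delete for FrameEmotions rows,
    # serialized with the hyperlinked serializer defined above.
    queryset = FrameEmotions.objects.all()
    serializer_class = FrameEmotionsSerializer


router = routers.DefaultRouter()
router.register(r'frame-emotions', FrameEmotionsViewSet)  # prefix is illustrative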
lbouma/Cyclopath
pyserver/bin/rpy2/rinterface/tests/test_Sexp.py
Python
apache-2.0
4,549
0.005935
import unittest
import copy
import gc

import rpy2.rinterface as rinterface

rinterface.initr()


class SexpTestCase(unittest.TestCase):

    def testNew_invalid(self):
        x = "a"
        self.assertRaises(ValueError, rinterface.Sexp, x)

    def testNew(self):
        sexp = rinterface.baseenv.get("letters")
        sexp_new = rinterface.Sexp(sexp)

        idem = rinterface.baseenv.get("identical")
        self.assertTrue(idem(sexp, sexp_new)[0])

        sexp_new2 = rinterface.Sexp(sexp)
        self.assertTrue(idem(sexp, sexp_new2)[0])
        del(sexp)
        self.assertTrue(idem(sexp_new, sexp_new2)[0])

    def testTypeof_get(self):
        sexp = rinterface.baseenv.get("letters")
        self.assertEquals(sexp.typeof, rinterface.STRSXP)

        sexp = rinterface.baseenv.get("pi")
        self.assertEquals(sexp.typeof, rinterface.REALSXP)

        sexp = rinterface.baseenv.get("plot")
        self.assertEquals(sexp.typeof, rinterface.CLOSXP)

    def testDo_slot(self):
        data_func = rinterface.baseenv.get("data")
        data_func(rinterface.SexpVector(["iris", ], rinterface.STRSXP))
        sexp = rinterface.globalenv.get("iris")
        names = sexp.do_slot("names")
        iris_names = ("Sepal.Length", "Sepal.Width",
                      "Petal.Length", "Petal.Width", "Species")
        self.assertEquals(len(iris_names), len(names))
        for i, n in enumerate(iris_names):
            self.assertEquals(iris_names[i], names[i])
        self.assertRaises(LookupError, sexp.do_slot, "foo")

    def testDo_slot_assign(self):
        data_func = rinterface.baseenv.get("data")
        data_func(rinterface.SexpVector(["iris", ], rinterface.STRSXP))
        sexp = rinterface.globalenv.get("iris")
        iris_names = rinterface.StrSexpVector(['a', 'b', 'c', 'd', 'e'])
        sexp.do_slot_assign("names", iris_names)
        names = [x for x in sexp.do_slot("names")]
        self.assertEquals(['a', 'b', 'c', 'd', 'e'], names)

    def testDo_slot_assign_create(self):
        # test that assigning slots is also creating the slot
        x = rinterface.IntSexpVector([1,2,3])
        x.do_slot_assign("foo", rinterface.StrSexpVector(["bar", ]))
        slot = x.do_slot("foo")
        self.assertEquals(1, len(slot))
        self.assertEquals("bar", slot[0])

    def testSexp_rsame_true(self):
        sexp_a = rinterface.baseenv.get("letters")
        sexp_b = rinterface.baseenv.get("letters")
        self.assertTrue(sexp_a.rsame(sexp_b))

    def testSexp_rsame_false(self):
        sexp_a = rinterface.baseenv.get("letters")
        sexp_b = rinterface.baseenv.get("pi")
        self.assertFalse(sexp_a.rsame(sexp_b))

    def testSexp_rsame_wrongType(self):
        sexp_a = rinterface.baseenv.get("letters")
        self.assertRaises(ValueError, sexp_a.rsame, 'foo')

    def testSexp_sexp(self):
        sexp = rinterface.IntSexpVector([1,2,3])
        cobj = sexp.__sexp__
        sexp = rinterface.IntSexpVector([4,5,6,7])
        self.assertEquals(4, len(sexp))
        sexp.__sexp__ = cobj
        self.assertEquals(3, len(sexp))

    def testSexp_sexp_wrongtypeof(self):
        sexp = rinterface.IntSexpVector([1,2,3])
        cobj = sexp.__sexp__
        sexp = rinterface.StrSexpVector(['a', 'b'])
        self.assertEquals(2, len(sexp))
        self.assertRaises(ValueError, sexp.__setattr__, '__sexp__', cobj)

    def testSexp_sexp_destroyCobj(self):
        sexp = rinterface.IntSexpVector([1,2,3])
        cobj = sexp.__sexp__
        del(cobj)
        gc.collect()
        # no real test, just make sure that it does
        # not cause a segfault

    def testSexp_deepcopy(self):
        sexp = rinterface.IntSexpVector([1,2,3])
        self.assertEquals(0, sexp.named)
        rinterface.baseenv.get("identity")(sexp)
        self.assertEquals(2, sexp.named)
        sexp2 = sexp.__deepcopy__()
        self.assertEquals(sexp.typeof, sexp2.typeof)
        self.assertEquals(list(sexp), list(sexp2))
        self.assertFalse(sexp.rsame(sexp2))
        self.assertEquals(0, sexp2.named)
        # should be the same as above, but just in case:
        sexp3 = copy.deepcopy(sexp)
        self.assertEquals(sexp.typeof, sexp3.typeof)
        self.assertEquals(list(sexp), list(sexp3))
        self.assertFalse(sexp.rsame(sexp3))
        self.assertEquals(0, sexp3.named)


def suite():
    suite = unittest.TestLoader().loadTestsFromTestCase(SexpTestCase)
    return suite


if __name__ == '__main__':
    tr = unittest.TextTestRunner(verbosity=2)
    tr.run(suite())
kashif/chainer
chainer/functions/array/flatten.py
Python
mit
1,211
0
from chainer import function


class Flatten(function.Function):

    """Flatten function."""

    def forward(self, inputs):
        self.retain_inputs(())
        self._in_shape = inputs[0].shape
        return inputs[0].ravel(),

    def backward(self, inputs, grads):
        return grads[0].reshape(self._in_shape),


def flatten(x):
    """Flatten a given array into one dimension.

    Args:
        x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
        :class:`cupy.ndarray`): Input variable.

    Returns:
        ~chainer.Variable: Output variable flatten to one dimension.

    .. note::
        When you input a scalar array (i.e. the shape is ``()``), you can
        also get the one dimension array whose shape is ``(1,)``.

    .. admonition:: Example

        >>> x = np.array([[1, 2], [3, 4]])
        >>> x.shape
        (2, 2)
        >>> y = F.flatten(x)
        >>> y.shape
        (4,)
        >>> y.data
        array([1, 2, 3, 4])

        >>> x = np.arange(8).reshape(2, 2, 2)
        >>> x.shape
        (2, 2, 2)
        >>> y = F.flatten(x)
        >>> y.shape
        (8,)
        >>> y.data
        array([0, 1, 2, 3, 4, 5, 6, 7])

    """
    return Flatten()(x)
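The Function above records the input shape on the forward pass and reshapes the incoming gradient back to it on the backward pass. That round trip can be sanity-checked with plain NumPy, independent of Chainer:

# Sketch of the shape round trip Flatten relies on (NumPy only, no Chainer).
import numpy as np

x = np.arange(6).reshape(2, 3)
y = x.ravel()              # forward: (2, 3) -> (6,)
gy = np.ones_like(y)       # upstream gradient, shape (6,)
gx = gy.reshape(x.shape)   # backward: restore the stored input shape (2, 3)
assert gx.shape == x.shape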
drix00/pymcxray
pymcxray/FileFormat/SimulationParameters.py
Python
apache-2.0
15,335
0.004695
#!/usr/bin/env python
"""
.. py:currentmodule:: FileFormat.SimulationParameters
.. moduleauthor:: Hendrix Demers <hendrix.demers@mail.mcgill.ca>

MCXRay simulation parameters input file.
"""

# Script information for the file.
__author__ = "Hendrix Demers (hendrix.demers@mail.mcgill.ca)"
__version__ = ""
__date__ = ""
__copyright__ = "Copyright (c) 2012 Hendrix Demers"
__license__ = ""

# Subversion information for the file.
__svnRevision__ = "$Revision$"
__svnDate__ = "$Date$"
__svnId__ = "$Id$"

# Standard library modules.
import copy

# Third party modules.

# Local modules.

# Project modules
import pymcxray.FileFormat.MCXRayModel as MCXRayModel
import pymcxray.FileFormat.Version as Version

# Globals and constants variables.
KEY_BASE_FILENAME = "BaseFileName"
KEY_NUMBER_ELECTRONS = "ElectronNbr"
KEY_NUMBER_PHOTONS = "PhotonNbr"
KEY_NUMBER_WINDOWS = "WindowNbr"
KEY_NUMBER_FILMS_X = "FilmNbrX"
KEY_NUMBER_FILMS_Y = "FilmNbrY"
KEY_NUMBER_FILMS_Z = "FilmNbrZ"
KEY_NUMBER_CHANNELS = "SpectraChannel"
KEY_ENERGY_CHANNEL_WIDTH = "EnergyChannelWidth"
KEY_SPECTRA_INTERPOLATION_MODEL = "SpectraInterpolation"
KEY_VOXEL_SIMPLIFICATION = "VoxelSimplification"
KEY_ELASTIC_CROSS_SECTION_SCALING_FACTOR = "ElasticCrossSectionScalingFactor"
KEY_ENERGY_LOSS_SCALING_FACTOR = "EnergyLossScalingFactor"


class SimulationParameters(object):
    def __init__(self):
        self.version = copy.deepcopy(Version.CURRENT_VERSION)

        self._keys = self._createKeys()

        self._parameters = {}

        self.defaultValues()

    def _createKeys(self):
        keys = []

        keys.append(KEY_BASE_FILENAME)
        keys.append(KEY_NUMBER_ELECTRONS)
        keys.append(KEY_NUMBER_PHOTONS)
        keys.append(KEY_NUMBER_WINDOWS)
        keys.append(KEY_NUMBER_FILMS_X)
        keys.append(KEY_NUMBER_FILMS_Y)
        keys.append(KEY_NUMBER_FILMS_Z)
        if self.version == Version.BEFORE_VERSION:
            keys.append(KEY_NUMBER_CHANNELS)
        else:
            keys.append(KEY_ENERGY_CHANNEL_WIDTH)
        keys.append(KEY_SPECTRA_INTERPOLATION_MODEL)
        keys.append(KEY_VOXEL_SIMPLIFICATION)

        if self.version >= Version.VERSION_1_4_4:
            keys.append(KEY_ELASTIC_CROSS_SECTION_SCALING_FACTOR)
            keys.append(KEY_ENERGY_LOSS_SCALING_FACTOR)

        return keys

    def defaultValues(self):
        baseFilenameRef = r"Results\McXRay"
        self.baseFilename = baseFilenameRef

        self.numberElectrons = 1000
        self.numberPhotons = 10000
        self.numberWindows = 64
        self.numberFilmsX = 128
        self.numberFilmsY = 128
        self.numberFilmsZ = 128
        self.numberChannels = 1024
        self.energyChannelWidth_eV = 5.0
        self.spectrumInterpolationModel = MCXRayModel.SpectrumInterpolationModel.TYPE_LINEAR_DOUBLE
        self.voxelSimplification = None
        self.elasticCrossSectionScalingFactor = 1.0
        self.energyLossScalingFactor = 1.0

    def _createExtractMethod(self):
        extractMethods = {}

        extractMethods[KEY_BASE_FILENAME] = str
        extractMethods[KEY_NUMBER_ELECTRONS] = int
        extractMethods[KEY_NUMBER_PHOTONS] = int
        extractMethods[KEY_NUMBER_WINDOWS] = int
        extractMethods[KEY_NUMBER_FILMS_X] = int
        extractMethods[KEY_NUMBER_FILMS_Y] = int
        extractMethods[KEY_NUMBER_FILMS_Z] = int
        extractMethods[KEY_NUMBER_CHANNELS] = int
        extractMethods[KEY_ENERGY_CHANNEL_WIDTH] = float
        extractMethods[KEY_SPECTRA_INTERPOLATION_MODEL] = self._extractSpectrumInterpolationModel
        extractMethods[KEY_VOXEL_SIMPLIFICATION] = bool
        extractMethods[KEY_ELASTIC_CROSS_SECTION_SCALING_FACTOR] = float
        extractMethods[KEY_ENERGY_LOSS_SCALING_FACTOR] = float

        return extractMethods

    def _createFormatMethod(self):
        formatMethods = {}

        formatMethods[KEY_BASE_FILENAME] = "%s"
        formatMethods[KEY_NUMBER_ELECTRONS] = "%i"
        formatMethods[KEY_NUMBER_PHOTONS] = "%i"
        formatMethods[KEY_NUMBER_WINDOWS] = "%i"
        formatMethods[KEY_NUMBER_FILMS_X] = "%i"
        formatMethods[KEY_NUMBER_FILMS_Y] = "%i"
        formatMethods[KEY_NUMBER_FILMS_Z] = "%i"
        formatMethods[KEY_NUMBER_CHANNELS] = "%i"
        formatMethods[KEY_ENERGY_CHANNEL_WIDTH] = "%s"
        formatMethods[KEY_SPECTRA_INTERPOLATION_MODEL] = "%s"
        formatMethods[KEY_VOXEL_SIMPLIFICATION] = "%s"
        formatMethods[KEY_ELASTIC_CROSS_SECTION_SCALING_FACTOR] = "%.5f"
        formatMethods[KEY_ENERGY_LOSS_SCALING_FACTOR] = "%.5f"

        return formatMethods

    def _extractSpectrumInterpolationModel(self, text):
        model = MCXRayModel.SpectrumInterpolationModel(int(text))
        return model

    def read(self, filepath):
        self.version.readFromFile(filepath)

        lines = open(filepath, 'r').readlines()

        extractMethods = self._createExtractMethod()

        for line in lines:
            line = line.strip()

            for key in self._keys:
                if line.startswith(key):
                    items = line.split('=')
                    self._parameters[key] = extractMethods[key](items[-1])

    def write(self, filepath):
        outputFile = open(filepath, 'w')

        self._writeHeader(outputFile)

        self.version.writeLine(outputFile)

        formatMethods = self._createFormatMethod()

        keys = self._createKeys()
        for key in keys:
            if key == KEY_SPECTRA_INTERPOLATION_MODEL:
                value = formatMethods[key] % (self._parameters[key].getModel())
            else:
                value = formatMethods[key] % (self._parameters[key])
            if value is not None and value != "None":
                line = "%s=%s\n" % (key, value)
                outputFile.write(line)

    def _writeHeader(self, outputFile):
        if self._parameters[KEY_VOXEL_SIMPLIFICATION] is not None:
            headerLines = [
                "********************************************************************************",
                "*** SIMULATION PARAMETERS",
                "***",
                "*** BaseFileName = All output files will be named using this term",
                "*** ElectronNbr = Total number of electrons to simulate",
                "*** PhotonNbr = Total number of photons to simulate in EDS",
                "*** WindowNbr = Number of energy windows in PhiRo computations",
                "*** FilmNbrX = Number of X layers in PhiRo computations",
                "*** FilmNbrY = Number of Y layers in PhiRo computations",
                "*** FilmNbrZ = Number of Z layers in PhiRo computations",
                "*** SpectraChannel = Number of channels in spectra",
                "*** SpectraInterpolation = Interpolation type for spectra",
                "*** VoxelSimplification = Use only middle voxel of trajectories to store energy",
                "***",
                "********************************************************************************"]
        elif self.version == Version.BEFORE_VERSION:
            headerLines = [
                "********************************************************************************",
                "*** SIMULATION PARAMETERS",
                "***",
                "*** BaseFileName = All output files will be named using this term",
                "*** ElectronNbr = Total number of electrons to simulate",
                "*** PhotonNbr = Total number of photons to simulate in EDS",
                "*** WindowNbr = Number of energy windows in PhiRo computatio
jolyonb/edx-platform
lms/djangoapps/instructor/tests/test_email.py
Python
agpl-3.0
7,373
0.003119
""" Unit tests for email feature flag in new instructor dashboard. Additionally tests that bulk email is always disabled for non-Mongo backed courses, regardless of email feature flag, and that the view is conditionally available when Course Auth is turned on. """ from __future__ import absolute_import from django.urls import reverse from opaque_keys.edx.keys import CourseKey from six import text_type from bulk_email.models import BulkEmailFlag, CourseAuthorization from student.tests.
factories import AdminFactory from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_MODULESTORE, SharedModuleStoreTestCase from xmodule.modulestore.tests.factories import CourseFactory class TestNewInstructorDashboardEmailViewMongoBacked(SharedModuleStoreTestCase): """ Check for email view on the new instructor dashboard for Mongo-backed courses """ @classmethod def setUpClass(cls): super(TestNewInstructorDashboardEmailViewM
ongoBacked, cls).setUpClass() cls.course = CourseFactory.create() # URL for instructor dash cls.url = reverse('instructor_dashboard', kwargs={'course_id': text_type(cls.course.id)}) # URL for email view cls.email_link = '<button type="button" class="btn-link send_email" data-section="send_email">Email</button>' def setUp(self): super(TestNewInstructorDashboardEmailViewMongoBacked, self).setUp() # Create instructor account instructor = AdminFactory.create() self.client.login(username=instructor.username, password="test") def tearDown(self): super(TestNewInstructorDashboardEmailViewMongoBacked, self).tearDown() BulkEmailFlag.objects.all().delete() # In order for bulk email to work, we must have both the BulkEmailFlag.is_enabled() # set to True and for the course to be Mongo-backed. # The flag is enabled and the course is Mongo-backed (should work) def test_email_flag_true_mongo_true(self): BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=False) # Assert that instructor email is enabled for this course - since REQUIRE_COURSE_EMAIL_AUTH is False, # all courses should be authorized to use email. self.assertTrue(BulkEmailFlag.feature_enabled(self.course.id)) # Assert that the URL for the email view is in the response response = self.client.get(self.url) self.assertIn(self.email_link, response.content) send_to_label = '<div class="send_to_list">Send to:</div>' self.assertIn(send_to_label, response.content) self.assertEqual(response.status_code, 200) # The course is Mongo-backed but the flag is disabled (should not work) def test_email_flag_false_mongo_true(self): BulkEmailFlag.objects.create(enabled=False) # Assert that the URL for the email view is not in the response response = self.client.get(self.url) self.assertNotIn(self.email_link, response.content) # Flag is enabled, but we require course auth and haven't turned it on for this course def test_course_not_authorized(self): BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=True) # Assert that instructor email is not enabled for this course self.assertFalse(BulkEmailFlag.feature_enabled(self.course.id)) # Assert that the URL for the email view is not in the response response = self.client.get(self.url) self.assertNotIn(self.email_link, response.content) # Flag is enabled, we require course auth and turn it on for this course def test_course_authorized(self): BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=True) # Assert that instructor email is not enabled for this course self.assertFalse(BulkEmailFlag.feature_enabled(self.course.id)) # Assert that the URL for the email view is not in the response response = self.client.get(self.url) self.assertNotIn(self.email_link, response.content) # Authorize the course to use email cauth = CourseAuthorization(course_id=self.course.id, email_enabled=True) cauth.save() # Assert that instructor email is enabled for this course self.assertTrue(BulkEmailFlag.feature_enabled(self.course.id)) # Assert that the URL for the email view is in the response response = self.client.get(self.url) self.assertIn(self.email_link, response.content) # Flag is disabled, but course is authorized def test_course_authorized_feature_off(self): BulkEmailFlag.objects.create(enabled=False, require_course_email_auth=True) # Authorize the course to use email cauth = CourseAuthorization(course_id=self.course.id, email_enabled=True) cauth.save() # Assert that this course is authorized for instructor email, but the feature is not enabled 
self.assertFalse(BulkEmailFlag.feature_enabled(self.course.id)) self.assertTrue(CourseAuthorization.instructor_email_enabled(self.course.id)) # Assert that the URL for the email view IS NOT in the response response = self.client.get(self.url) self.assertNotIn(self.email_link, response.content) class TestNewInstructorDashboardEmailViewXMLBacked(SharedModuleStoreTestCase): """ Check for email view on the new instructor dashboard """ MODULESTORE = TEST_DATA_MIXED_MODULESTORE @classmethod def setUpClass(cls): super(TestNewInstructorDashboardEmailViewXMLBacked, cls).setUpClass() cls.course_key = CourseKey.from_string('edX/toy/2012_Fall') # URL for instructor dash cls.url = reverse('instructor_dashboard', kwargs={'course_id': text_type(cls.course_key)}) # URL for email view cls.email_link = '<button type="button" class="btn-link send_email" data-section="send_email">Email</button>' def setUp(self): super(TestNewInstructorDashboardEmailViewXMLBacked, self).setUp() # Create instructor account instructor = AdminFactory.create() self.client.login(username=instructor.username, password="test") # URL for instructor dash self.url = reverse('instructor_dashboard', kwargs={'course_id': text_type(self.course_key)}) # URL for email view self.email_link = '<button type="button" class="btn-link send_email" data-section="send_email">Email</button>' def tearDown(self): super(TestNewInstructorDashboardEmailViewXMLBacked, self).tearDown() BulkEmailFlag.objects.all().delete() # The flag is enabled, and since REQUIRE_COURSE_EMAIL_AUTH is False, all courses should # be authorized to use email. But the course is not Mongo-backed (should not work) def test_email_flag_true_mongo_false(self): BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=False) response = self.client.get(self.url) self.assertNotIn(self.email_link, response.content) # The flag is disabled and the course is not Mongo-backed (should not work) def test_email_flag_false_mongo_false(self): BulkEmailFlag.objects.create(enabled=False, require_course_email_auth=False) response = self.client.get(self.url) self.assertNotIn(self.email_link, response.content)
ebigelow/LOTlib
LOTlib/Hypotheses/Lexicon/RecursiveLexicon.py
Python
gpl-3.0
1,491
0.004695
from SimpleLexicon import SimpleLexicon
from LOTlib.Evaluation.EvaluationException import RecursionDepthException


class RecursiveLexicon(SimpleLexicon):
    """
    A lexicon where word meanings can call each other. Analogous to a
    RecursiveLOTHypothesis from a LOTHypothesis. To achieve this, we require
    the LOThypotheses in self.values to take a "recurse" call that is always
    passed in by default here on __call__ as the first argument. This throws
    a RecursionDepthException when it gets too deep.

    See Examples.EvenOdd
    """

    def __init__(self, recursive_depth_bound=10, *args, **kwargs):
        self.recursive_depth_bound = recursive_depth_bound
        SimpleLexicon.__init__(self, *args, **kwargs)

    def __call__(self, word, *args):
        """
        Wrap in self as a first argument that we don't have to in the grammar.
        This way, we can use self(word, X Y) as above.
        """
        self.recursive_call_depth = 0
        return self.value[word](self.recursive_call, *args)  # pass in "self" as lex, using the recursive version

    def recursive_call(self, word, *args):
        """
        This gets called internally on recursive calls.
        It keeps track of the depth to allow us to escape.
        """
        self.recursive_call_depth += 1

        if self.recursive_call_depth > self.recursive_depth_bound:
            raise RecursionDepthException

        # print ">>>", self.value[word]
        return self.value[word](self.recursive_call, *args)
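To make the calling convention concrete, here is a stand-in sketch (not the actual Examples.EvenOdd code; normally the values are LOTHypotheses, not plain functions) showing word meanings that take `recurse` as their first argument, exactly as `__call__` and `recursive_call` pass it in:

# Illustrative only: two mutually recursive "word meanings" in the shape
# RecursiveLexicon expects. recurse(word, *args) maps to recursive_call.
def even_(recurse, n):
    return True if n == 0 else recurse('odd_', n - 1)

def odd_(recurse, n):
    return False if n == 0 else recurse('even_', n - 1)

# With lex.value = {'even_': even_, 'odd_': odd_}, calling lex('even_', 4)
# resets the depth counter and bounces between the two entries, raising
# RecursionDepthException once recursive_depth_bound hops are exceeded.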
GuessWhoSamFoo/pandas
pandas/testing.py
Python
bsd-3-clause
158
0
# flake8: noqa

"""
Public testing utility functions.
"""

from pandas.util.testing import (
    assert_frame_equal, assert_index_equal, assert_series_equal)
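A short usage sketch of the re-exported helpers (values here are made up):

import pandas as pd
import pandas.testing as tm

left = pd.DataFrame({'a': [1, 2]})
right = pd.DataFrame({'a': [1, 2]})
tm.assert_frame_equal(left, right)             # passes silently on equality
tm.assert_series_equal(left['a'], right['a'])  # raises AssertionError on mismatch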
mozilla/telemetry-analysis-service
atmo/clusters/migrations/0021_rename_cluster_emr_release.py
Python
mpl-2.0
1,083
0.001847
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-03-21 12:36
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [("clusters", "0020_emr_release_model")]

    operations = [
        migrations.RenameField(
            model_name="cluster", old_name="emr_release", new_name="emr_release_version"
        ),
        migrations.AddField(
            model_name="cluster",
            name="emr_release",
            field=models.ForeignKey(
                blank=True,
                help_text='Different AWS EMR versions have different versions of software like Hadoop, Spark, etc. See <a href="http://docs.aws.amazon.com/emr/latest/ReleaseGuide/emr-whatsnew.html">what\'s new</a> in each.',
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="created_clusters",
                to="clusters.EMRRelease",
                verbose_name="EMR release",
            ),
        ),
    ]
ammaritiz/pulp_puppet
pulp_puppet_extensions_consumer/pulp_puppet/extensions/consumer/structure.py
Python
gpl-2.0
635
0
from gettext import gettext as _

SECTION_ROOT = 'puppet'
DESC_ROOT = _('manage Puppet bindings')


def ensure_puppet_root(cli):
    """
    Verifies that the root of puppet-related commands exists in the CLI,
    creating it using constants from this module if it does not.

    :param cli: CLI instance being configured
    :type  cli: pulp.client.extensions.core.PulpCli
    """
    root_section = cli.find_section(SECTION_ROOT)
    if root_section is None:
        root_section = cli.create_section(SECTION_ROOT, DESC_ROOT)
    return root_section


def root_section(cli):
    return cli.root_section.find_subsection(SECTION_ROOT)
sb2gh/flask_login_1
app/__init__.py
Python
mit
988
0
from flask import Flask
from flask.ext.bootstrap import Bootstrap
from flask.ext.mail import Mail
from flask.ext.moment import Moment
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from config import config

bootstrap = Bootstrap()
mail = Mail()
moment = Moment()
db = SQLAlchemy()

login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'


def create_app(config_name):
    app = Flask(__name__)
    print 'in app __init__.py', config_name, config[config_name]
    app.config.from_object(config[config_name])
    config[config_name].init_app(app)

    bootstrap.init_app(app)
    mail.init_app(app)
    moment.init_app(app)
    db.init_app(app)
    login_manager.init_app(app)

    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)

    from .auth import auth as auth_blueprint
    app.register_blueprint(auth_blueprint, url_prefix='/auth')

    return app
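A hedged sketch of how this application factory is typically driven; the 'default' config key is an assumption about what config.config contains in this project:

# Hypothetical launcher for the factory above (Python 2, matching the module).
from app import create_app, db

app = create_app('default')      # assumed key in config.config
with app.app_context():
    db.create_all()              # create tables for the bound models

if __name__ == '__main__':
    app.run()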
josherich/mindynode-parsers
mindynode_nltk/utils/opencc.py
Python
mit
117
0.025641
import subprocess


def convert_chinese(text):
    return subprocess.getoutput("echo '%s' | opencc -c hk2s.json" % text)
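Note that interpolating `text` into a shell string breaks on single quotes and permits shell injection. A hedged sketch of a safer variant (assuming the same `opencc` binary and `hk2s.json` config are reachable on PATH, and that opencc reads stdin when no input file is given):

import subprocess

def convert_chinese_safe(text):
    # Pass text on stdin instead of building a shell pipeline, so quotes
    # and shell metacharacters in `text` cannot alter the command.
    result = subprocess.run(
        ["opencc", "-c", "hk2s.json"],
        input=text, capture_output=True, text=True, check=True,
    )
    return result.stdout.rstrip("\n")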
Mohamad1994HD/LinkArchiever
app/createDBTables.py
Python
gpl-3.0
1,118
0.006261
import sqlite3

from config import appConfig


def createTBLS(path=None):
    conn = sqlite3.connect(path)
    cursor = conn.cursor()
    cursor.execute("""CREATE TABLE links(id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL,
                      name TEXT NOT NULL
                      );""")
    cursor.execute("""CREATE TABLE tags(id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL,
                      tag TEXT NOT NULL
                      );""")
    cursor.execute("""CREATE TABLE assc(id INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL,
                      links_id INTEGER NOT NULL,
                      tags_id INTEGER NOT NULL,
                      FOREIGN KEY (links_id) REFERENCES links(id),
                      FOREIGN KEY (tags_id) REFERENCES tags(id)
                      );""")
    conn.commit()
    conn.close()


if __name__ == '__main__':
    try:
        path = appConfig.db_path
        print path
        createTBLS(str(path))
    except IOError as e:
        print (str(e))
simontakite/sysadmin
pythonscripts/thinkpython/CellWorld.py
Python
gpl-2.0
6,080
0.004441
"""This module is part of Swampy, a suite of programs available from allendowney.com/swampy. Copyright 2011 Allen B. Downey Distributed under the GNU General Public License at gnu.org/licenses/gpl.html. """ import math from World import World class CellWorld(World): """Contains cells and animals that move between cells.""" def __init__(self, canvas_size=500, cell_size=5, interactive=False): World.__init__(self) self.title('CellWorld') self.canvas_size = canvas_size self.cell_size = cell_size # cells is a map from index tuples to Cell objects self.cells = {} if interactive: self.make_canvas() self.make_control() def make_canvas(self): """Creates the GUI.""" self.canvas = self.ca(width=self.canvas_size, height=self.canvas_size, bg='white', scale = [self.cell_size, self.cell_size]) def make_control(self): """Adds GUI elements that allow the user to change the scale.""" self.la(text='Click or drag on the canvas to create cells.') self.row([0,1,0]) self.la(text='Cell size: ') self.cell_size_en = self.en(width=10, text=str(self.cell_size)) self.bu(text='resize', command=self.rescale) self.endrow() def bind(self): """Creates bindings for the canvas.""" self.canvas.bind('<ButtonPress-1>', self.click) self.canvas.bind('<B1-Motion>', self.click) def click(self, event): """Event handler for clicks and drags. It creates a new cell or toggles an existing cell. """ # convert the button click coordinates to an index tuple x, y = self.canvas.invert([event.x, event.y]) i, j = int(math.floor(x)), int(math.floor(y)) # toggle the cell if it exists; create it otherwise cell = self.get_cell(i,j) if cell: cell.toggle() else: self.make_cell(x, y) def make_cell(self, i, j): """Creates and returns a new cell at i,j.""" cell = Cell(self, i, j) self.cells[i,j] = cell return cell def cell_bounds(self, i, j): """Return the bounds of the cell with indices i, j.""" p1 = [i, j] p2 = [i+1, j] p3 = [i+1, j+1] p4 = [i, j+1] bounds = [p1, p2, p3, p4] return bounds def get_cell(self, i, j, default=None): """Gets the cell at i, j or returns the default value.""" cell = self.cells.get((i,j), default) return cell four_neighbors = [(1,0), (-1,0), (0,1), (0,-1)] eight_neighbors = four_neighbors + [(1,1), (1,-1), (-1,1), (-1,-1)] def get_four_neighbors(self, cell, default=None): """Return the four Von Neumann neighbors of a cell.""" return self.get_neighbors(cell, default, CellWorld.four_neighbors) def get_eight_neighbors(self, cell, default=None): """Returns the eight Moore neighbors of a cell.""" return self.get_neighbors(cell, default, CellWorld.eight_neighbors) def get_neighbors(self, cell, default=None, deltas=[(0,0)]): """Return the neighbors of a cell. Args: cell: Cell deltas: a list of tuple offsets. """ i, j = cell.indices cells = [self.get_cell(i+di, j+dj, default) for di, dj in deltas] return cells def rescale(self): """Event handler that rescales the world. Reads the new scale from the GUI, changes the canvas transform, and redraws the world. 
""" cell_size = self.cell_size_en.get() cell_size = int(cell_size) self.canvas.transforms[0].scale = [cell_size, cell_size] self.redraw() def redraw(self): """Clears the canvas and redraws all cells and animals.""" self.canvas.clear() for cell in self.cells.itervalues(): cell.draw() for animal in self.animals: animal.draw() class Cell(object): """A rectangular region in CellWorld""" def __init__(self, world, i, j): self.world = world self.indices = i, j self.bounds = self.world.cell_bounds(i, j) # options used for a marked cell self.marked_options = dict(fill='black', outline='gray80') # options used for an unmarked cell self.unmarked_options = dict(fill='yellow', outline='gray80') self.marked = False self.draw() def draw(self): """Draw the cell.""" if
self.marked: options = self.marked_options else: options = self.unmarked_options # bounds returns all four corners, so slicing every other # element yields two opposing corners, which is what we # pass to Canvas.rectangle coords = self.bounds[::2] self.item = self.world.canvas.rectangle(coords, **options) def undraw(sel
f): """Delete any items with this cell's tag.""" self.item.delete() self.item = None def get_config(self, option): """Gets the configuration of this cell.""" return self.item.cget(option) def config(self, **options): """Configure this cell with the given options.""" self.item.config(**options) def mark(self): """Marks this cell.""" self.marked = True self.config(**self.marked_options) def unmark(self): """Unmarks this cell.""" self.marked = False self.config(**self.unmarked_options) def is_marked(self): """Checks whether this cell is marked.""" return self.marked def toggle(self): """Toggles the state of this cell.""" if self.is_marked(): self.unmark() else: self.mark() if __name__ == '__main__': world = CellWorld(interactive=True) world.bind() world.mainloop()
PisiLinux-PyQt5Port/package-manager
src/settingsdialog.py
Python
gpl-2.0
20,739
0.003809
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2010 TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file

import re

from os import path

from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QDialog
from PyQt5.QtWidgets import QTableWidgetItem
from PyQt5.QtWidgets import QCheckBox
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtGui import QDesktopServices
from PyQt5.QtNetwork import QNetworkProxy
from PyQt5.QtCore import *

import pds
import config
import helpdialog
import repodialog
import pmutils
import backend

from ui_settingsdialog import Ui_SettingsDialog
from pmutils import *


class SettingsTab(QObject):
    def __init__(self, settings):
        QObject.__init__(self)
        self.settings = settings
        self.config = config.PMConfig()
        self.iface = backend.pm.Iface()
        self.setupUi()
        self.connectSignals()
        self.changed = False

    def markChanged(self):
        self.changed = True

    def setupUi(self):
        pass

    def connectSignals(self):
        pass

    def save(self):
        pass

    def initialize(self):
        pass


class GeneralSettings(SettingsTab):
    def setupUi(self):
        self.settings.moveUpButton.setIcon(KIcon("up"))
        self.settings.moveDownButton.setIcon(KIcon("down"))
        self.settings.addRepoButton.setIcon(KIcon(("list-add", "add")))
        self.settings.removeRepoButton.setIcon(KIcon(("list-remove", "remove")))
        self.initialize()

    def initialize(self):
        self.settings.onlyGuiApp.setChecked(self.config.showOnlyGuiApp())
        self.settings.showComponents.setChecked(self.config.showComponents())
        self.settings.showIsA.setChecked(self.config.showIsA())
        self.settings.intervalCheck.setChecked(self.config.updateCheck())
        self.settings.installUpdates.setChecked(self.config.installUpdatesAutomatically())
        self.settings.intervalSpin.setValue(self.config.updateCheckInterval())
        self.settings.systemTray.setChecked(self.config.systemTray())
        self.settings.hideIfNoUpdate.setChecked(self.config.hideTrayIfThereIsNoUpdate())

    def connectSignals(self):
        self.settings.onlyGuiApp.toggled.connect(self.markChanged)
        self.settings.showComponents.toggled.connect(self.markChanged)
        self.settings.showIsA.toggled.connect(self.markChanged)
        self.settings.intervalCheck.toggled.connect(self.markChanged)
        self.settings.intervalSpin.valueChanged.connect(self.markChanged)
        self.settings.installUpdates.toggled.connect(self.markChanged)
        self.settings.systemTray.toggled.connect(self.markChanged)
        self.settings.hideIfNoUpdate.toggled.connect(self.markChanged)

    def save(self):
        if not self.settings.onlyGuiApp.isChecked() == self.config.showOnlyGuiApp():
            self.config.setShowOnlyGuiApp(self.settings.onlyGuiApp.isChecked())
            self.settings.packagesChanged.emit()
        if not self.settings.showComponents.isChecked() == self.config.showComponents():
            self.config.setShowComponents(self.settings.showComponents.isChecked())
            self.settings.packageViewChanged.emit()
        if not self.settings.showIsA.isChecked() == self.config.showIsA():
            self.config.setShowIsA(self.settings.showIsA.isChecked())
            self.settings.packageViewChanged.emit()
        if not self.settings.systemTray.isChecked() == self.config.systemTray() or \
           not self.settings.intervalSpin.value() == self.config.updateCheckInterval() or \
           not self.settings.intervalCheck.isChecked() == self.config.updateCheck() or \
           not self.settings.hideIfNoUpdate.isChecked() == self.config.hideTrayIfThereIsNoUpdate():
            self.config.setSystemTray(self.settings.systemTray.isChecked())
            self.config.setUpdateCheck(self.settings.intervalCheck.isChecked())
            self.config.setUpdateCheckInterval(self.settings.intervalSpin.value())
            self.config.setHideTrayIfThereIsNoUpdate(self.settings.hideIfNoUpdate.isChecked())
            self.settings.traySettingChanged.emit()
        self.config.setInstallUpdatesAutomatically(self.settings.installUpdates.isChecked())


class CacheSettings(SettingsTab):
    def setupUi(self):
        self.initialize()

    def initialize(self):
        config = self.iface.getConfig()
        cache = config.get("general", "package_cache")
        cache_limit = config.get("general", "package_cache_limit")
        cache_limit = int(cache_limit) if cache_limit else 0
        cache_dir = config.get("directories", "cached_packages_dir")
        cache_dir = str(cache_dir) if cache_dir else '/var/cache/pisi/packages'
        # If pisi.conf does not have it yet, default is use package cache
        if not cache or cache == "True":
            enableCache = True
        else:
            enableCache = False
        self.cacheEnabled = enableCache
        self.cacheSize = cache_limit
        self.settings.cacheGroup.setEnabled(self.cacheEnabled)
        self.settings.useCacheCheck.setChecked(enableCache)
        self.settings.useCacheSpin.setValue(cache_limit)
        self.settings.cacheDirPath.setText(cache_dir)
        bandwidth_limit = config.get("general", "bandwidth_limit")
        bandwidth_limit = int(bandwidth_limit) if bandwidth_limit else 0
        self.settings.useBandwidthLimit.setChecked(not bandwidth_limit == 0)
        self.settings.bandwidthSpin.setValue(bandwidth_limit)

    def connectSignals(self):
        self.settings.clearCacheButton.clicked.connect(self.clearCache)
        self.settings.selectCacheDir.clicked.connect(self.selectCacheDir)
        self.settings.useCacheCheck.toggled.connect(self.markChanged)
        self.settings.useCacheSpin.valueChanged.connect(self.markChanged)
        self.settings.useBandwidthLimit.toggled.connect(self.markChanged)
        self.settings.bandwidthSpin.valueChanged.connect(self.markChanged)
        self.settings.openCacheDir.clicked.connect(self.openCacheDir)

    def openCacheDir(self):
        cache_dir = unicode(self.settings.cacheDirPath.text())
        if path.exists(cache_dir):
            QDesktopServices.openUrl(QUrl("file://%s" % cache_dir, QUrl.TolerantMode))

    def selectCacheDir(self):
        selected_dir = QFileDialog.getExistingDirectory(self.settings, self.tr("Open Directory"), "/",
                                                        QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks)
        if not selected_dir == '':
            if not selected_dir == self.settings.cacheDirPath.text():
                self.settings.cacheDirPath.setText(selected_dir)
                self.markChanged()

    def clearCache(self):
        if QMessageBox.Yes == QMessageBox.warning(self.settings,
                                                  self.tr("Warning"),
                                                  self.tr("All the cached packages will be deleted. Are you sure? "),
                                                  QMessageBox.Yes | QMessageBox.No):
            try:
                self.iface.clearCache(0)
            except Exception, e:
                self.settings.parent.cw.exceptionCaught(str(e))

    def save(self):
        self.iface.setCacheLimit(self.settings.useCacheCheck.isChecked(), self.settings.useCacheSpin.value())
        self.iface.setConfig("directories", "cached_packages_dir", unicode(self.settings.cacheDirPath.text()))
        if self.settings.useBandwidthLimit.isChecked():
            self.iface.setConfig("general", "bandwidth_limit", str(self.settings.bandwidthSpin.value()))
        else:
            self.iface.setConfig("general", "bandwidth_limit", "0")


class RepositorySettings(SettingsTab):
    def setupUi(self):
        self.settings.repoListView.horizontalHeader().setStretchLastSection(True)
        self.settings.repoListView.verticalHeader().hide()
        self.settings.re
montanier/pandora
bin/joinResults.py
Python
lgpl-3.0
2,368
0.029561
#!/usr/bin/python3

import os, sys, random
import argparse

# this script processes all the log simulations in one dir, and writes the
# values of one particular attribute into one single file.

def prepareProcess(inputDir, simulationFile, separator, output, attribute):
    output = open(output, 'w')
    simulation = open(inputDir+'/'+simulationFile, 'r')
    # headers
    output.write('var'+'\n')
    splittedHeader = simulation.readline().split(separator)
    for i in range(len(splittedHeader)):
        if splittedHeader[i] == attribute:
            indexAttribute = i
    for line in simulation:
        splittedLine = line.split(separator)
        output.write('step' + splittedLine[0]+'\n')
    output.close()
    return indexAttribute


def processSimulation(inputDir, simulationFile, separator, outputName, attributeIndex):
    output = open(outputName, 'r')
    outputTmp = open('tmp', 'w')
    simulation = open(inputDir+'/'+simulationFile, 'r')
    # header
    outputTmp.write(output.readline().strip('\n')+separator+simulationFile+'\n')
    simulation.readline()
    for simulationLine in simulation:
        previousLine = output.readline().strip('\n')+separator
        splittedLine = simulationLine.split(separator)
        value = splittedLine[attributeIndex]
        outputTmp.write(previousLine+value+'\n')
    simulation.close()
    output.close()
    outputTmp.close()
    os.rename('tmp', outputName)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', default='input', help='directory where simulated files are stored')
    parser.add_argument('-o', '--output', default='results.csv', help='output file')
    parser.add_argument('-s', '--separator', default=';', help='separator token between values')
    parser.add_argument('-a', '--attribute', default='Number of agents', help='name of the attribute column to process')
    args = parser.parse_args()

    outputFile = open(args.output, 'w')
    outputFile.close()

    header = 0
    for root, dirs, simulationFiles in os.walk(args.input):
        for simulationFile in simulationFiles:
            if not simulationFile.endswith('.csv'):
                continue
            if header == 0:
                attributeIndex = prepareProcess(args.input, simulationFile, args.separator, args.output, args.attribute)
                header = 1
            # print must be a function under the python3 shebang above; the
            # original used a Python 2 print statement, which would not parse.
            print('processing simulation results in file: ' + simulationFile)
            processSimulation(args.input, simulationFile, args.separator, args.output, attributeIndex)


if __name__ == "__main__":
    main()
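A hedged usage note for the script above (paths and the attribute name are illustrative):

# Hypothetical invocation of joinResults.py:
#
#   python3 joinResults.py -i simulations/ -o agents.csv -a 'Number of agents'
#
# Output layout: a 'var' header row, one 'stepN' row per timestep, and one
# extra column (named after each input CSV) appended per processed file.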
mF2C/COMPSs
tests/sources/python/8_argument_error/src/args_error.py
Python
apache-2.0
392
0.002551
#!/usr/bin/python
# -*- coding: utf-8 -*-

"""
PyCOMPSs Testbench Arguments Warnings
=====================================
"""

# Imports
import unittest

from modules.testArgumentError import testArgumentError


def main():
    suite = unittest.TestLoader().loadTestsFromTestCase(testArgumentError)
    unittest.TextTestRunner(verbosity=2).run(suite)


if __name__ == "__main__":
    main()
amitassaraf/angular2-django-boilerplate
{{cookiecutter.github_repository_name}}/src/{{cookiecutter.app_name}}/config/production.py
Python
mit
3,763
0.000797
import os

from configurations import values
from boto.s3.connection import OrdinaryCallingFormat

from {{cookiecutter.app_name}}.config.common import Common

try:
    # Python 2.x
    import urlparse
except ImportError:
    # Python 3.x
    from urllib import parse as urlparse


class Production(Common):

    # Honor the 'X-Forwarded-Proto' header for request.is_secure()
    # https://devcenter.heroku.com/articles/getting-started-with-django
    SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

    INSTALLED_APPS = Common.INSTALLED_APPS
    SECRET_KEY = values.SecretValue()

    # Postgres
    DATABASES = values.DatabaseURLValue('postgres://localhost/{{cookiecutter.app_name}}')

    # django-secure
    # http://django-secure.readthedocs.org/en/v0.1.2/settings.html
    INSTALLED_APPS += ("djangosecure", )

    SECURE_HSTS_SECONDS = 60
    SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
    SECURE_FRAME_DENY = values.BooleanValue(True)
    SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
    SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
    SESSION_COOKIE_SECURE = values.BooleanValue(False)
    SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
    SECURE_SSL_REDIRECT = values.BooleanValue(True)

    # Site
    # https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
    ALLOWED_HOSTS = ["*"]

    INSTALLED_APPS += ("gunicorn", )

    # Template
    # https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
    TEMPLATE_LOADERS = (
        ('django.template.loaders.cached.Loader', (
            'django.template.loaders.filesystem.Loader',
            'django.template.loaders.app_directories.Loader',
        )),
    )

    # Media files
    # http://django-storages.readthedocs.org/en/latest/index.html
    INSTALLED_APPS += ('storages',)
    DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'

    AWS_ACCESS_KEY_ID = values.Value('DJANGO_AWS_ACCESS_KEY_ID')
    AWS_SECRET_ACCESS_KEY = values.Value('DJANGO_AWS_SECRET_ACCESS_KEY')
    AWS_STORAGE_BUCKET_NAME = values.Value('DJANGO_AWS_STORAGE_BUCKET_NAME')
    AWS_AUTO_CREATE_BUCKET = True
    AWS_QUERYSTRING_AUTH = False
    MEDIA_URL = 'https://s3.amazonaws.com/{}/'.format(AWS_STORAGE_BUCKET_NAME)
    AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()

    # https://developers.google.com/web/fundamentals/performance/optimizing-content-efficiency/http-caching#cache-control
    # Response can be cached by browser and any intermediary caches (i.e. it is "public") for up to 1 day
    # 86400 = (60 seconds x 60 minutes x 24 hours)
    AWS_HEADERS = {
        'Cache-Control': 'max-age=86400, s-maxage=86400, must-revalidate',
    }

    # Static files
    STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'

    # Caching
    redis_url = urlparse.urlparse(os.environ.get('REDISTOGO_URL', 'redis://localhost:6379'))
    CACHES = {
        'default': {
            'BACKEND': 'redis_cache.RedisCache',
            'LOCATION': '{}:{}'.format(redis_url.hostname, redis_url.port),
            'OPTIONS': {
                'DB': 0,
                'PASSWORD': redis_url.password,
                'PARSER_CLASS': 'redis.connection.HiredisParser',
                'CONNECTION_POOL_CLASS': 'redis.BlockingConnectionPool',
                'CONNECTION_POOL_CLASS_KWARGS': {
                    'max_connections': 50,
                    'timeout': 20,
                }
            }
        }
    }

    # Django RQ production settings
    RQ_QUEUES = {
        'default': {
            'URL': os.getenv('REDISTOGO_URL', 'redis://localhost:6379'),
            'DB': 0,
            'DEFAULT_TIMEOUT': 500,
        },
    }

    Common.VERSATILEIMAGEFIELD_SETTINGS['create_images_on_demand'] = False
stackforge/os-ansible-deployment
playbooks/library/git_requirements.py
Python
apache-2.0
10,270
0
#!/usr/bin/python
# -*- coding: utf-8 -*-

from ansible.module_utils.basic import AnsibleModule

import git
import itertools
import multiprocessing
import os
import signal
import time

DOCUMENTATION = """
---
module: git_requirements
short_description: Module to run a multithreaded git clone
options:
    repo_info:
        description:
            - List of repo information dictionaries containing at a minimum
              a key entry "src" with the source git URL to clone for each
              repo. In these dictionaries, one can further specify:
              "path" - destination clone location
              "version" - git version to checkout
              "refspec" - git refspec to checkout
              "depth" - clone depth level
              "force" - require git clone uses "--force"
    default_path:
        description: Default git clone path (str) in case not specified on
                     an individual repo basis in repo_info. Defaults to
                     "master". Not required.
    default_version:
        description: Default git version (str) in case not specified on an
                     individual repo basis in repo_info. Defaults to
                     "master". Not required.
    default_refspec:
        description: Default git repo refspec (str) in case not specified
                     on an individual repo basis in repo_info. Defaults to
                     "". Not required.
    default_depth:
        description: Default clone depth (int) in case not specified on an
                     individual repo basis. Defaults to 10. Not required.
    retries:
        description: Integer number of retries allowed in case of git clone
                     failure. Defaults to 1. Not required.
    delay:
        description: Integer time delay (seconds) between git clone retries
                     in case of failure. Defaults to 0. Not required.
    force:
        description: Boolean. Apply --force flags to git clones wherever
                     possible. Defaults to False. Not required.
    core_multiplier:
        description: Integer multiplier on the number of cores present on
                     the machine to use for multithreading. For example, on
                     a 2 core machine, a multiplier of 4 would use 8
                     threads. Defaults to 4. Not required.
"""

EXAMPLES = r"""
- name: Clone repos
  git_requirements:
    repo_info: "[{'src':'https://github.com/ansible/',
                  'name': 'ansible'
                  'dest': '/etc/opt/ansible'}]"
"""


def init_signal():
    signal.signal(signal.SIGINT, signal.SIG_IGN)


def check_out_version(repo, version, pull=False, force=False,
                      refspec=None, tag=False, depth=10):
    try:
        repo.git.fetch(tags=tag, force=force, refspec=refspec, depth=depth)
    except Exception as e:
        return ["Failed to fetch %s\n%s" % (repo.working_dir, str(e))]

    try:
        repo.git.checkout(version, force=force)
    except Exception as e:
        return ["Failed to check out version %s for %s\n%s" % (version, repo.working_dir, str(e))]

    if repo.is_dirty(untracked_files=True) and force:
        try:
            repo.git.clean(force=force)
        except Exception as e:
            return ["Failed to clean up repository% s\n%s" % (repo.working_dir, str(e))]

    if pull:
        try:
            repo.git.pull(force=force, refspec=refspec, depth=depth)
        except Exception as e:
            return ["Failed to pull repo %s\n%s" % (repo.working_dir, str(e))]
    return []


def pull_wrapper(info):
    role_info = info
    retries = info[1]["retries"]
    delay = info[1]["delay"]
    for i in range(retries):
        success = pull_role(role_info)
        if success:
            return True
        else:
            time.sleep(delay)
    info[2].append(["Role {0} failed after {1} retries\n".format(role_info[0], retries)])
    return False


def pull_role(info):
    role, config, failures = info
    required_version = role["version"]
    version_hash = False
    if 'version' in role:
        # If the version is the length of a hash then treat is as one
        if len(required_version) == 40:
            version_hash = True

    def get_repo(dest):
        try:
            return git.Repo(dest)
        except Exception:
            failtxt = "Role in {0} is broken/not a git repo.".format(role["dest"])
            failtxt += "Please delete or fix it manually"
            failures.append(failtxt)
            return False

    # if repo exists
    if os.path.exists(role["dest"]):
        repo = get_repo(role["dest"])
        if not repo:
            return False  # go to next role
        repo_url = list(repo.remote().urls)[0]
        if repo_url != role["src"]:
            repo.remote().set_url(role["src"])

        # if they want master then fetch, checkout and pull to stay at latest
        # master
        if required_version == "master":
            fail = check_out_version(repo, required_version, pull=True,
                                     force=config["force"],
                                     refspec=role["refspec"],
                                     depth=role["depth"])
        # If we have a hash then reset it to
        elif version_hash:
            fail = check_out_version(repo, required_version,
                                     force=config["force"],
                                     refspec=role["refspec"],
                                     depth=role["depth"])
        else:
            # describe can fail in some cases so be careful:
            try:
                current_version = repo.git.describe(tags=True)
            except Exception:
                current_version = ""
            if current_version == required_version and not config["force"]:
                fail = []
                pass
            else:
                fail = check_out_version(repo, required_version,
                                         force=config["force"],
                                         refspec=role["refspec"],
                                         depth=role["depth"],
                                         tag=True)
    else:
        try:
            # If we have a hash id then treat this a little differently
            if version_hash:
                git.Repo.clone_from(role["src"], role["dest"], branch='master',
                                    no_single_branch=True, depth=role["depth"])
                repo = get_repo(role["dest"])
                if not repo:
                    return False  # go to next role
                fail = check_out_version(repo, required_version,
                                         force=config["force"],
                                         refspec=role["refspec"],
                                         depth=role["depth"])
            else:
                git.Repo.clone_from(role["src"], role["dest"],
                                    branch=required_version,
                                    depth=role["depth"],
                                    no_single_branch=True)
                fail = []
        except Exception as e:
            fail = ('Failed cloning repo %s\n%s' % (role["dest"], str(e)))

    if fail == []:
        return True
    else:
        failures.append(fail)
        return False


def set_default(dictionary, key, defaults):
    if key not in dictionary.keys():
        dictionary[key] = defaults[key]


def main():
    # Define variables
    failures = multiprocessing.Manager().list()

    # Data we can pass in to the module
    fields = {
        "repo_info": {"required": True, "type": "list"},
        "default_path": {"required": True, "type": "str"},
        "default_version": {"required": False, "type": "str", "default": "master"},
        "default_refspec": {"required": False, "type": "str", "default": None},
        "default_depth": {"required": False, "type": "int", "default": 10},
        "retr
krahman/node-java
touch.py
Python
mit
68
0.073529
import os

f = open('depsVerified', 'w')
f.write('ok')
f.close()
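An equivalent, more idiomatic Python 3 sketch of the same marker-file write, using pathlib instead of manual open/close:

from pathlib import Path

# Writes 'ok' and closes the file in one call.
Path('depsVerified').write_text('ok')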
SmartcitySantiagoChile/onlineGPS
beacon/models.py
Python
mit
2,058
0.006317
from django.db import models
from django.utils import timezone

# Create your models here.

def formatDateTime(dateTime):
    return timezone.localtime(dateTime).strftime("%Y-%m-%d %H:%M:%S")


class Beacon(models.Model):
    macAddr = models.CharField(max_length=20, unique=True)
    uuid = models.UUIDField(editable=False)
    major = models.CharField(max_length=10, null=False)
    minor = models.CharField(max_length=10, null=False)

    def getDict(self):
        dict = {}
        dict['macAddr'] = self.macAddr
        dict['uuid'] = str(self.uuid)
        dict['major'] = self.major
        dict['minor'] = self.minor
        return dict

    class Meta:
        unique_together = ('uuid', 'major', 'minor')


class DetectorDevice(models.Model):
    """ device which detects beacons, now only cellphones """
    externalId = models.CharField(max_length=32, unique=True)

    def getDict(self):
        dict = {}
        dict['deviceId'] = self.externalId
        return dict

    def __str__(self):
        return self.externalId


class BeaconLog(models.Model):
    time = models.DateTimeField(null=False)
    rssi = models.IntegerField(null=False)
    measurePower = models.IntegerField(null=False)
    beacon = models.ForeignKey(Beacon, on_delete=models.CASCADE)
    device = models.ForeignKey(DetectorDevice, on_delete=models.CASCADE)

    def __str__(self):
        return "time: {} | rssi: {} | measurePower: {}".format(
            self.time, self.rssi, self.measurePower)

    def getDict(self):
        dict = {}
        dict['time'] = formatDateTime(self.time)
        dict['rssi'] = self.rssi
        dict['measurePower'] = self.measurePower
        return dict


class Event(models.Model):
    time = models.DateTimeField(null=False)
    event = models.TextField(null=False)
    device = models.ForeignKey(DetectorDevice, on_delete=models.CASCADE)

    def getDict(self):
        dict = {}
        dict['time'] = formatDateTime(self.time)
        dict['event'] = self.event
        return dict
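A hedged usage sketch for the models above (all field values are made up): record one sighting of a beacon by a phone, then serialize it with getDict.

import uuid

from django.utils import timezone

from beacon.models import Beacon, BeaconLog, DetectorDevice

beacon = Beacon.objects.create(
    macAddr='AA:BB:CC:DD:EE:FF', uuid=uuid.uuid4(), major='1', minor='42')
device = DetectorDevice.objects.create(externalId='phone-0001')
log = BeaconLog.objects.create(
    time=timezone.now(), rssi=-67, measurePower=-59,
    beacon=beacon, device=device)
print(log.getDict())  # {'time': '...', 'rssi': -67, 'measurePower': -59}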
kinoreel/kino-gather
processes/insert_movies.py
Python
mit
3,618
0.003317
import json
import os

from processes.postgres import Postgres
from processes.gather_exception import GatherException

try:
    DB_SERVER = os.environ['DB_SERVER']
    DB_PORT = os.environ['DB_PORT']
    DB_DATABASE = os.environ['DB_DATABASE']
    DB_USER = os.environ['DB_USER']
    DB_PASSWORD = os.environ['DB_PASSWORD']
except KeyError:
    try:
        from processes.GLOBALS import DB_SERVER, DB_PORT, DB_DATABASE, DB_USER, DB_PASSWORD
    except ImportError:
        print("No parameters provided")
        exit()


class Main(object):

    def __init__(self):
        self.pg = Postgres(DB_SERVER, DB_PORT, DB_DATABASE, DB_USER, DB_PASSWORD)
        self.source_topic = 'youtube'
        self.destination_topic = 'movies'

    def run(self, data):
        """
        This inserts the relevant json information
        into the table kino.movies.
        :param data: json data holding information on films.
        """

        imdb_id = data['imdb_id']
        omdb_movie_data = data['omdb_main']
        tmdb_movie_data = data['tmdb_main']

        sql = """insert into kino.languages(language)
                 select y.language
                   from json_to_recordset(%s) x (original_language varchar(1000))
                   join kino.iso2language y
                     on x.original_language = y.iso3166
                  where language not in (select language from kino.languages)"""

        self.pg.pg_cur.execute(sql, (json.dumps(tmdb_movie_data),))
        self.pg.pg_conn.commit()

        # We delete our record from kino.movies first.
        # Due to foreign keys with 'on delete cascade', this clears all records from
        # the database associated with that imdb_id.
        sql = """delete from kino.movies
                  where imdb_id = '{0}'""".format(imdb_id)
        self.pg.pg_cur.execute(sql)
        self.pg.pg_conn.commit()

        # We also delete any records in errored attached to this imdb_id, as
        # we have successfully gathered information for the film.
        sql = """delete from kino.errored
                  where imdb_id = '{0}'""".format(imdb_id)
        self.pg.pg_cur.execute(sql)
        self.pg.pg_conn.commit()

        sql = """insert into kino.movies (imdb_id, title, runtime, rated, released, orig_language, plot, tstamp)
                 select x.imdb_id
                      , y.title
                      , y.runtime
                      , x.rated
                      , y.release_date::date
                      , z.language
                      , y.plot
                      , CURRENT_DATE
                   from json_to_recordset(%s) x ( imdb_id varchar(15), rated varchar(10) )
                   join json_to_recordset(%s) y ( imdb_id varchar(15), title varchar(1000), runtime integer
                                                , release_date date, plot varchar(4000), original_language varchar(1000))
                     on x.imdb_id = y.imdb_id
                   join kino.iso2language z
                     on y.original_language = z.iso3166"""

        self.pg.pg_cur.execute(sql, (json.dumps(omdb_movie_data), json.dumps(tmdb_movie_data)))

        if self.pg.pg_cur.rowcount != 1:
            raise GatherException(omdb_movie_data[0]['imdb_id'], 'No insert into movies, most likely due to a new language')

        self.pg.pg_conn.commit()

        sql = """insert into kino.kino_ratings (imdb_id, rating)
                 values (%s, 3)
                 on conflict do nothing"""

        self.pg.pg_cur.execute(sql, (imdb_id,))
        self.pg.pg_conn.commit()

        return data
damienjones/sculpt-model-tools
setup.py
Python
lgpl-2.1
1,198
0.022538
"""A modest set of tools to work with
Django models.""" # Always prefer setuptools over distutils from setuptools import setup, find_packages # To use a consistent encoding from codecs import open from os import path here = path.abspath(path.dirname(__file__)) # Get the long description from the relevant file # with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f: # long_description = f.read() setup( name='sculpt.mo
del_tools', version='0.1', description='A modest set of tools to work with Django models.', long_description='', url='https://github.com/damienjones/sculpt-model-tools', author='Damien M. Jones', author_email='damien@codesculpture.com', license='LGPLv2', classifiers=[ 'Development Status :: 3 - Alpha', 'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', ], keywords='', packages=find_packages(), install_requires=[ 'sculpt-common>=0.2', ], # package_data={}, # data_files=[], # entry_points={}, # console_scripts={}, )
Jumpscale/jumpscale_core8
apps/agentcontroller/jumpscripts/jumpscale/network_info.py
Python
apache-2.0
312
0.003205
from JumpScale import j

descr = """
This jumpscript returns network info
"""

category = "monitoring"
organization = "jumpscale"
author = "kristof@incubaid.com"
license = "bsd"
version = "1.0"
roles = []


def action():
    return j.sal.nettools.getNetworkInfo()


if __name__ == "__main__":
    print(action())
muhkuh-sys/org.muhkuh.tools-flasher
jonchki/dulwich/pack.py
Python
gpl-2.0
68,723
0.000029
# pack.py -- For dealing with packed git objects. # Copyright (C) 2007 James Westby <jw+debian@jameswestby.net> # Copyright (C) 2008-2013 Jelmer Vernooij <jelmer@jelmer.uk> # # Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU # General Public License as public by the Free Software Foundation; version 2.0 # or (at your option) any later version. You can redistribute it and/or # modify it under the terms of either of these two licenses. # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # You should have received a copy of the licenses; if not, see # <http://www.gnu.org/licenses/> for a copy of the GNU General Public License # and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache # License, Version 2.0. # """Classes for dealing with packed git objects. A pack is a compact representation of a bunch of objects, stored using deltas where possible. They have two parts, the pack file, which stores the data, and an index that tells you where the data is. To find an object you look in all of the index files 'til you find a match for the object name. You then use the pointer got from this as a pointer in to the corresponding packfile. """ from collections import defaultdict import binascii from io import BytesIO, UnsupportedOperation from collections import ( deque, ) import difflib import struct from itertools import chain try: from itertools import imap, izip except ImportError: # Python3 imap = map izip = zip import os import sys from hashlib import sha1 from os import ( SEEK_CUR, SEEK_END, ) from struct import unpack_from import zlib try: import mmap except ImportError: has_mmap = False else: has_mmap = True # For some reason the above try, except fails to set has_mmap = False for plan9 if sys.platform == 'Plan9': has_mmap = False from dulwich.errors import ( # noqa: E402 ApplyDeltaError, ChecksumMismatch, ) from dulwich.file import GitFile # noqa
: E402 from dulwich.lru_cache import ( # noqa: E402 LRUSizeCache, ) from dulwich.objects import ( # noqa: E402 ShaFile, hex_to_sha, sha_to_hex, object_header, ) OFS_DELTA = 6 REF_DELTA = 7 DELTA_TYPES = (OFS_DELTA, REF_DELTA) DEFAULT_PACK_DELTA_WINDOW_SIZE =
10 def take_msb_bytes(read, crc32=None): """Read bytes marked with most significant bit. :param read: Read function """ ret = [] while len(ret) == 0 or ret[-1] & 0x80: b = read(1) if crc32 is not None: crc32 = binascii.crc32(b, crc32) ret.append(ord(b[:1])) return ret, crc32 class UnpackedObject(object): """Class encapsulating an object unpacked from a pack file. These objects should only be created from within unpack_object. Most members start out as empty and are filled in at various points by read_zlib_chunks, unpack_object, DeltaChainIterator, etc. End users of this object should take care that the function they're getting this object from is guaranteed to set the members they need. """ __slots__ = [ 'offset', # Offset in its pack. '_sha', # Cached binary SHA. 'obj_type_num', # Type of this object. 'obj_chunks', # Decompressed and delta-resolved chunks. 'pack_type_num', # Type of this object in the pack (may be a delta). 'delta_base', # Delta base offset or SHA. 'comp_chunks', # Compressed object chunks. 'decomp_chunks', # Decompressed object chunks. 'decomp_len', # Decompressed length of this object. 'crc32', # CRC32. ] # TODO(dborowitz): read_zlib_chunks and unpack_object could very well be # methods of this object. def __init__(self, pack_type_num, delta_base, decomp_len, crc32): self.offset = None self._sha = None self.pack_type_num = pack_type_num self.delta_base = delta_base self.comp_chunks = None self.decomp_chunks = [] self.decomp_len = decomp_len self.crc32 = crc32 if pack_type_num in DELTA_TYPES: self.obj_type_num = None self.obj_chunks = None else: self.obj_type_num = pack_type_num self.obj_chunks = self.decomp_chunks self.delta_base = delta_base def sha(self): """Return the binary SHA of this object.""" if self._sha is None: self._sha = obj_sha(self.obj_type_num, self.obj_chunks) return self._sha def sha_file(self): """Return a ShaFile from this object.""" return ShaFile.from_raw_chunks(self.obj_type_num, self.obj_chunks) # Only provided for backwards compatibility with code that expects either # chunks or a delta tuple. def _obj(self): """Return the decompressed chunks, or (delta base, delta chunks).""" if self.pack_type_num in DELTA_TYPES: return (self.delta_base, self.decomp_chunks) else: return self.decomp_chunks def __eq__(self, other): if not isinstance(other, UnpackedObject): return False for slot in self.__slots__: if getattr(self, slot) != getattr(other, slot): return False return True def __ne__(self, other): return not (self == other) def __repr__(self): data = ['%s=%r' % (s, getattr(self, s)) for s in self.__slots__] return '%s(%s)' % (self.__class__.__name__, ', '.join(data)) _ZLIB_BUFSIZE = 4096 def read_zlib_chunks(read_some, unpacked, include_comp=False, buffer_size=_ZLIB_BUFSIZE): """Read zlib data from a buffer. This function requires that the buffer have additional data following the compressed data, which is guaranteed to be the case for git pack files. :param read_some: Read function that returns at least one byte, but may return less than the requested size. :param unpacked: An UnpackedObject to write result data to. If its crc32 attr is not None, the CRC32 of the compressed bytes will be computed using this starting CRC32. After this function, will have the following attrs set: * comp_chunks (if include_comp is True) * decomp_chunks * decomp_len * crc32 :param include_comp: If True, include compressed data in the result. :param buffer_size: Size of the read buffer. :return: Leftover unused data from the decompression. 
:raise zlib.error: if a decompression error occurred. """ if unpacked.decomp_len <= -1: raise ValueError('non-negative zlib data stream size expected') decomp_obj = zlib.decompressobj() comp_chunks = [] decomp_chunks = unpacked.decomp_chunks decomp_len = 0 crc32 = unpacked.crc32 while True: add = read_some(buffer_size) if not add: raise zlib.error('EOF before end of zlib stream') comp_chunks.append(add) decomp = decomp_obj.decompress(add) decomp_len += len(decomp) decomp_chunks.append(decomp) unused = decomp_obj.unused_data if unused: left = len(unused) if crc32 is not None: crc32 = binascii.crc32(add[:-left], crc32) if include_comp: comp_chunks[-1] = add[:-left] break elif crc32 is not None: crc32 = binascii.crc32(add, crc32) if crc32 is not None: crc32 &= 0xffffffff if decomp_len != unpacked.decomp_len: raise zlib.error('decompressed data does not match expected size') unpacked.crc32 = crc32 if include_comp: unpacked.comp_chunks = comp_chunks return unused def iter_sha1(iter): """Return the hexdigest of the SHA1 over a set of names. :param iter: Iterator over string objects :ret
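take_msb_bytes above reads a variable-length quantity in which the high bit (0x80) of each byte marks "more bytes follow". A small self-contained sketch of that convention, with invented byte values:

# Sketch of the MSB-continuation convention used by take_msb_bytes;
# the byte values are invented for illustration.
from io import BytesIO

buf = BytesIO(bytes([0x91, 0x2e, 0xff]))  # 0x91 has the high bit set
ret = []
while not ret or ret[-1] & 0x80:
    ret.append(buf.read(1)[0])
# ret == [0x91, 0x2e]: one continuation byte, then a final byte;
# the trailing 0xff is left unread in the stream.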
SCECcode/BBP
bbp/comps/bbtoolbox_cfg.py
Python
apache-2.0
5,200
0.000769
#!/usr/bin/env python """ Copyright 2010-2018 University Of Southern California Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distri
buted on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. This module defines the configuration parameters for the BBToolbox script """ from __future__ impor
t division, print_function # Import Python modules import os import sys # Import Broadband modules import cc import bband_utils class BBToolboxCfg(object): """ Define the configuration parameters for the SDSU BBToolbox program """ cfgdict = {} def getval(self, attr): try: val = self.cfgdict[attr] except KeyError: print("Invalid Source File - Missing attribute: %s" % (attr)) print("Exiting") sys.exit(1) return val def parse_src(self, a_srcfile): """ This function calls bband_utils's parse property file function to get a dictionary of key, value pairs and then looks for a the parameters needed by bbtoolbox """ self.cfgdict = bband_utils.parse_properties(a_srcfile) val = self.getval("depth_to_top") self.DEPTH_TO_TOP = float(val) val = self.getval("fault_length") self.LENGTH = float(val) val = self.getval("dip") self.DIP = float(val) val = self.getval("rake") self.RAKE = float(val) val = self.getval("hypo_along_stk") self.HYPO_ALONG_STK = float(val) val = self.getval("hypo_down_dip") self.HYPO_DOWN_DIP = float(val) val = self.getval("magnitude") self.MAG = float(val) val = self.getval("seed") self.SEED = int(float(val)) # Now look for the optional grid parameters if 'grid_x' in self.cfgdict: self.grid_x = float(self.getval("grid_x")) if 'grid_y' in self.cfgdict: self.grid_y = float(self.getval("grid_y")) if 'grid_z' in self.cfgdict: self.grid_z = float(self.getval("grid_z")) # # Read parameters out of the source file to obtain parameters # needed by the BBcoda codes # fcodes = cc.find_fx_fy_fz(self.HYPO_ALONG_STK, self.LENGTH, self.DIP, self.HYPO_DOWN_DIP, self.DEPTH_TO_TOP) self.fsx = fcodes[0] self.fsy = fcodes[1] self.fsz = fcodes[2] #print ("ETH conversion from hypalongstk: " # "%f flength: %f dip: %f hypdowndip: %f depthtotop: %f\n" % # (self.HYPO_ALONG_STK, # self.LENGTH, # self.DIP, # self.HYPO_DOWN_DIP, # self.DEPTH_TO_TOP)) #print ("resulting fsx: %f fxy: %f fsz: %s\n" % (self.fsx, # self.fsy, # self.fsz)) def calculate_stress(self): """ This function calculates the stress parameters for SDSU based on the depth of the fault. These values are calibrated for use in Eastern North America """ stress = 16.0 * self.DEPTH_TO_TOP + 225 stress = stress * 10**6 return stress def __init__(self, a_srcfile=None): """ Set up some parameters for BBToolbox """ self.MAG = None self.grid_x = None self.grid_y = None self.grid_z = 125.0 self.copy_lf_seismograms = True # Parse src file, if given if a_srcfile: self.parse_src(a_srcfile) self.MODALITY = 1 # GS_FLAG: Don't change it here, override it in the velocity # model config file using a 'CODEBASE_SDSU_GS_FLAG = XXX' line # 1: Western US (active region), # 2: Eastern NA (stable region), # 3: Japan self.GS_FLAG = 1 # NGAW_FLAG: Don't change it here, override it in the velocity # model config file using a 'CODEBASE_SDSU_NGAW_FLAG = XXX' line # 1: NGA-WEST1 # 2: NGA-WEST2 self.NGAW_FLAG = 2 self.KAPPA = 0.04 self.Q_CODA = 150.0 self.FDEC = 0.8 self.AFAC = 41.0 self.BFAC = 34.0 self.SOURCE_MECH = "rs" self.SOURCE_FUNC = "dreg" self.VERBOSE = "on" self.TR_SCA = 0.075 self.STR_FAC = 50.e6 # 06/10/11: Sandarsh MK # Note: Setting FMAX = 20.00 Hz will # cause BBtoolbox to produce NaNs in 000 and 090 seismograms. self.FMAX = 100.00 if __name__ == "__main__": BBCODA2 = BBToolboxCfg() print("Created Test Config Class: %s" % (os.path.basename(sys.argv[0])))
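calculate_stress above is a linear depth scaling, stress = (16.0 * depth_to_top + 225) MPa, converted to Pa by the 10**6 factor. A quick worked check with an arbitrary depth:

# Arbitrary example depth (units follow the source file's convention);
# not a value taken from the repository.
depth_to_top = 5.0
stress_mpa = 16.0 * depth_to_top + 225  # 305.0 MPa
stress_pa = stress_mpa * 10**6          # 305000000.0 Pa, what the method returns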
zouppen/simulavr
regress/test_opcodes/test_LD_X_decr.py
Python
gpl-2.0
3,080
0.017857
#! /usr/bin/env python ############################################################################### # # simulavr - A simulator for the Atmel AVR family of microcontrollers. # Copyright (C) 2001, 2002 Theodore A. Roth # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # ############################################################################### # # $Id: test_LD_X_decr.py,v 1.1 2004/07/31 00:59:11 rivetwa Exp $ # """Test the LD_X_decr opcode. """ import base_test from registers import Reg, SREG class LD_X_decr_TestFail(base_test.TestFail): pass class base_LD_X_decr(base_test.opcode_test): """Generic test case for testing LD_X_decr opcode. LD_X_decr - Load Indirect from data space to Register using index X and pre decrement X. Operation: X <- X - 1 then Rd <- (X) opcode is '1001 000d dddd 1110' where 0 <= d <= 31 and d != {26,27} Only registers PC, R26, R27 and Rd should be changed. """ def setup(self): # Set the register values self.setup_regs[self.Rd] = 0 self.setup_regs[Reg.R26] = (self.X & 0xff) self.setup_regs[Reg.R27] = ((self.X >> 8) & 0xff) # set up the val in memory (memory is read after X is decremented, # thus we need to write to memory _at_ X - 1) self.mem_byte_write( self.X - 1, self.Vd ) # Return the raw opcode return 0x900E | (self.Rd << 4) def analyze_results(self): self.reg_changed.extend( [self.Rd, Reg.R26, Reg.R27] ) # check that result is correct expect = self.Vd got = self.anal_regs[self.Rd] if expect != got: self.fail('LD_X_decr: expect=%02x, got=%02x' % (expect, got)) # check that X was decremented expect = self.X - 1 got = (self.anal_regs[Reg.R26] & 0xff) | ((self.anal_regs[Reg.R27] << 8) & 0xff00) if expect !
= got: self.fail('LD_X_decr X not decr: expect=%04x, got=%04x' % (expect, got)) # # Template code for test case. # The fail method will raise a test specific exception. # template = """ class LD_X_decr_r%02d_X%04x_v%02x_TestFail(LD_X_decr_TestFail): pass class test_LD_X_decr_r%02d_X%04x_v%02x(base_LD_X_decr): Rd = %d X = 0x%x Vd = 0x%x def fail(self,s): raise LD_X_decr_r%02d_X%04x_v%02x_TestFail, s """ # # automagically generate the test_LD_X_decr_rNN_vXX class definitions. # # Operation is un
defined for d = 26 and d = 27. # code = '' for d in range(0,26)+range(28,32): for x in (0x10f, 0x1ff): for v in (0xaa, 0x55): args = (d,x,v)*4 code += template % args exec code
kernsuite-debian/lofar
CEP/Pipeline/recipes/sip/master/imager_finalize.py
Python
gpl-3.0
7,155
0.004612
import sys import lofarpipe.support.lofaringredient as ingredient from lofarpipe.support.baserecipe import BaseRecipe from lofarpipe.support.remotecommand import RemoteCommandRecipeMixIn from lofarpipe.support.remotecommand import ComputeJob from lofarpipe.support.data_map import DataMap, validate_data_maps, \ align_data_maps class imager_finalize(BaseRecipe, RemoteCommandRecipeMixIn): """ The Imager_finalizer performs a number of steps needed for integrating the msss_imager_pipeline in the LOFAR framework: It places the image on the output location in the correcy image type (hdf5). It also adds some meta data collected from the individual measurement sets and the found data. This recipe does not have positional commandline arguments """ inputs = { 'awimager_output_map': ingredient.FileField( '--awimager-output-mapfile', help = """Mapfile containing (host, path) pairs of created sky images """ ), 'ms_per_image_map': ingredient.FileField( '--ms-per-image-map', help = '''Mapfile containing (host, path) pairs of mapfiles used to create image on that node''' ), 'sourcelist_map': ingredient.FileField( '--sourcelist-map', help = '''mapfile containing (host, path) pairs to a list of sources found in the image''' ), 'sourcedb_map': ingredient.FileField( '--sourcedb_map', help = '''mapfile containing (host, path) pairs to a db of sources found in the image''' ), 'target_mapfile': ingredient.FileField( '--target-mapfile', help = "Mapfile containing (host, path) pairs to the concatenated and" "combined measurement set, the source for the actual sky image" ), 'minbaseline': ingredient.FloatField( '--minbaseline', help = '''Minimum length of the baseline used for the images''' ), 'maxbaseline': ingredient.FloatField( '--maxbaseline', help = '''Maximum length of the baseline used for the images''' ), 'output_image_mapfile': ingredient.FileField( '--output-image-mapfile', help = '''mapfile containing (host, path) pairs with the final output image (hdf5) location''' ), 'processed_ms_dir': ingredient.StringField( '--processed-ms-dir', help = '''Path to directory for processed measurment sets''' ), 'fillrootimagegroup_exec': ingredient.ExecField( '--fillrootimagegroup_exec', help = '''Full path to the fillRootImageGroup executable''' ), 'placed_image_mapfile': ingredient.FileField( '--placed-image-mapfile', help = "location of mapfile with proced and correctly placed," " hdf5 images" ) } outputs = { 'placed_image_mapfile': ingredient.StringField() } def go(self): """ Steps: 1. Load and validate the input datamaps 2. Run the node parts of the recipe 3. Validate node output and format the recipe output """ super(imager_finalize, self).go() # ********************************************************************* # 1. 
Load the datamaps awimager_output_map = DataMap.load( self.inputs["awimager_output_map"]) ms_per_image_map = DataMap.load( self.inputs["ms_per_image_map"]) sourcelist_map = DataMap.load(self.inputs["sourcelist_map"]) sourcedb_map = DataMap.load(self.inputs["sourcedb_map"]) target_mapfile = DataMap.load(self.inputs["target_mapfile"]) output_image_mapfile = DataMap.load( self.inputs["output_image_mapfile"]) processed_ms_dir = self.inputs["processed_ms_dir"] fillrootimagegroup_exec = self.inputs["fillrootimagegroup_exec"] # Align the skip fields align_data_maps(awimager_output_map, ms_per_image_map, sourcelist_map, target_mapfile, output_image_mapfile, sourcedb_map) # Set the correct iterator sourcelist_map.iterator = awimager_output_map.iterator = \ ms_per_image_map.iterator = target_mapfile.iterator = \ output_image_mapfile.iterator = sourcedb_map.iterator = \ DataMap.SkipIterator # ********************************************************************* # 2. Run the node side of the recupe command = " python3 %s" % (self.__file__.replace("master", "nodes")) jobs = [] for (awimager_output_item, ms_per_image_item, sourcelist_item, target_item, output_image_item, sourcedb_item) in zip( awimager_output_map, ms_per_image_map, sourcelist_map, target_mapfile, output_image_mapfile, sourcedb_map): # collect the files as argument arguments = [awimager_output_item.file, ms_per_image_item.file, sourcelist_item.file, target_item.file, output_image_item.file, self.inputs["minbaseline"], self.inputs["maxbaseline"], processed_ms_dir, fillrootimagegroup_exec, self.environment, sourcedb_item.file] self.logger.info( "Starting finalize with the folowing args: {0}".format( arguments)) jobs.append(ComputeJob(target_item.host, command, arguments)) self._schedule_jobs(jobs) # ********************************************************************* # 3. Validate the performance of the node script and assign output succesful_run = False for (job, output_image_item) in zip(jobs, output_image_mapfile): if not "hdf5" in job.results: # If the output failed set the skip to True output_image_item.skip = True else: succesful_run = True # signal that we have at least a single run finished ok. # No need to set skip in this case if not succesful_run: self.logger.warn("Failed finalizer node run detected") return 1 output_image_mapfile.save(self.inputs['placed_image_mapfile']) self.logger.debug( "Wrote mapfile contain
ing placed hdf5 imag
es: {0}".format( self.inputs['placed_image_mapfile'])) self.outputs["placed_image_mapfile"] = self.inputs[ 'placed_image_mapfile'] return 0 if __name__ == '__main__': sys.exit(imager_finalize().main())
Outernet-Project/librarian
librarian/core/utils/iterables.py
Python
gpl-3.0
2,296
0
""" Functions and decorators for making sure the parameters they work on are of iterable types. Copyright 2014-2015, Outernet Inc. Some rights reserved. This software is free software licensed under the terms of GPLv3. See COPYING file that comes with the source code, or http://www.gnu.org/licenses/gpl.txt. """ import functools import numbers def is_integral(obj): """ Determine whether the passed in object is a number of integral type. """ return isinstance(obj, numbers.Integral) def is_string(ob
j): """ Det
ermine if the passed in object is a string. """ try: return isinstance(obj, basestring) except NameError: return isinstance(obj, str) def is_iterable(obj): """ Determine if the passed in object is an iterable, but not a string or dict. """ return (hasattr(obj, '__iter__') and not isinstance(obj, dict) and not is_string(obj)) def as_iterable(params=None): """ Make sure the marked parameters are iterable. In case a single-unwrapped parameter is found among them (e.g. an int, string, ...), wrap it in a list and forward like that to the wrapped function. The marked parameters, if not explicitly specified, defaults to the 1st argument (``args[1]``). """ # set up default converter and separate positional from keyword arguments params = params or [1] indexes = [i for i in params if is_integral(i)] keys = [k for k in params if is_string(k)] def decorator(fn): @functools.wraps(fn) def wrapper(*args, **kwargs): # patch positional arguments, if needed if indexes: # copy `args` into a new list and wrap it's elements in a list # on the specified indexes, which are not iterables themselves args = [[x] if i in indexes and not is_iterable(x) else x for (i, x) in enumerate(args)] # patch keyword arguments, if needed if keys: for key in keys: if not is_iterable(kwargs[key]): kwargs[key] = [kwargs[key]] # invoke ``fn`` with patched parameters return fn(*args, **kwargs) return wrapper return decorator
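A usage sketch of the decorator; with the default params=[1] it targets args[1], i.e. the first argument after self on a method. The class and values below are invented:

# Invented example built only on the helpers defined above.
class Archive(object):
    @as_iterable()  # wraps args[1] in a list when it is not iterable
    def remove(self, ids):
        return [str(item_id) for item_id in ids]

archive = Archive()
assert archive.remove('abc') == ['abc']          # string wrapped, not split
assert archive.remove(['a', 'b']) == ['a', 'b']  # iterables pass through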
yasar11732/arch-package-quiz
packagequiz/questionGenerator.py
Python
gpl-3.0
4,801
0.005626
#!/usr/bin/python # -*- coding: utf-8 -*- """ Copyright 2011 Yaşar Arabacı This file is part of packagequiz. packagequiz is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import pyalpm from pycman import config import question as q from random import choice, randint from sys import modules config.init_with_config("/etc/pacman.conf") localdb = pyalpm.get_localdb() #questionTypes= (q.definition,q.depends, # q.fileOwner,q.installedSize, # q.packager) types = [getattr(q, t) for t in dir(q) if str(type(getattr(q, t))) == "<class 'type'>"] questionTypes = [qtype for qtype in types if (issubclass(qtype, q.Question) and qtype is not q.Question)] del(types) def getRandomQuestion(package=None, numWrongAnswers=3): """Returns a tuple with size of 3, first question text, second correct answer, third list of wrong answers @param package: A pyalpm.package type @param numWrongAnswers: integer @return: tuple """ qToReturn = None if package == None: package = getRandomPackage() global questionTypes while not qToReturn: qtype = choice(questionTypes) question = qtype(package) func = getattr(modules[__name__], "_" + question.type) qToReturn = func(question, numWrongAnswers) return qToReturn def getRandomPackage(exception=[]): """ Return a random package @ param exception: list of packages as an exception @ return: a package """ global localdb package = choice(localdb.pkgcache) if len(exception) == 0: return package else: while package.name in exception: package = choice(localdb.pkgcache) return package def qgenerator(function): def generate(question, numWrongAnswers=3): if question.correctAnswer is None: return None if isinstance(question.correctAnswer, list): if len(question.correctAnswer) > 0: correct_answer = choice(question.correctAnswer) else: return None else: correct_answer = question.correctAnswer wrong_answers = [] while len(wrong_answers) < numWrongAnswers: answer = function(question, numWrongAnswers) if answer not in wrong_answers and answer is not None: wrong_answers.append(answer) return (question.text, correct_answer, wrong_answers,question.points) return generate @qgenerator def _definition(question, numWrongAnswers=3): return getRandomPackage([question.package.name]).desc @qgenerator def _depends(question, numWrongAnswers=3): pkg = getRandomPackage([question.correctAnswer]) return pkg.name + "(" + pkg.desc + ")" def _requiredBy(question, numWrongAnswers=3): global localdb if len(question.correctAnswer) >
0: correct_answer_name = choice(question.correctAnswer) correct_answer_package = localdb.get_pkg(correct_answer_name) correct_answer = correct_answer_name + "(" + correct_answer_package.desc + ")" else: return None wrong_answers = [] while len(wrong_answers) < numWrongAnswers:
pkg = getRandomPackage([pkg for pkg in question.correctAnswer]) answer = pkg.name + "(" + pkg.desc + ")" if answer not in wrong_answers and answer is not None: wrong_answers.append(answer) return (question.text, correct_answer, wrong_answers,question.points) #@qgenerator #def _installedSize(question, numWrongAnswers=3): # (type(question.correctAnswer)) # while True: # rand = randint(int(question.correctAnswer * 0.1), int(question.correctAnswer * 1.9)) # (rand) # (type(rand)) # if rand != question.correctAnswer: # return rand # #@qgenerator #def _maintainer(question, numWrongAnswers=3): # while True: # rand_pack = getRandomPackage() # if rand_pack.packager != question.correctAnswer: # return rand_pack.packager # #@qgenerator #def _fileOwner(question, numWrongAnswers=3): # # return getRandomPackage([question.correctAnswer]).name if __name__ == "__main__": (getRandomQuestion())
tiagocoutinho/bliss
bliss/common/shutter.py
Python
lgpl-3.0
10,215
0.004307
# -*- coding: utf-8 -*- # # This file is part of the bliss project # # Copyright (c) 2017 Beamline Control Unit, ESRF # Distributed under the GNU LGPLv3. See LICENSE for more info. import functools from gevent import lock from bliss.config.conductor.client import Lock from bliss.config.channels import Cache from bliss.config.settings import HashObjSetting from bliss.common.switch import Switch as BaseSwitch class ShutterSwitch(BaseSwitch): def __init__(self, set_open, set_closed, is_opened): BaseSwitch.__init__(self, "ShutterSwitch"+str(id(self)), {}) self._set_open = set_open self._set_closed = set_closed self._is_opened = is_opened def _states_list(self): return ["OPEN", "CLOSED"] def _set(self, state): if state == "OPEN": return self._set_open() else: return self._set_closed() def _get(self): if self._is_opened(): return "OPEN" else: return "CLOSED" class Shutter(object): MANUAL,EXTERNAL,CONFIGURATION = range(3) # modes MODE2STR = {MANUAL: ("MANUAL", "Manual mode"), EXTERNAL: ("EXTERNAL", "External trigger mode"), CONFIGURATION: ("CONFIGURATION", "Configuration mode"), } OPEN,CLOSED,UNKNOWN = range(3) # state STATE2STR = { OPEN: ("OPEN", "Shutter is open"), CLOSED: ("CLOSED", "Shutter is closed"), UNKNOWN: ("UNKNOWN", "Unknown shutter state"), } """ Generic shutter object This interface should be used for all type of shutter (motor,fast...) You may want to link this shutter with an external control i.e: wago,musst.... in that case you have to put in configuration **external-control** with the object reference. This external control should be compatible with the Switch object and have an OPEN/CLOSED states. """ def lazy_init(func): @functools.wraps(func) def func_wrapper(self,*args,**kwargs): self.init() with Lock(self): return func(self,*args,**kwargs) return func_wrapper def __init__(self,name,config): self.__name = name self.__config = config self._external_ctrl = config.get('external-control') self.__settings = HashObjSetting('shutter:%s' % name) self.__initialized_hw = Cache(self,"initialized", default_value = False) self.__state = Cache(self,"state", default_value = Shutter.UNKNOWN) self._init_flag = False self.__lock = lock.Semaphore() def init(self): """ initialize the shutter in the current mode. this is method is called by lazy_init """ if self._external_ctrl is not None: # Check if the external control is compatible # with a switch object and if it has open/close state ext_ctrl = self._external_ctrl name = ext_ctrl.name if hasattr(ext_ctrl,'name') else "unknown" try: states = ext_ctrl.states_list() ext_ctrl.set ext_ctrl.get except AttributeError: raise ValueError('external-ctrl : {0} is not compatible ' 'with a switch object'.format(name)) else: if(not 'OPEN' in states or not 'CLOSED' in states): raise ValueError("external-ctrl : {0} doesn't" " have 'OPEN' and 'CLOSED' states".format(name)) if not self._init_flag: self._init_flag = True try: self._init() with Lock(self): with self.__lock: if not self.__initialized_hw.value: self._initialize_hardware() self.__initialized_hw.
value = True except: self._init_flag = False raise def _init(self): """ This method should contains all software initialization like comm
unication, internal state... """ raise NotImplementedError def _initialize_hardware(self): """ This method should contains all commands needed to initialize the hardware. It's will be call only once (by the first client). """ pass @property def name(self): return self.__name @property def config(self): return self.__config @property def settings(self): return self.__settings @property def mode(self): """ shutter mode can be MANUAL,EXTERNAL,CONFIGURATION In CONFIGURATION mode, shutter can't be opened/closed. **CONFIGURATION** could mean that the shutter is in tuning mode i.e: changing open/close position in case of a motor. In EXTERNAL mode, the shutter will be controlled through the external-control handler. If no external control is configured open/close won't be authorized. """ return self.__settings.get('mode',Shutter.MANUAL) @mode.setter def mode(self,value): if value not in self.MODE2STR: raise ValueError("Mode can only be: %s" %\ ','.join((x[0] for x in self.MODE2STR.values()))) self.init() self._set_mode(value) if value in (self.CONFIGURATION,self.EXTERNAL): # Can't cache the state if external or configuration self.__state.value = self.UNKNOWN self.__settings['mode'] = value @property def state(self): self.init() mode = self.mode if mode == self.MANUAL and self.__state.value == self.UNKNOWN: return_state = self._state() self.__state.value = return_state return return_state else: if mode == self.EXTERNAL: if self.external_control is not None: switch_state = self.external_control.get() return self.OPEN if switch_state == "OPEN" else self.CLOSED else: return self.UNKNOWN elif mode == self.CONFIGURATION: return self.UNKNOWN return self.__state.value def _state(self): raise NotImplementedError @property def state_string(self): return self.STATE2STR.get(self.state,self.STATE2STR[self.UNKNOWN]) @property def external_control(self): return self._external_ctrl @lazy_init def opening_time(self): """ Return the opening time if available or None """ return self._opening_time() def _opening_time(self): return self.__settings.get('opening_time') @lazy_init def closing_time(self): """ Return the closing time if available or None """ return self._closing_time() def _closing_time(self): return self.__settings.get('closing_time') def measure_open_close_time(self): """ This small procedure will in basic usage do an open and close of the shutter to measure the opening and closing time. Those timing will be register into the settings. returns (opening,closing) time """ previous_mode = self.mode() try: if previous_mode != self.MANUAL: self.mode(self.MANUAL) opening_time,closing_time = self._measure_open_close_time() self.__settings['opening_time'] = opening_time self.__settings['closing_time'] = closing_time return open_time,close_time finally: if previous_mode != self.MANUAL: self.mode(previous_mode) def _measure_open_close_time(self): """ This method can be overloaded if needed. Basic timing on """ self.close
chromium/chromium
third_party/android_deps/libs/io_github_java_diff_utils_java_diff_utils/3pp/fetch.py
Python
bsd-3-clause
2,503
0
#!/usr/bin/env python3
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# This is generated, do not edit. Update BuildConfigGenerator.groovy and
# 3ppFetch.template instead.

import argparse
import json
import os
import re
import urllib.request

_REPO_URL = 'https://repo.maven.apache.org/maven2'
_GROUP_NAME = 'io/github/java-diff-utils'
_MODULE_NAME = 'java-diff-utils'
_FILE_EXT = 'jar'
_OVERRIDE_LATEST = None
_PATCH_VERSION = 'cr1'


def do_latest():
    if _OVERRIDE_LATEST is not None:
        print(_OVERRIDE_LATEST + f'.{_PATCH_VERSION}')
        return
    maven_metadata_url = '{}/{}/{}/maven-metadata.xml'.format(
        _REPO_URL, _GROUP_NAME, _MODULE_NAME)
    metadata = urllib.request.urlopen(maven_metadata_url).read().decode(
        'utf-8')
    # Do not parse xml with the python included parser since it is susceptible
    # to maliciously crafted xmls. Only use regular expression parsing to be
    # safe. RE should be enough to handle what we need to extract.
    match = re.search('<latest>([^<]+)</latest>', metadata)
    if match:
        latest = match.group(1)
    else:
        # if no latest info was found just hope the versions are sorted and the
        # last one is the latest (as is commonly the case).
        latest = re.findall('<version>([^<]+)</version>', metadata)[-1]
    print(latest + f'.{_PATCH_VERSION}')


def get_download_url(version):
    # Remove the patch version when getting the download url
    version_no_patch, patch = version.rsplit('.', 1)
    if patch.startswith('cr'):
        version = version_no_patch
    file_url = '{0}/{1}/{2}/{3}/{2}-{3}.{4}'.format(_REPO_URL, _GROUP_NAME,
                                                    _MODULE_NAME, version,
                                                    _FILE_EXT)
    file_name = file_url.rsplit('/', 1)[-1]
    partial_manifest = {
        'url': [file_url],
        'name': [file_name],
        'ext': '.' + _FILE_EXT,
    }
    print(json.dumps(partial_manifest))


def main():
    ap = argparse.ArgumentParser()
    sub = ap.add_subparsers()

    latest = sub.add_parser('latest')
    latest.set_defaults(func=lambda _opts: do_latest())

    download = sub.add_parser('get_url')
    download.set_defaults(
        func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))

    opts = ap.parse_args()
    opts.func(opts)


if __name__ == '__main__':
    main()
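For a concrete sense of get_download_url, the output for an assumed version string '4.11.cr1' follows directly from the format string above (the version itself is invented):

# get_download_url('4.11.cr1') strips the 'cr1' patch suffix and prints:
# {"url": ["https://repo.maven.apache.org/maven2/io/github/java-diff-utils/java-diff-utils/4.11/java-diff-utils-4.11.jar"],
#  "name": ["java-diff-utils-4.11.jar"], "ext": ".jar"}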
duoduo369/django-scaffold
myauth/models.py
Python
mit
1,991
0.00387
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from django.utils.translation import ugettext as _


class UserProfile(models.Model):
    '''
    username: the username is unique and may be NULL
    nickname: the nickname may change and may be duplicated
    '''
    user = models.OneToOneField(User, unique=True, related_name='profile',
                                verbose_name=_('用户'))
    username = models.CharField(blank=True, null=True, unique=True,
                                max_length=255, verbose_name=_('用户名(唯一)'))
    nickname = models.CharField(blank=True, max_length=255, db_index=True,
                                default='', verbose_name=_('昵称(可变)'))
    created_at = models.DateTimeField(auto_now_add=True, verbose_name=_('创建日期'))
    updated_at = models.DateTimeField(auto_now=True, verbose_name=_('修改日期'))

    class Meta:
        db_table = "auth_userprofile"
        verbose_name = _('用户附加信息')
        verbose_name_plural = _('用户附加信息')


@receiver(pre_save, sender=User)
def pre_save_user_handler(sender, instance, **kwargs):
    '''
    Before a user is saved, if EMAIL_AS_USERNAME is enabled, the email field
    must be set from the username.
    '''
    if settings.FEATURES.get('EMAIL_AS_USERNAME'):
        if not instance.email or instance.email.strip() != instance.username.strip():
            instance.email = instance.username


@receiver(post_save, sender=User)
def post_save_user_handler(sender, instance, created, **kwargs):
    try:
        profile = instance.profile
    except UserProfile.DoesNotExist:
        profile = UserProfile(user=instance)
    profile.save()


@receiver(pre_save, sender=UserProfile)
def pre_save_userprofile_handler(sender, instance, **kwargs):
    '''
    Before a profile is saved, if the username is empty, set it to None so it
    bypasses the unique check.
    '''
    if not instance.username:
        instance.username = None
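A sketch of the signal flow wired up above; it assumes a configured Django project whose settings define the FEATURES dict:

# Invented usage; assumes a configured project with settings.FEATURES.
user = User.objects.create(username='alice@example.com')
# pre_save_user_handler copies the username into user.email when
# FEATURES['EMAIL_AS_USERNAME'] is truthy; post_save_user_handler then
# guarantees that a related UserProfile row exists:
assert user.profile is not None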
OpenBCI/OpenBCI_Python
scripts/test.py
Python
mit
937
0.004269
from __future__ import print_function
import sys
sys.path.append('..')  # help python find cyton.py relative to scripts folder
from openbci import cyton as bci
import logging
import time


def printData(sample):
    # os.system('clear')
    print("----------------")
    print("%f" % (sample.id))
    print(sample.channel_data)
    print(sample.aux_data)
    print("----------------")


if __name__ == '__main__':
    # port = '/dev/tty.OpenBCI-DN008VTF'
    port = '/dev/tty.usbserial-DB00JAM0'
    # port = '/dev/tty.OpenBCI-DN0096XA'
    baud = 115200
    logging.basicConfig(filename="test.log",
                        format='%(asctime)s - %(levelname)s : %(message)s',
                        level=logging.DEBUG)
    logging.info('---------LOG START-------------')
    board = bci.OpenBCICyton(port=port, scaled_output=False, log=True)
    print("Board Instantiated")
    board.ser.write('v')
    time.sleep(10)
    board.start_streaming(printData)
    board.print_bytes_in()
diegoberaldin/PyRequirementManager
src/controller/printers.py
Python
gpl-3.0
4,793
0.000417
from src import model as mdl


class LaTeXPrinter(object):
    def __init__(self, target_file_path):
        self._target_file_path = target_file_path

    def run(self):
        with open(self._target_file_path, 'w') as output:
            text = self._generate_text()
            output.write(text)

    def _generate_text(self):
        raise NotImplementedError('Override me!')


class TablePrinter(LaTeXPrinter):
    def __init__(self, target_file_path):
        super(TablePrinter, self).__init__(target_file_path)

    def _generate_text(self):
        text = '\\rowcolors{3}{aubergine}{white}\n'
        text += self._get_table_definition()
        text += '\\toprule\n'
        text += self._get_headers()
        text += '\\midrule\n\\endhead\n'
        for element in self._get_content():
            text += ' & '.join(element) + '\\\\\n'
        text += '\\bottomrule\n'
        caption, label = self._get_caption_and_label()
        text += ('\\rowcolor{white}' +
                 '\\caption{' + caption + '}\\label{' + label + '}\n')
        text += '\\end{longtable}\n'
        return text

    def _get_table_definition(self):
        raise NotImplementedError('Override me!')

    def _get_headers(self):
        raise NotImplementedError('Override me!')

    def _get_content(self):
        """Returns an iterable of 3-tuples with the ID, the description and
        the parent of the item that needs to be printed.
        """
        raise NotImplementedError('Override me!')

    def _get_caption_and_label(self):
        """Returns the caption and label of the table to print.
        """
        raise NotImplementedError('Override me!')


class UseCaseTablePrinter(TablePrinter):
    def __init__(self, target_file_path):
        super(UseCaseTablePrinter, self).__init__(target_file_path)
        self._uc_id_list = mdl.dal.get_all_use_case_ids()

    def _get_table_definition(self):
        return '\\begin{longtable}{lp{.5\\textwidth}l}\n'

    def _get_headers(self):
        # every header row must end with the LaTeX row terminator (\\)
        return ('\\sffamily\\bfseries ID & \\sffamily\\bfseries Descrizione '
                '& \\sffamily\\bfseries Padre\\\\\n')

    def _get_content(self):
        """Returns an iterable (generator) containing a 3-tuple with the ID,
        description and parent of every use case.
        """
        for uc_id in self._uc_id_list:
            uc = mdl.dal.get_use_case(uc_id)
            yield (uc.uc_id, uc.description, uc.parent_id or '--')

    def _get_caption_and_label(self):
        return ('Prospetto riepilogativo dei casi d\'uso', 'tab:uclist')


class RequirementTablePrinter(TablePrinter):
    def __init__(self, req_type, priority, target_file_path):
        super(RequirementTablePrinter, self).__init__(target_file_path)
        self._req_type = req_type
        self._priority = priority
        self._req_id_list = mdl.dal.get_all_requirement_ids_spec(
            req_type, priority)

    def _get_table_definition(self):
        return '\\begin{longtable}{lp{.5\\textwidth}ll}\n'

    def _get_headers(self):
        return ('\\sffamily\\bfseries ID & \\sffamily\\bfseries Descrizione & '
                '\\sffamily\\bfseries Fonte & '
                '\\sffamily\\bfseries Padre\\\\\n')

    def _get_content(self):
        for req_id in self._req_id_list:
            req = mdl.dal.get_requirement(req_id)
            source = mdl.dal.get_source(req.source_id)
            yield (req.req_id, req.description, source.name,
                   req.parent_id or '--')

    def _get_caption_and_label(self):
        return ('Elenco dei requisiti {0} {1}.'.format(
                ('funzionali' if self._req_type == 'F'
                 else 'dichiarativi' if self._req_type == 'D'
                 else 'prestazionali' if self._req_type == 'P'
                 else 'qualitativi'),
                ('obbligatori' if self._priority == 'O'
                 else 'facoltativi' if self._priority == 'F'
                 else 'desiderabili')),
                'tab:reqlist{0}{1}'.format(self._req_type, self._priority))


class UseCaseRequirementTrackPrinter(TablePrinter):
    def __init__(self, target_file_path):
        super(UseCaseRequirementTrackPrinter, self).__init__(target_file_path)
        self._uc_id_list = mdl.dal.get_all_use_case_ids()

    def _get_table_definition(self):
        # double the backslash so the output contains \textwidth, not a tab
        return '\\begin{longtable}{lp{.8\\textwidth}}\n'

    def _get_headers(self):
        return ('\\sffamily\\bfseries Caso d\'uso & '
                '\\sffamily\\bfseries Requisiti associati\\\\\n')

    def _get_content(self):
        for uc_id in self._uc_id_list:
            req_ids = mdl.dal.get_use_case_associated_requirements(uc_id)
            yield (uc_id, ', '.join(req_ids))

    def _get_caption_and_label(self):
        return ('Tracciamento requisiti -- casi d\'uso.', 'tab:ucreqtrack')
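A sketch of how one of the concrete printers is driven; it assumes the project's mdl.dal layer is backed by a populated database, and the output path is invented:

# Invented usage; mdl.dal must be initialised against a populated database.
printer = UseCaseTablePrinter('/tmp/use_cases.tex')
printer.run()  # writes a LaTeX longtable listing every use case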
praus/shapy
tests/emulation/test_shaping.py
Python
mit
3,245
0.010478
#import logging #logging.basicConfig(level=logging.INFO, datefmt='%H:%M:%S', # format='%(asctime)s %(levelname)s: %(message)s') import unittest import SocketServer, socket import random, time import threading import cStringIO from datetime import datetime from shapy import register_settings register_settings('tests.emulation.settings') from shapy.emulation.shaper import Shaper from tests.mixins import ShaperMixin, ServerMixin from tests.utils import total_seconds class TestCWCShaping(unittest.TestCase, ShaperMixin, ServerMixin): filesize = 2**19 # 0.5MB def setUp(self): self.ser
ver_addr = ('127.0.0.2', 55000) self.client_addr = ('127.0.0.3', 55001) # shaping init ShaperMixin.setUp(self) ServerMixin.run_server(self) with open('/dev/urandom', 'rb') as f: self.randomfile = bytearray(f.read(self.files
ize)) def test_transfer(self): self.sock_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # SO_REUSEADDR: http://stackoverflow.com/questions/3229860/what-is-the-meaning-of-so-reuseaddr-setsockopt-option-linux s = self.sock_client s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind(self.client_addr) s.connect(self.server_addr) start = datetime.now() # client -> server sent = 0 while sent < self.filesize: sent += s.send(self.randomfile[sent:sent+4096]) # We have to wait until the server finishes reading data from its socket # and closes the connection. rcvd = s.recv(1024) delay = total_seconds(datetime.now() - start) #delay = delta.seconds + delta.microseconds/float(10**6) tt = self.estimate_transfer_time(self.filesize, self.client_addr[0], self.server_addr[0]) self.assertAlmostEqual(delay, tt, delta=0.4) # server -> client start = datetime.now() while len(rcvd) < self.filesize: rcvd += s.recv(1024) delay = total_seconds(datetime.now() - start) tt = self.estimate_transfer_time(self.filesize, self.server_addr[0], self.client_addr[0]) self.assertAlmostEqual(delay, tt, delta=0.4) # statistics of qdiscs on IFB must correctly reflect the transmitted data self._test_traffic() s.close() def _test_traffic(self): c = self.sh.get_traffic(self.client_addr[0]) s = self.sh.get_traffic(self.server_addr[0]) # qdisc statistics reflect all traffic, including header of each layer, # not only filesize delta = self.filesize/100 self.assertAlmostEqual(c[0], self.filesize, delta=delta) self.assertAlmostEqual(c[1], self.filesize, delta=delta) self.assertAlmostEqual(s[0], self.filesize, delta=delta) self.assertAlmostEqual(s[1], self.filesize, delta=delta) def tearDown(self): if hasattr(self, 'sock_client'): self.sock_client.close() ShaperMixin.tearDown(self)
luispedro/jug
jug/tests/test_store.py
Python
mit
3,891
0.003084
import os

import jug.backends.redis_store
import jug.backends.file_store
import jug.backends.dict_store
from jug.backends.redis_store import redis
import pytest

if not os.getenv('TEST_REDIS'):
    redis = None

try:
    redisConnectionError = redis.ConnectionError
except:
    # redis may be None here, so fall back to a harmless exception type
    redisConnectionError = SystemError


@pytest.fixture(scope='function', params=['file', 'dict', 'redis'])
def store(tmpdir, request):
    if request.param == 'file':
        tmpdir = str(tmpdir)
        yield jug.backends.file_store.file_store(tmpdir)
        jug.backends.file_store.file_store.remove_store(tmpdir)
    elif request.param == 'dict':
        yield jug.backends.dict_store.dict_store()
    elif request.param == 'redis':
        if redis is None:
            pytest.skip()
        try:
            # the redis backend module is imported as jug.backends.redis_store
            st = jug.backends.redis_store.redis_store('redis:')
            yield st
            st.close()
        except redisConnectionError:
            pytest.skip()


def test_load_get(store):
    assert len(list(store.list())) == 0
    key = b'jugisbestthingever'
    assert not store.can_load(key)
    object = list(range(232))
    store.dump(object, key)
    assert store.can_load(key)
    assert store.load(key) == object

    flist = list(store.list())
    assert len(flist) == 1
    assert flist[0] == key

    store.remove(key)
    assert not store.can_load(key)


def test_lock(store):
    assert len(list(store.listlocks())) == 0
    key = b'jugisbestthingever'
    lock = store.getlock(key)
    assert not lock.is_locked()
    assert lock.get()
    assert not lock.get()
    lock2 = store.getlock(key)
    assert not lock2.get()
    assert len(list(store.listlocks())) == 1
    lock.release()
    assert lock2.get()
    lock2.release()


def test_lock_remove(store):
    assert len(list(store.listlocks())) == 0
    key = b'jugisbestthingever'
    lock = store.getlock(key)
    assert not lock.is_locked()
    assert lock.get()
    assert not lock.get()
    assert len(list(store.listlocks())) == 1
    store.remove_locks()
    assert len(list(store.listlocks())) == 0


def test_lock_fail(store):
    assert len(list(store.listlocks())) == 0
    key = b'jugisbestthingever'
    lock = store.getlock(key)
    assert not lock.is_locked()
    assert lock.get()
    assert not lock.get()
    lock.fail()
    assert lock.is_failed()
    assert len(list(store.listlocks())) == 1
    store.remove_locks()
    assert not lock.is_failed()
    assert len(list(store.listlocks())) == 0


def test_lock_fail_other(store):
    # is_failed should return True even if we can't acquire the lock
    assert len(list(store.listlocks())) == 0
    key = b'jugisbestthingever'
    lock1 = store.getlock(key)
    lock2 = store.getlock(key)
    assert not lock1.is_locked()
    assert not lock2.is_locked()
    assert lock1.get()
    assert not lock2.get()
    assert not lock1.is_failed()
    assert not lock2.is_failed()
    lock1.fail()
    assert lock2.is_failed()
    assert len(list(store.listlocks())) == 1
    store.remove_locks()
    assert not lock1.is_failed()
    assert not lock2.is_failed()
    assert len(list(store.listlocks())) == 0


def test_numpy_array(tmpdir):
    try:
        import numpy as np
    except ImportError:
        pytest.skip()
    store = jug.backends.file_store.file_store(str(tmpdir))
    arr = np.arange(100) % 17
    arr = arr.reshape((10, 10))
    key = 'mykey'
    store.dump(arr, key)
    arr2 = store.load(key)
    assert np.all(arr2 == arr)
    store.remove(key)
    store.close()


def test_numpy_array_no_compress(tmpdir):
    try:
        import numpy as np
    except ImportError:
        pytest.skip()
    store = jug.backends.file_store.file_store(str(tmpdir), compress_numpy=False)
    arr = np.arange(100) % 17
    arr = arr.reshape((10, 10))
    key = 'mykey'
    store.dump(arr, key)
    arr2 = store.load(key)
    assert np.all(arr2 == arr)
    store.remove(key)
    store.close()
WeCase/WeCase
utils/depgraph.py
Python
gpl-3.0
898
0.004454
#!/usr/bin/python3

import sys


def process_import(filename, statement):
    statement = statement.replace(",", " ")
    modules = statement.split()
    for module in modules[1:]:
        print('"%s" -> "%s"' % (filename, module))


def process_from(filename, statement):
    statement = statement.replace(",", " ")
    modules = statement.split()
    main_module = modules[1]
    for module in modules[3:]:
        print('"%s" -> "%s" -> "%s"' % (filename, main_module, module))


def print_header():
    print("digraph WeCase {")
    print("ratio=2")


def print_footer():
    print("}")


print_header()
for line in sys.stdin:
    line = line.replace("\n", "")
    if line.endswith(".py"):
        filename = line
    else:
        if line.startswith("import"):
            process_import(filename, line)
        elif line.startswith("from"):
            process_from(filename, line)
print_footer()
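The script expects stdin to interleave filenames and the import statements found in them; a sketch of the dot edges each helper emits, with invented input lines:

# Invented example lines showing the emitted graphviz edges.
process_import("main.py", "import sys, os")
# "main.py" -> "sys"
# "main.py" -> "os"
process_from("main.py", "from wecase import ui")
# "main.py" -> "wecase" -> "ui"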
Suor/flaws
astpp.py
Python
bsd-2-clause
2,283
0.001752
""" A pretty-printing dump function for the ast module. The code was copied fro
m the ast.dump function and modified slightly to pretty-print. Alex Leone (acleone ~AT~ gmail.com), 2010-01-30 """ from ast import * def dump(node, annotate_fields=True, include_attributes=False, indent=' '): """ Return a formatted dump of the tree in *node*. This is mainly useful for debugging purposes. The returned string will show the names a
nd the values for fields. This makes the code impossible to evaluate, so if evaluation is wanted *annotate_fields* must be set to False. Attributes such as line numbers and column offsets are not dumped by default. If this is wanted, *include_attributes* can be set to True. """ def _format(node, level=0): if isinstance(node, AST): fields = [(a, _format(b, level)) for a, b in iter_fields(node)] if include_attributes and node._attributes: fields.extend([(a, _format(getattr(node, a), level)) for a in node._attributes]) return ''.join([ node.__class__.__name__, '(', ', '.join(('%s=%s' % field for field in fields) if annotate_fields else (b for a, b in fields)), ')']) elif isinstance(node, list): lines = ['['] lines.extend((indent * (level + 2) + _format(x, level + 2) + ',' for x in node)) if len(lines) > 1: lines.append(indent * (level + 1) + ']') else: lines[-1] += ']' return '\n'.join(lines) return repr(node) if isinstance(node, list): return '\n'.join(_format(n) for n in node) if not isinstance(node, AST): raise TypeError('expected AST, got %r' % node.__class__.__name__) return _format(node) if __name__ == '__main__': import sys for filename in sys.argv[1:]: print('=' * 50) print('AST tree for', filename) print('=' * 50) f = open(filename, 'r') fstr = f.read() f.close() print(dump(parse(fstr, filename=filename), include_attributes=True)) print()
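A short usage sketch of dump() on a parsed one-liner; the exact node names vary by Python version (older releases print Num(n=1), newer ones Constant(value=1)) and the layout depends on the indent argument:

# Usage sketch; output abridged and version-dependent.
import ast
tree = ast.parse('x = 1')
print(dump(tree))
# Module(body=[
#     Assign(targets=[
#         Name(id='x', ctx=Store()),
#       ], value=Constant(value=1)),
#   ])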
TomAugspurger/pandas
pandas/core/arrays/sparse/__init__.py
Python
bsd-3-clause
273
0.003663
# flake8: noqa: F401
from pandas.core.arrays.sparse.accessor import SparseAccessor, SparseFrameAccessor
from pandas.core.arrays.sparse.array import (
    BlockIndex,
    IntIndex,
    SparseArray,
    _make_index,
)
from pandas.core.arrays.sparse.dtype import SparseDtype
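A quick illustration of the main export; a sketch assuming a pandas version where SparseArray is reachable via the public pandas.arrays namespace:

# Sketch; pd.arrays.SparseArray is the public entry point.
import pandas as pd

arr = pd.arrays.SparseArray([0, 0, 1, 0, 2])
# arr.dtype is Sparse[int64, 0]: only the two non-fill values are stored.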
DexterLB/stard
src/stard/test_samples/father.py
Python
mit
144
0.006944
from stard.services import BaseService


class Service(BaseService):
    def init_service(self):
        self.children = {self.service('child')}
amaozhao/basecms
cms/forms/widgets.py
Python
mit
10,371
0.004339
# -*- coding: utf-8 -*- from itertools import chain from django.contrib.sites.models import Site from django.core.urlresolvers import NoReverseMatch, reverse_lazy from django.forms.widgets import Select, MultiWidget, TextInput from django.utils.encoding import force_text from django.utils.safestring import mark_safe from django.utils.translation import ugettext as _ from cms.forms.utils import get_site_choices, get_page_choices from cms.models import Page, PageUser from cms.templatetags.cms_admin import CMS_ADMIN_ICON_BASE from cms.utils.compat.dj import force_unicode class PageSelectWidget(MultiWidget): """A widget that allows selecting a page by first selecting a site and then a page on that site in a two step process. """ def __init__(self, site_choices=None, page_choices=None, attrs=None): if attrs is not None: self.attrs = attrs.copy() else: self.attrs = {} self.choices = [] super(PageSelectWidget, self).__init__((Select, Select, Select), attrs) def decompress(self, value): """ receives a page_id in value and returns the site_id and page_id of that page or the current site_id and None if no page_id is given. """ if value: page = Page.objects.get(pk=value) site = page.site return [site.pk, page.pk, page.pk] site = Site.objects.get_current() return [site.pk,None,None] def _has_changed(self, initial, data): # THIS IS A COPY OF django.forms.widgets.Widget._has_changed() # (except for the first if statement) """ Return True if data differs from initial. """ # For purposes of seeing whether something has changed, None is # the same as an empty string, if the data or inital value we get # is None, replace it w/ u''. if data is None or (len(data)>=2 and data[1] in [None,'']): data_value = u'' else: data_value = data if initial is None: initial_value = u'' else: initial_value = initial if force_unicode(initial_value) != force_unicode(data_value): return True return False def render(self, name, value, attrs=None): # THIS IS A COPY OF django.forms.widgets.MultiWidget.render() # (except for the last line) # value is a list of values, each corresponding to a widget # in self.widgets. 
site_choices = get_site_choices() page_choices = get_page_choices() self.site_choices = site_choices self.choices = page_choices self.widgets = (Select(choices=site_choices ), Select(choices=[('', '----')]), Select(choices=self.choices, attrs={'style': "display:none;"} ), ) if not isinstance(value, list): value = self.decompress(value) output = [] final_attrs = self.build_attrs(attrs) id_ = final_attrs.get('id', None) for i, widget in enumerate(self.widgets): try: widget_value = value[i] except IndexError: widget_value = None if id_: final_attrs = dict(final_attrs, id='%s_%s' % (id_, i)) output.append(widget.render(name + '_%s' % i, widget_value, final_attrs)) output.append(r'''<script type="text/javascript"> (function($) { var handleSiteChange = function(site_name, selected_id) { $("#id_%(name)s_1 optgroup").remove(); var myOptions = $("#id_%(name)s_2 optgroup[label='" + site_name + "']").clone(); $("#id_%(name)s_1").append(myOptions); $("#id_%(name)s_1").change(); }; var handlePageChange = function(page_id) { if (page_id) { $("#id_%(name)s_2 option").removeAttr('selected'); $("#id_%(name)s_2 option[value=" + page_id + "]").attr('selected','selected'); } else { $("#id_%(name)s_2 option[value=]").attr('selected','selected'); }; }; $("#id_%(name)s_0").change(function(){ var site_label = $("#id_%(name)s_0").children(":selected").text(); handleSiteChange( site_label ); }); $("#id_%(name)s_1").change(function(){ var page_id = $(this).find('option:selected').val(); handlePageChange( page_id ); }); $(function(){ handleSiteChange( $("#id_%(name)s_0").children(":selected").text() ); $("#add_id_%(name)s").hide(); }); })(django.jQuery); </script>''' % {'name': name}) return mark_safe(self.format_output(output)) def format_output(self, rendered_widgets): return u' '.join(rendered_widgets) class PageSmartLinkWidget(TextInput): def __init__(self, attrs=None, ajax_view=None): super(PageSmartLinkWidget, self).__init__(attrs) self.ajax_url = self.get_ajax_url(ajax_view=ajax_view) def get_ajax_url(self, ajax_view): try: return reverse_lazy(ajax_view) except NoReverseMatch: raise Exception( 'You should provide an ajax_view argument that can be reversed to the PageSmartLinkWidget' ) def render(self, name=None, value=None, attrs=None): final_attrs = self.build_attrs(attrs) id_ = final_attrs.get('id', None) output = [r'''<script type="text/javascript"> (function($){ $(function(){ $("#%(element_id)s").select2({ placeholder: "%(placeholder_text)s", allowClear: true, minimumInputLength: 3, ajax: { url: "%(ajax_url)s", dataType: 'json', data: function (term, page) { return { q: term, // search term language_code: '%(language_code)s' }; }, results: function (data, page) { return { more: false, results: $.map(data, function(item, i){ return { 'id':item.redirect_url, 'text': item.title + ' (/' +
item.path + ')'} } ) }; } }, // Allow creation of new entries createSearchChoice:function(term, data) { if ($(data).filter(function() { return this.text.localeCompare(term)===0; }).length===0) {return {id:term, text:term};} }, multiple: false, initSelec
tion : function (element, callback) { var initialValue = element.val() callback({id:initialValue, text: initialValue}); } }); }) })(django.jQuery); </script>''' % { 'element_id': id_, 'placeholder_text': final_attrs.get('placeholder_text', ''), 'language_code': self.language, 'ajax_url': force_unicode(self.ajax_url) }] output.append(super(PageSmartLinkWidget, self).render(name, value, attrs)) return mark_safe(u''.join(output)) class Media: css = { 'all': ('cms/js/select2/select2.css', 'cms/js/select2/select2-bootstrap.css',) } js = (#'cms/js/libs/jquery.min.js', 'cms/js/select2/select2.js',) class UserSelectAdminWidget(Select): """Special widget used in page permission inlines, because we have to render an add user (plus) icon, but point it somewhere else - to special user creation view, which is accessible only if user haves "add user" permissions. Current user should be assigned to widget in form constructor as an user attribute. """ def render(self, name, value, attrs=None, choices=()): output = [super(UserSelectAdminWidget, self).render(name, value, attrs, choices)] if hasattr(self, 'user') and (self.user.is_superuser or \ self.user.has_perm(PageUser._meta.app_label + '.' +
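The two-step decompress/render contract above is easier to see outside Django. The sketch below is a minimal, framework-free rendering of the same idea, with the Page/Site lookup replaced by a plain dict; every name in it is illustrative rather than part of the cms API.

PAGES = {42: {"site_id": 1, "title": "Home"}}


def decompress(page_id, default_site=1):
    """Split one stored value into per-subwidget values [site, page, page]."""
    if page_id in PAGES:
        return [PAGES[page_id]["site_id"], page_id, page_id]
    return [default_site, None, None]


def render(name, value):
    """Render one <input> per decomposed value, as MultiWidget.render does."""
    parts = decompress(value)
    return "\n".join(
        '<input name="%s_%d" value="%s">' % (name, i, "" if v is None else v)
        for i, v in enumerate(parts))


print(render("page", 42))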
timorieber/wagtail
wagtail/admin/rich_text/editors/draftail/features.py
Python
bsd-3-clause
2,173
0.001381
from django.forms import Media

from wagtail.admin.staticfiles import versioned_static


# Feature objects: these are mapped to feature identifiers within the rich text
# feature registry (wagtail.core.rich_text.features). Each one implements
# a `construct_options` method which modifies an options dict as appropriate to
# enable that feature.
# Additionally, a Feature object defines a media property
# (https://docs.djangoproject.com/en/stable/topics/forms/media/) to specify css/js
# files to import when the feature is active.

class Feature:
    def __init__(self, js=None, css=None):
        self.js = js or []
        self.css = css or {}

    @property
    def media(self):
        js = [versioned_static(js_file) for js_file in self.js]

        css = {}
        for media_type, css_files in self.css.items():
            css[media_type] = [versioned_static(css_file) for css_file in css_files]

        return Media(js=js, css=css)


class BooleanFeature(Feature):
    """
A feature which is enabled by a boolean flag at the
top level of the options dict
    """
    def __init__(self, option_name, **kwargs):
        super().__init__(**kwargs)
        self.option_name = option_name

    def construct_options(self, options):
        options[self.option_name] = True


class ListFeature(Feature):
    """
    Abstract class for features that are defined in a list within the
    options dict. Subclasses must define option_name
    """
    def __init__(self, data, **kwargs):
        super().__init__(**kwargs)
        self.data = data

    def construct_options(self, options):
        if self.option_name not in options:
            options[self.option_name] = []

        options[self.option_name].append(self.data)


class EntityFeature(ListFeature):
    """A feature which is listed in the entityTypes list of the options"""
    option_name = 'entityTypes'


class BlockFeature(ListFeature):
    """A feature which is listed in the blockTypes list of the options"""
    option_name = 'blockTypes'


class InlineStyleFeature(ListFeature):
    """A feature which is listed in the inlineStyles list of the options"""
    option_name = 'inlineStyles'
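The construct_options contract is small enough to demonstrate standalone. Below is a self-contained re-sketch of the ListFeature pattern (the real classes above also carry form media, omitted here), using dict.setdefault for the append-or-create step:

class ListFeature:
    option_name = None  # set by subclasses

    def __init__(self, data):
        self.data = data

    def construct_options(self, options):
        # append-or-create in one step instead of the explicit "not in" check
        options.setdefault(self.option_name, []).append(self.data)


class EntityFeature(ListFeature):
    option_name = 'entityTypes'


options = {}
EntityFeature({'type': 'LINK'}).construct_options(options)
print(options)  # {'entityTypes': [{'type': 'LINK'}]}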
zhongql/summer
tool/initdb.py
Python
mit
448
0.002232
from contextlib import closing

from flask import current_app

from summer.app import create_app
from summer.db.connect import connect_db


def init_db():
    app = create_app('product')
    _context = app.app_context()
    _context.push()
    with closing(connect_db()) as db:
        with open('./sum
mer/schema.sql', mode='r') as f:
            db.cursor().executescript(f.read())
        db.commit()


if __name__ == '__main__':
    init_db()
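The same close-and-commit initialisation can be exercised without Flask or the summer package. A stdlib-only sketch using sqlite3, with the schema inlined instead of read from ./summer/schema.sql:

import sqlite3
from contextlib import closing

SCHEMA = "CREATE TABLE IF NOT EXISTS entries (id INTEGER PRIMARY KEY, title TEXT);"


def init_db(path=":memory:"):
    # closing() guarantees the connection is released even if the script fails
    with closing(sqlite3.connect(path)) as db:
        db.cursor().executescript(SCHEMA)
        db.commit()


if __name__ == "__main__":
    init_db()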
joshrule/LOTlib
LOTlib/Projects/NumberGame/__init__.py
Python
gpl-3.0
21
0
from Model impor
t *
idjaw/netman
netman/api/api_utils.py
Python
apache-2.0
3,822
0.00157
# Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may
obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language
governing permissions and # limitations under the License. from functools import wraps import json import logging from flask import make_response, request, Response, current_app from werkzeug.routing import BaseConverter from netman.api import NETMAN_API_VERSION from netman.core.objects.exceptions import UnknownResource, Conflict, InvalidValue def to_response(fn): @wraps(fn) def wrapper(self, *args, **kwargs): try: result = fn(self, *args, **kwargs) if isinstance(result, Response): return result else: code, data = result if data is not None: response = json_response(data, code) else: response = make_response("", code) except InvalidValue as e: response = exception_to_response(e, 400) except UnknownResource as e: response = exception_to_response(e, 404) except Conflict as e: response = exception_to_response(e, 409) except NotImplementedError as e: response = exception_to_response(e, 501) except Exception as e: logging.exception(e) response = exception_to_response(e, 500) self.logger.info("Responding {} : {}".format(response.status_code, response.data)) if 'Netman-Max-Version' in request.headers: response.headers['Netman-Version'] = min( float(request.headers['Netman-Max-Version']), NETMAN_API_VERSION) return response return wrapper def exception_to_response(exception, code): data = {'error': str(exception)} if "Netman-Verbose-Errors" in request.headers: if hasattr(exception, "__module__"): data["error-module"] = exception.__module__ data["error-class"] = exception.__class__.__name__ else: if data['error'] == "": if hasattr(exception, "__module__"): data['error'] = "Unexpected error: {}.{}".format(exception.__module__, exception.__class__.__name__) else: data['error'] = "Unexpected error: {}".format(exception.__class__.__name__) response = json_response(data, code) response.status_code = code return response def json_response(data, code): json_data = json.dumps(data, indent=None) response = current_app.response_class(json_data, mimetype='application/json; charset=UTF-8') response.status_code = code return response class RegexConverter(BaseConverter): def __init__(self, url_map, *items): super(RegexConverter, self).__init__(url_map) self.regex = items[0] class BadRequest(InvalidValue): pass class MultiContext(object): def __init__(self, switch_api, parameters, *contexts): self.context_instances = [] for context in contexts: obj = context(switch_api) obj.process(parameters) self.context_instances.append(obj) self.parameters = parameters def __enter__(self): return [(obj.__enter__()) for obj in self.context_instances] def __exit__(self, type_, value, traceback): for context in self.context_instances: context.__exit__(type_, value, traceback)
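The heart of to_response() is the exception-to-status mapping. Here is a framework-free sketch of that mapping, with the Flask response replaced by a (status, body) tuple and stand-in exception classes, so the dispatch logic runs in isolation:

import json
from functools import wraps


class InvalidValue(Exception): pass
class UnknownResource(Exception): pass
class Conflict(Exception): pass


STATUS_BY_EXC = [(InvalidValue, 400), (UnknownResource, 404),
                 (Conflict, 409), (NotImplementedError, 501)]


def to_response(fn):
    @wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            return 200, json.dumps(fn(*args, **kwargs))
        except Exception as e:
            # first matching exception type wins; anything else is a 500
            for exc_type, code in STATUS_BY_EXC:
                if isinstance(e, exc_type):
                    return code, json.dumps({'error': str(e)})
            return 500, json.dumps({'error': str(e)})
    return wrapper


@to_response
def get_vlan(number):
    if number > 4094:
        raise InvalidValue('vlan number is too high')
    return {'number': number}


print(get_vlan(5000))  # (400, '{"error": "vlan number is too high"}')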
krafczyk/spack
var/spack/repos/builtin/packages/bwtool/package.py
Python
lgpl-2.1
1,568
0.000638
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Softwar
e Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR
A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class Bwtool(AutotoolsPackage):
    """bwtool is a command-line utility for bigWig files."""

    homepage = "https://github.com/CRG-Barcelona/bwtool"
    url = "https://github.com/CRG-Barcelona/bwtool/archive/1.0.tar.gz"

    version('1.0', 'cdd7a34ae457b587edfe7dc8a0bdbedd')

    depends_on('libbeato')
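filter_file() is the Spack helper this kind of package uses for in-place regex edits of build files (see the voropp entry further down for calls to it); a rough stdlib approximation of what it does, with a purely illustrative commented call:

import re


def filter_file(pattern, repl, path):
    # read, substitute, write back: an in-place sed-like edit
    with open(path) as f:
        text = f.read()
    with open(path, "w") as f:
        f.write(re.sub(pattern, repl, text))


# filter_file(r'CC=g\+\+', 'CC=clang++', 'config.mk')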
ethan-nelson/osm-tasking-manager2
osmtm/__init__.py
Python
bsd-2-clause
7,927
0.007191
import bleach from pyramid.config import Configurator from pyramid.authentication import AuthTktAuthenticationPolicy from pyramid.authorization import ACLAuthorizationPolicy from pyramid.session import UnencryptedCookieSessionFactoryConfig from sqlalchemy import engine_from_config from .models import ( DBSession, Base, ) from .utils import load_local_settings from sqlalchemy_i18n.manager import translation_manager from .security import ( RootFactory, group_membership, ) from .views.task import check_task_expiration from apscheduler.schedulers.background import BackgroundScheduler scheduler = BackgroundScheduler() scheduler.start() def main(global_config, **settings): """ This function returns a Pyramid WSGI application. """ settings['mako.directories'] = 'osmtm:templates' load_local_settings(settings) engine = engine_from_config(settings, 'sqlalchemy.') DBSession.configure(bind=engine) Base.metadata.bind = engine authn_policy = AuthTktAuthenticationPolicy( secret='super_secret', callback=group_membership)
    authz_policy = ACLAuthorizationPolicy()

    config = Configurator(settings=settings,
                          root_factory=RootFactory,
authentication_policy=authn_policy, authorization_policy=authz_policy) # fixes backwards incompatibilities when running Pyramid 1.5a # https://pypi.python.org/pypi/pyramid#features config.include('pyramid_mako') # pyramid_tm uses the transaction module to begin/commit/rollback # transaction when requests begin/end. config.include('pyramid_tm') # enable exception logger config.include('pyramid_exclog') session_factory = UnencryptedCookieSessionFactoryConfig('itsasecret') config.set_session_factory(session_factory) config.add_static_view('static', 'static', cachebust=True) config.add_route('home', '/') config.add_route('home_json', '/projects.json') config.add_route('about', '/about') config.add_route('login', '/login') config.add_route('logout', '/logout') config.add_route('oauth_callback', '/oauth_callback') config.add_route('project_new', '/project/new') config.add_route('project_new_grid', '/project/new/grid') config.add_route('project_new_arbitrary', '/project/new/arbitrary') config.add_route('project_grid_simulate', '/project/grid_simulate') config.add_route('project_json', '/project/{project:\d+}.json') config.add_route('project', '/project/{project:\d+}') config.add_route('project_edit', '/project/{project:\d+}/edit') config.add_route('project_publish', '/project/{project:\d+}/publish') config.add_route('project_check_for_update', '/project/{project:\d+}/check_for_updates') config.add_route('project_contributors', '/project/{project:\d+}/contributors', xhr=True) config.add_route('project_stats', '/project/{project:\d+}/stats') config.add_route('project_tasks_json', '/project/{project:\d+}/tasks.json') config.add_route('project_user_add', '/project/{project:\d+}/user/{user}', request_method="PUT") config.add_route('project_user_delete', '/project/{project:\d+}/user/{user}', request_method="DELETE") config.add_route('project_preset', '/project/{project:\d+}/preset') config.add_route('project_users', '/project/{project:\d+}/users') config.add_route('project_invalidate_all', '/project/{project:\d+}/invalidate_all') config.add_route('project_message_all', '/project/{project:\d+}/message_all') config.add_route('task_random', '/project/{project:\d+}/random', xhr=True) config.add_route('task_empty', '/project/{project:\d+}/task/empty', xhr=True) config.add_route('task_xhr', '/project/{project:\d+}/task/{task:\d+}', xhr=True) config.add_route('task_done', '/project/{project:\d+}/task/{task:\d+}/done', xhr=True) config.add_route('task_lock', '/project/{project:\d+}/task/{task:\d+}/lock', xhr=True) config.add_route('task_unlock', '/project/{project:\d+}/task/{task:\d+}/unlock', xhr=True) config.add_route('task_split', '/project/{project:\d+}/task/{task:\d+}/split', xhr=True) config.add_route('task_validate', '/project/{project:\d+}/task/{task:\d+}/validate', xhr=True) config.add_route('task_cancel_done', '/project/{project:\d+}/task/{task:\d+}/cancel_done', xhr=True) config.add_route('task_comment', '/project/{project:\d+}/task/{task:\d+}/comment', xhr=True) config.add_route('task_gpx', '/project/{project:\d+}/task/{task:\d+}.gpx') config.add_route('task_osm', '/project/{project:\d+}/task/{task:\d+}.osm') config.add_route('task_assign', '/project/{project:\d+}/task/{task:\d+}/user/{user}', xhr=True) config.add_route('task_assign_delete', '/project/{project:\d+}/task/{task:\d+}/user', xhr=True, request_method="DELETE") config.add_route('task_difficulty', '/project/{project:\d+}/task/{task:\d+}/difficulty/' + '{difficulty:\d+}', xhr=True) config.add_route('task_difficulty_delete', 
'/project/{project:\d+}/task/{task:\d+}/difficulty', xhr=True, request_method='DELETE') config.add_route('task_users', '/project/{project:\d+}/task/{task:\d+}/users') config.add_route('labels', '/labels') config.add_route('label_new', '/label/new') config.add_route('label_edit', '/label/{label:\d+}/edit') config.add_route('label_delete', '/label/{label:\d+}/delete') config.add_route('users', '/users') config.add_route('users_json', '/users.json') config.add_route('user_messages', '/user/messages') config.add_route('user_messages_check', '/user/messages/check') config.add_route('user', '/user/{username}') config.add_route('user_admin', '/user/{id:\d+}/admin') config.add_route('user_project_manager', '/user/{id:\d+}/project_manager') config.add_route('user_validator', '/user/{id:\d+}/validator') config.add_route('user_experienced_mapper', '/user/{id:\d+}/experienced_mapper') config.add_route('user_prefered_editor', '/user/prefered_editor/{editor}', xhr=True) config.add_route('user_prefered_language', '/user/prefered_language/{language}', xhr=True) config.add_route('licenses', '/licenses') config.add_route('license_new', '/license/new') config.add_route('license', '/license/{license:\d+}') config.add_route('license_edit', '/license/{license:\d+}/edit') config.add_route('license_delete', '/license/{license:\d+}/delete') config.add_route('message_read', '/message/read/{message:\d+}') config.add_route('markdown_ref', '/markdown_ref') config.add_translation_dirs('osmtm:locale') config.set_locale_negotiator('osmtm.i18n.custom_locale_negotiator') translation_manager.options.update({ 'locales': settings['available_languages'].split(), 'get_locale_fallback': True }) config.scan(ignore=['osmtm.tests', 'osmtm.scripts']) bleach.ALLOWED_TAGS.append(u'p') bleach.ALLOWED_TAGS.append(u'pre') check_expiration_interval = int( settings.get('check_expiration_interval', 5) ) scheduler.add_job(check_task_expiration, 'interval', seconds=check_expiration_interval, replace_existing=True) return config.make_wsgi_app()
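The scheduler usage framing this module (start a BackgroundScheduler, then register check_task_expiration as an interval job) can be approximated with the stdlib alone. A rough threading.Timer stand-in, without APScheduler's shutdown or replace_existing handling:

import threading


def schedule_interval(fn, seconds):
    def tick():
        fn()
        threading.Timer(seconds, tick).start()  # re-arm after each run
    threading.Timer(seconds, tick).start()


# schedule_interval(check_task_expiration, seconds=5)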
justinmnoor/geodjangotemplate
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/settings/local.py
Python
mit
457
0.002188
# DJANGO 1.10.5 LOCAL SETTINGS
# https://docs.djangoproject.com/en/1.10/topics/settings/
# ============================================================================
======================

from .base import *

DEBUG = True

# APP CONFIGURATION
# https://docs.djangoproject.com/en/1.10/ref/applications
# ==================================================================================================
# Add your local apps here
I
NSTALLED_APPS += []
jtaghiyar/kronos
kronos/plumber.py
Python
mit
28,257
0.008635
""" Created on Mar 8, 2014 @author: jtaghiyar """ from helpers import validate_argument import logging class Plumber(object): """ pipe components into a pipeline based on the given configuration and generate a python script. """ def __init__(self, pipeline_script, workflow=None): ## the file where the resultant script is written self.pipeline_script = pipeline_script if workflow is not None: self.wf = workflow @property def wf(self): return self._wf @wf.setter def wf(self, value): self._wf = value self._wf.inflate() self.tags = [t for t in self.wf.bfs()] self.modes = dict((n.tag,n.use_cluster) for n in self.wf.nodes.values()) self.mems = dict((n.tag,n.memory) for n in self.wf.nodes.values()) self.num_cpus = dict((n.tag,n.num_cpus) for n in self.wf.nodes.values()) self.parent_tasks = {} self.io_connections = {} self.input_arguments = {} self.decors = {} self.func_strs = {} self.component_names = [n.component_name for n in self.wf.nodes.values()] self.import_components = {k:['component_main as ' + k + '_main'] for k in self.component_names if k != 'breakpoint'} def make_script(self, sample_id): logging.info('making pipeline for %s ...' % (sample_id)) import_pymod = { 'os' :[], 'sys' :[], 'time' :[], 'ruffus' :[], 'traceback' :[], 'multiprocessing' :['Queue'], } import_factory = { 'kronos.pipelineui' :[], 'kronos.run_manager' :['RunManager'], 'kronos.job_manager' :[ 'DrmaaJobManager', 'SgeJobManager', 'LocalJobManager' ], 'kronos.utils' :['Task'],
'kronos.helpers' :[ 'JobFa
ilureError', 'flushqueue' ], 'kronos.logger' :[ 'PipelineLogger', 'LogWarnErr', 'LogInfo' ], } self._get_parent_tasks() self._get_io_connections() self._get_input_arguments() self._get_decorators() self._get_function_signatures() self._write_importing(import_pymod, comment="import python modules as well as pipelinui") self._write_importing(import_factory, comment="import factory modules") self._write_initilization() self._write_env_preparation() self._write_importing(self.import_components, comment="import components") self._write_generating_tasks() self._write_ruffus_pipeline() self._write_last_task() self._write_main() logging.info('successfully completed.') def paste_pipelines(self, pipelines, config_file): """paste all the pipelines scripts into a single one.""" ## store ruffus tasks in a list task_funcs = [] import_dict = { 'os' :[], 'sys' :[], 'ruffus' :[], 'subprocess' :[], 'datetime' :['datetime'], 'multiprocessing' :['Queue'], 'kronos.pipelineui' :[], 'kronos.helpers' :[ 'make_dir', 'kill_jobs', 'flushqueue', 'make_intermediate_cmd_args', 'KeywordsManager' ], 'kronos.logger' :[ 'PipelineLogger', 'LogWarnErr', 'LogInfo' ], 'kronos.kronos_version' :['kronos_version'] } self._write_config_file(config_file) self._write_importing(import_dict) self._write_argument_validation() self._write_logger() self._print(comment="ruffus pipeline") for i,p in enumerate(pipelines): n = p.pipeline_name s = p.sample_id cmd = '"{python_installation} {4}/{0}.py ' cmd += '--pipeline_name {0} --working_dir {working_dir} ' cmd += '--components_dir {components_dir} --run_id {1} ' cmd += '--drmaa_library_path {drmaa_library_path} ' cmd += "--sample_id {2} --qsub_options '{qsub_options}' " cmd += '--num_jobs {num_jobs} --log_file {3} ' cmd += '--extension {extension}" ' cmd += '.format(pipeline_name, run_id, sample_id, log_file, intermediate_path, **args)' self._print(message="@ruffus.follows()") self._print(message="@LogWarnErr(l)") self._print(message="@LogInfo(l)") self._print(message="def task_{0}(pipeline_name='{1}'):".format(i,n)) self._print(message="sample_id = '{}'".format(s), tab=1) self._print(message="intermediate_path = os.path.join(os.path.dirname(sys.argv[0]),'intermediate_pipeline_scripts')", tab=1) self._print(message="pipeline_script = '{0}/{1}.py'.format(intermediate_path, pipeline_name)", nl=True, tab=1) self._print(message="args['pipeline_name'] = pipeline_name", tab=1) self._print(message="args['run_id'] = run_id", tab=1) self._print(message="args['sample_id'] = sample_id", tab=1) self._print(message="args['log_file'] = log_file", nl=True, tab=1) self._print(message="km = KeywordsManager(pipeline_name, run_id, sample_id, args['working_dir'])", tab=1) self._print(message="old_script_content = open(pipeline_script, 'r').read()", tab=1) self._print(message="new_script_content = km.replace_keywords(old_script_content)", tab=1) self._print(message="f = open(pipeline_script, 'w')", tab=1) self._print(message="f.write(new_script_content)", tab=1) self._print(message="f.close()", nl=True, tab=1) self._print(message="cmd = '{0} {1}'.format(args['python_installation'], pipeline_script)", tab=1) self._print(message="cmd_args = make_intermediate_cmd_args(args)", tab=1) self._print(message="cmd = cmd + ' ' + ' '.join(cmd_args)", tab=1) self._print(message="print 'running {} pipeline with command: %s' % (cmd)".format(n), nl=True, tab=1) self._print(message="proc = subprocess.Popen(cmd, shell=True)", tab=1) self._print(message="job_ids.put(proc.pid)", tab=1) self._print(message="try:", tab=1) 
self._print(message="cmdout, cmderr = proc.communicate()", tab=2) self._print(message="job_rcs.put(proc.returncode)", tab=2) self._print(message="except:", tab=1) self._print(message="cmd = 'kill %s' % (proc.pid)", tab=2) self._print(message="os.system(cmd)", tab=2) self._print(message="finally:", tab=1) self._print(message="print '{} pipeline finished with exit code %s' % (proc.returncode)".format(n), nl=True, tab=2) self._print(message="if cmdout is not None:", tab=1) self._print(mess
wasw100/pycaldav
pycaldav/lib/url.py
Python
gpl-3.0
5,906
0.004233
#!/usr/bin/env python # -*- encoding: utf-8 -*- import urlparse def uc2utf8(input): ## argh! this feels wrong, but seems to be needed. if type(input) == unicode: return input.encode('utf-8') else: return input class URL: """ This class is for wrapping URLs into objects. It's used internally in the library, end users should not need to know anything about this class. All methods that accept URLs can be fed either with an URL object, a string or an urlparse.ParsedURL object. Addresses may be one out of three: 1) a path relative to the DAV-root, i.e. "someuser/calendar" may refer to "http://my.davical-server.example.com/pycaldav.php/someuser/calendar". 2) an absolute path, i.e. "/pycaldav.php/someuser/calendar" 3) a fully qualified URL, i.e. "http://someuser:somepass@my.davical-server.example.com/pycaldav.php/someuser/calendar". Remark that hostname, port, user, pass is typically given when instantiating the DAVClient object and cannot be overridden later. As of 2013-11, some methods in the pycaldav library expected strings and some expected urlparse.ParseResult objects, some expected fully qualified URLs and most expected absolute paths. The purpose of this class is to ensure consistency and at the same tim
e maintaining backward compatibility.

    Basically, all methods should accept any kind of URL.
    """
    def __init__(self, url):
        if isinstance(url, urlparse.ParseResult) or isinstance(url, urlparse.SplitResult):
            self.url_parsed = url
            self.url_raw = None
        else:
            self.
url_raw = url self.url_parsed = None def __nonzero__(self): if self.url_raw or self.url_parsed: return True else: return False def __ne__(self, other): return not self == other def __eq__(self, other): if str(self) == str(other): return True ## The URLs could have insignificant differences me = self.canonical() if hasattr(other, 'canonical'): other = other.canonical() return str(me) == str(other) ## TODO: better naming? Will return url if url is already an URL ## object, else will instantiate a new URL object @classmethod def objectify(self, url): if url is None: return None if isinstance(url, URL): return url else: return URL(url) ## To deal with all kind of methods/properties in the ParseResult ## class def __getattr__(self, attr): if self.url_parsed is None: self.url_parsed = urlparse.urlparse(self.url_raw) if hasattr(self.url_parsed, attr): return getattr(self.url_parsed, attr) else: return getattr(self.__unicode__(), attr) ## returns the url in text format def __str__(self): return self.__unicode__().encode('utf-8') ## returns the url in text format def __unicode__(self): if self.url_raw is None: self.url_raw = self.url_parsed.geturl() if isinstance(self.url_raw, unicode): return self.url_raw else: return unicode(self.url_raw, 'utf-8') def __repr__(self): return "URL(%s)" % str(self) def is_auth(self): return self.username is not None def unauth(self): if not self.is_auth(): return self return URL.objectify(urlparse.ParseResult( self.scheme, '%s:%s' % (self.hostname, self.port), self.path.replace('//', '/'), self.params, self.query, self.fragment)) def canonical(self): """ a canonical URL ... remove authentication details, make sure there are no double slashes, and to make sure the URL is always the same, run it through the urlparser """ url = self.unauth() ## this is actually already done in the unauth method ... if '//' in url.path: raise NotImplementedError("remove the double slashes") ## TODO: optimize - we're going to burn some CPU cycles here if url.endswith('/'): url = URL.objectify(str(url)[:-1]) ## This looks like a noop - but it may have the side effect ## that urlparser be run (actually not - unauth ensures we ## have an urlparse.ParseResult object) url.scheme ## make sure to delete the string version url.url_raw = None return url def join(self, path): """ assumes this object is the base URL or base path. If the path is relative, it should be appended to the base. If the path is absolute, it should be added to the connection details of self. If the path already contains connection details and the connection details differ from self, raise an error. """ if not path: return self path = URL.objectify(path) if ( (path.scheme and self.scheme and path.scheme != self.scheme) or (path.hostname and self.hostname and path.hostname != self.hostname) or (path.port and self.port and path.port != self.port) ): raise ValueError("%s can't be joined with %s" % (self, path)) if path.path[0] == '/': ret_path = uc2utf8(path.path) else: sep = "/" if self.path.endswith("/"): sep = "" ret_path = "%s%s%s" % (self.path, sep, uc2utf8(path.path)) return URL(urlparse.ParseResult( self.scheme or path.scheme, self.netloc or path.netloc, ret_path, path.params, path.query, path.fragment)) def make(url): """Backward compatibility""" return URL.objectify(url)
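The URL module above is Python 2 (urlparse, unicode). For reference, here is a Python 3 re-sketch of just the join() rule, where relative paths append while absolute paths replace and mismatched hosts raise, built on urllib.parse; it illustrates the semantics rather than porting the class:

from urllib.parse import urlparse, urlunparse


def join(base, path):
    b, p = urlparse(base), urlparse(path)
    if p.netloc and b.netloc and p.netloc != b.netloc:
        raise ValueError("%s can't be joined with %s" % (base, path))
    if p.path.startswith("/"):
        new_path = p.path                 # absolute: replaces the base path
    else:
        sep = "" if b.path.endswith("/") else "/"
        new_path = b.path + sep + p.path  # relative: appended to the base
    return urlunparse((b.scheme, b.netloc, new_path,
                       p.params, p.query, p.fragment))


print(join("http://cal.example.com/dav", "someuser/calendar"))
# http://cal.example.com/dav/someuser/calendar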
subssn21/notorm
notorm/momoko.py
Python
mit
919
0.015234
import notorm
import momoko
from tornado import gen
import psycopg2.extras


class AsyncRecord(notorm.record):
    @gen.coroutine
    def update(self, **args):
        for k, v in args.items():
            setattr(self, k, v)
        cursor = yield notorm.db.execute(
            self.update_qry, self._asdict(),
            cursor_factory=psycopg2.extras.NamedTupleCursor)

    @gen.coroutine
    def save(self):
        if self.id:
            # update() is itself a coroutine, so it must be yielded to run to completion
            yield self.update()
        else:
            cursor = yield notorm.db.execute(
                self.insert_qry, self.__dict__,
                cursor_factory=psycopg2.extras.NamedTupleCursor)
            results = cursor.fetchone()
            if results:
                self.id = results[0]
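save() above branches on whether an id already exists: update in place, or insert and capture the id the database generates. A synchronous, in-memory sketch of just that decision, with the database replaced by a dict:

class Record:
    _next_id = 1
    _rows = {}

    def __init__(self, **fields):
        self.id = None
        self.fields = fields

    def save(self):
        if self.id:
            Record._rows[self.id] = self.fields  # update path
        else:
            self.id = Record._next_id            # insert path: like RETURNING id
            Record._next_id += 1
            Record._rows[self.id] = self.fields


r = Record(name="demo")
r.save()
print(r.id, Record._rows)  # 1 {1: {'name': 'demo'}}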
DanielNeugebauer/adhocracy
src/adhocracy/lib/tiles/selection_tiles.py
Python
agpl-3.0
2,711
0.000369
from pylons import tmpl_context as c

from adhocracy.lib.auth import can

from util import render_tile, Bas
eTile


class VariantRow(object):

    def __init__(self, tile, variant, poll):
        self.tile = tile
        self.variant = variant
        self.poll = poll
        if tile.frozen:
            freeze_time = tile.selection.proposal.adopt_poll.beg
in_time self.text = tile.selection.page.variant_at(variant, freeze_time) else: self.text = tile.selection.page.variant_head(variant) @property def selected(self): return self.tile.selected == self.variant @property def show(self): return not self.tile.frozen or self.selected @property def can_edit(self): return (not self.tile.frozen) and \ can.variant.edit(self.tile.selection.page, self.variant) @property def num_comments(self): return len(self.tile.selection.page.variant_comments(self.variant)) class SelectionTile(BaseTile): def __init__(self, selection): self.selection = selection self.selected = selection.selected self.variant_polls = self.selection.variant_polls @property def has_variants(self): return len(self.selection.page.variants) < 2 @property def num_variants(self): return len(self.selection.page.variants) - 1 @property def selected_text(self): variant = self.selected if self.frozen: freeze_time = self.selection.proposal.adopt_poll.begin_time return self.selection.page.variant_at(variant, freeze_time) else: return self.selection.page.variant_head(variant) @property def selected_num_comments(self): return len(self.selection.page.variant_comments(self.selected)) @property def frozen(self): return self.selection.proposal.is_adopt_polling() def variant_rows(self): for (variant, poll) in self.variant_polls: row = VariantRow(self, variant, poll) yield row @property def show_new_variant_link(self): if self.frozen: return False return can.norm.edit(self.selection.page, 'any') def row(selection): if not selection or selection.is_deleted(): return "" tile = SelectionTile(selection) return render_tile('/selection/tiles.html', 'row', tile, selection=selection, user=c.user, cached=True) def variants(selection, tile=None): if tile is None: tile = SelectionTile(selection) return render_tile('/selection/tiles.html', 'variants', tile, selection=selection, user=c.user, cached=True)
tjanez/celery-demo-app
test/test.py
Python
gpl-3.0
350
0.002857
from __future__ import absolute_import

# Start a Celery worker by execu
ting:
# celery -A proj worker -l info

# Import available tasks
from proj.tasks import add, mul, xsum, fib

# Test short-running tasks
add.delay(2, 2)
mul.delay(10, 12)
xsum.delay(range(100))
fib.delay(10)
# Test medium-running tasks
fib.delay(35)
fib.delay(35)
fib.delay(35)
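proj/tasks.py is not included in this entry, so the task definitions are assumptions; a plausible shape for the fib task exercised above would be:

from celery import Celery

app = Celery('proj', broker='amqp://')


@app.task
def fib(n):
    # deliberately naive recursion: this is what would make fib(35)
    # a medium-running task, as the calls above expect
    return n if n < 2 else fib(n - 1) + fib(n - 2)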
heytcass/homeassistant-config
deps/cherrypy/test/test_etags.py
Python
mit
3,093
0
import cherrypy from cherrypy._cpcompat import ntou from cherrypy.test import helper class ETagTest(helper.CPWebCase): @staticmethod def setup_server(): class Root: @cherrypy.expose def resource(self): return "Oh wah ta goo Siam." @cherrypy.expose def fail(self, code): code = int(code) if 300 <= code <= 399: raise cherrypy.HTTPRedirect([], code) else: raise cherrypy.HTTPError(code) @cherrypy.expose # In Python 3, tools.encode is on by default @cherrypy.config(**{'tools.encode.on': True}) def unicoded(self): return ntou('I am a \u1ee4nicode string.', 'escape') conf = {'/': {'tools.etags.on': True, 'tools.etags.autotags': True, }} cherrypy.tree.mount(Root(), config=conf) def test_etags(self): self.getPage("/resource") self.assertStatus('200 OK') self.assertHeader('Content-Type', 'text/html;charset=utf-8') self.assertBody('Oh wah ta goo Siam.') etag = self.assertHeader('ETag') # Test If-Match (both valid and invalid) self.getPage("/resource", headers=[('If-Match', etag)]) self.assertStatus("200 OK") self.getPage("/resource", headers=[('If-Match', "*")]) self.assertStatus("200 OK") self.getPage("/resource", headers=[('If-Match', "*")], method="POST") self.assertStatus("200 OK") self.getPage("/resource", headers=[('If-Match', "a bogus tag")]) self.assertStatus("412 Precondition Failed") # Test If-None-Match (both valid and invalid) self.getPage("/resource", headers=[('If-None-Match', etag)]) self.assertStatus(304) self.getPage("/resource", method='POST', headers=[('If-None-Match', etag)]) self.assertStatus("412 Precondition Failed") self.getPage("/resource", headers=[('If-None-Match', "*")]) self.assertStatus(304) self.getPage("/resource", headers=[('If-None-Match', "a bogus tag")]) self.assertStatus("200 OK") def test_errors(self): self.getPage("/resource") self.assertStatus(200) etag = self.assertHeader('ETag') # Test raising errors in page handler self.getPage("/fail/412", headers=[('If-Match', etag)]) self.assertStatus(412) self.getPage("/fail/304", headers=[('If-Match', etag)]) self.assertStatus(304) self.getPage("/fail/412", headers=[('If-None-Match', "*")]) self.assertStatus(412) self.getPage("/fail/304", headers=[('If-None-Match', "*")]) self.assertStatus(304) def test_unicode_body(self): self.getPage("/unicoded") self.assert
Status(200)
        etag1 = self.assertHeader('ETag')

        self.getPage("/unicoded", headers=[('If-Match', etag1)])
        self.assertStatus(200)
self.assertHeader('ETag', etag1)
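What these assertions exercise, a 304 when If-None-Match matches the current tag and a 200 otherwise, can be shown with the stdlib. A toy validator (CherryPy's autotags option hashes the response body similarly, though its details differ):

import hashlib


def etag_for(body):
    return '"%s"' % hashlib.sha1(body.encode()).hexdigest()


def respond(body, if_none_match=None):
    tag = etag_for(body)
    if if_none_match in (tag, "*"):
        return 304, tag, ""  # client copy is still fresh
    return 200, tag, body


status, tag, _ = respond("Oh wah ta goo Siam.")
print(respond("Oh wah ta goo Siam.", if_none_match=tag))  # (304, '"..."', '')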
daryllstrauss/tango
test_mat.py
Python
mit
2,661
0.001127
import unittest import matmath import numpy as np import math class TestMatrix(unittest.TestCase): def testRotX(self): mat = matmath.xRotationMatrix(math.radians(90)) pt = np.array([1, 0, 0, 1]) npt = pt.dot(mat) np.testing.assert_almost_equal(npt, [1, 0, 0, 1]) pt = np.array([0, 1, 0, 1]) npt = pt.dot(mat) np.testing.assert_almost_equal(npt, [0, 0, 1, 1]) pt = np.array([0, 0, 1, 1]) npt = pt.dot(mat) np.testing.assert_almost_equal(npt, [0, -1, 0, 1]) def testRotY(self): pt = np.array([0, 0, 1, 1]) mat = matmath.yRotationMatrix(math.radians(90)) npt = pt.dot(mat) np.testing.assert_almost_equal(npt, [1, 0, 0, 1]) def testRotZ(self): pt = np.array([1, 0, 0, 1]) mat = matmath.zRotationMatrix(math.radians(90)) npt = pt.dot(mat) np.testing.assert_almost_equal(npt, [0, 1, 0, 1]) def testQuaternionMatrix(self): q = matmath.axisAngleToQuaternion([1, 0, 0], np.radians(90)) qmat = matmath.quaternionToRotationMatrix(q) rmat = matmath.xRotationMatrix(math.radians(90)) np.testing.assert_almost_equal(qmat, rmat) q = matmath.axisAngleToQuaternion([0, 1, 0], np.radians(90)) qmat = matmath.quaternionToRotationMatrix(q) rmat = matmath.yRotationMatrix(math.radians(90)) np.testing.assert_almost_equal(qmat, rmat) q = matmath.axisAngleToQuaternion([0, 0, 1], np.radians(90)) qmat = matmath.quaternionToRotationMatrix(q) rmat = matmath.zRotationMatrix(math.radians(90))
        np.testing.assert_almost_equal(qmat, rmat)

    def testMultipleRotates(self):
        r1 = matmath.xRotationMatrix(np.radians(90))
        r2 = matmath.zRotationMatrix(np.radians(90))
        mat = r1.dot(r2)
        pt = np.array([0, 0, 1, 1])
npt = pt.dot(mat) np.testing.assert_almost_equal(npt, [1, 0, 0, 1]) def test2M(self): # 2 Meters away depth scan pt = np.array([0, 0, 2, 1]) print "PC", pt mat = matmath.pcToSoSMatrix() npt = pt.dot(mat) print "SoS ", npt trans = np.array([0, 0, 0]) quaternion = matmath.axisAngleToQuaternion([1, 0, 0], np.radians(90)) mat = matmath.getPC2WorldMatrix(trans, quaternion) npt = pt.dot(mat) print "Device", npt pt = np.array([0, 1, 2, 1]) print "PC", pt mat = matmath.pcToSoSMatrix() npt = pt.dot(mat) print "SoS ", npt mat = matmath.getPC2WorldMatrix(trans, quaternion) npt = pt.dot(mat) print "Device", npt
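The matmath module under test is not part of this entry, so the function below is a guess reconstructed from the assertions (note the row-vector convention, pt.dot(mat)) rather than the project's actual code:

import numpy as np


def xRotationMatrix(theta):
    # row-vector convention: apply with pt.dot(mat), as the tests do
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[1, 0, 0, 0],
                     [0, c, s, 0],
                     [0, -s, c, 0],
                     [0, 0, 0, 1]])


pt = np.array([0, 1, 0, 1])
print(pt.dot(xRotationMatrix(np.radians(90))).round(6))  # [0. 0. 1. 1.]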
iulian787/spack
var/spack/repos/builtin/packages/voropp/package.py
Python
lgpl-2.1
1,349
0.001483
# Copyright 2013-2020 Lawrence Livermore National Sec
urity, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class Voropp(MakefilePackage):
    """Voro++ is an open source software library for the computation of the
    Voronoi diagr
am, a widely-used tessellation that has applications in many
    scientific fields."""

    homepage = "http://math.lbl.gov/voro++/about.html"
    url = "http://math.lbl.gov/voro++/download/dir/voro++-0.4.6.tar.gz"

    variant('pic', default=True, description='Position independent code')

    version('0.4.6', sha256='ef7970071ee2ce3800daa8723649ca069dc4c71cc25f0f7d22552387f3ea437e')

    def edit(self, spec, prefix):
        filter_file(r'CC=g\+\+', 'CC={0}'.format(self.compiler.cxx),
                    'config.mk')
        filter_file(r'PREFIX=/usr/local', 'PREFIX={0}'.format(self.prefix),
                    'config.mk')
        # We can safely replace the default CFLAGS which are:
        # CFLAGS=-Wall -ansi -pedantic -O3
        cflags = ''
        if '+pic' in spec:
            cflags += self.compiler.cc_pic_flag
        filter_file(r'CFLAGS=.*', 'CFLAGS={0}'.format(cflags), 'config.mk')
servalproject/nikola
nikola/plugins/compile_rest/youtube.py
Python
mit
2,306
0
# Copyright (c) 2012 Roberto Alsina y otros. # Permission is hereby granted, free of charge, to any # person obtaining a copy of this software and associated # documentation files (the "Software"), to deal in the # Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the # Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice # shall be included in all copies or substantial portions of # the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. from docutils import nodes from docutils.parsers.rst import Directive, directives CODE = """\ <iframe width="{width}" height="{height}" src="http://www.youtube.com/embed/{yid}?rel=0&amp;hd=1&amp;wmode=transparent" ></iframe>""" class Youtube(Directive): """ Restructured text extension for inserting youtube embedded videos Usage: .. youtube:: lyViVmaBQDg :height: 400 :width: 600 """ has_content = True required_arguments = 1 option_spec = { "width": directives.positive_int, "height": directives.positive_int, } def run(self): self.check_content() options = { 'yid': self.arguments[0], 'width': 425,
            'height': 344,
        }
        options.update(self.options)
        return [nodes.raw('', CODE.form
at(**options), format='html')] def check_content(self): if self.content: raise self.warning("This directive does not accept content. The " "'key=value' format for options is deprecated, " "use ':key: value' instead") directives.register_directive('youtube', Youtube)
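A quick end-to-end check of the directive (assuming docutils is installed and the module above has been imported, so 'youtube' is registered):

from docutils.core import publish_string

rst = """\
.. youtube:: lyViVmaBQDg
   :height: 400
   :width: 600
"""

# renders the reST snippet to HTML, embedding the generated iframe
print(publish_string(rst, writer_name='html'))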
LettError/glyphNameFormatter
Lib/glyphNameFormatter/rangeProcessors/gujarati.py
Python
bsd-3-clause
668
0.008982
def process(self):
    #GUJARATI VOWEL SIGN CANDRA E
    #GUJARATI VOWEL CANDRA E
    self.edit("GUJARATI")
    self.edit("LETTER")
    self.edit("DIGIT")
    self.processAs("Helper Indic")
    self.edit("VOWEL SIGN", "sign")
    self.edit("VOWEL")
    self.edit("SIGN")
    self.edit("THREE-DOT NUKTA ABOVE", "threedotnuktaabove
") self.edit("TWO-CIRCL
E NUKTA ABOVE", "twocirclenuktaabove")
    self.processAs("Helper Numbers")
    self.lower()
    self.compress()
    self.scriptPrefix()


if __name__ == "__main__":
    from glyphNameFormatter.exporters import printRange
    from glyphNameFormatter.tools import debug
    printRange("Gujarati")
    debug(0x0AFA)
micolous/helvetic
helvetic/views/aria_api.py
Python
agpl-3.0
5,411
0.030309
# -*- mode: python; indent-tabs-mode: nil; tab-width: 2 -*- """ aria_api.py - implements handlers which are for the Aria to talk to helvetic. """ from __future__ import absolute_import from base64 import b16encode from crc16 import crc16xmodem from datetime import timedelta from decimal import Decimal from django.contrib.auth.models import User from django.db import transaction from django.http import HttpResponse, HttpResponseForbidden, HttpResponseBadRequest from django.utils.decorators import method_decorator from django.views.decorators.csrf import csrf_exempt from django.views.generic import View from string import hexdigits import struct from time import time from ..models import AuthorisationToken, Measurement, Scale, utcnow class ScaleValidateView(View): def get(self, request): # Context: https://github.com/micolous/helvetic/issues/1 # # Sometimes the scale is trying to verify that it authenticated with the # correct token. We don't really care about these requests (it is handled # by /scale/register aka ScaleRegisterView), so we can just always return # "T" (OK). # # The real service returns "F" on error. return HttpResponse('T') class ScaleRegisterView(View): def get(self, request): if 'serialNumber' not in request.GET: return HttpResponseBadRequest('serialNumber missing') if 'token' not in request.GET: return HttpResponseBadRequest('token missing') if 'ssid' not in request.GET: return Htt
pResponseBadRequest('ssid missing')

        serial = request.GET['serialNumber'].upper()
        token = request.GET['token']
        ssid = request.GET['ssid']

        if len(serial) != 12:
            return HttpResponseBadRequest('serialNumber must be 12 bytes')

        if any(((x not in hexdigits) for x in serial)):
            return HttpRespon
seBadRequest('serial must only contain hex') # Lookup the authorisation token auth_token = AuthorisationToken.lookup_token(token) if auth_token is None: return HttpResponseForbidden('Bad auth token') owner = auth_token.user # Delete the token. auth_token.delete() # Register the Aria scale = Scale.objects.create( hw_address=serial, ssid=ssid, owner=owner, ) # Only return 200 OK return HttpResponse('') class ScaleUploadView(View): @method_decorator(csrf_exempt) @method_decorator(transaction.atomic) def dispatch(self, *args, **kwargs): return super(ScaleUploadView, self).dispatch(*args, **kwargs) def post(self, request): now = utcnow() body = request.body # Version 3 protocol proto_ver, battery_pc, mac, auth_code = struct.unpack('<LL6s16s', body[:30]) body = body[30:] if proto_ver != 3: return HttpResponseBadRequest('Unknown protocol version: %d' % proto_ver) if battery_pc > 100 or battery_pc < 0: return HttpResponseBadRequest('Battery percentage must be 0..100 (got %d)' % battery_pc) mac, auth_code = [b16encode(x) for x in (mac, auth_code)] scale = None try: scale = Scale.objects.get(hw_address=mac) except Scale.DoesNotExist: return HttpResponseBadRequest('Unknown scale: %s' % mac) # Check authcode if scale.auth_code is None or scale.auth_code == '': scale.auth_code = auth_code elif scale.auth_code != auth_code: return HttpResponseForbidden('Invalid auth code') scale.battery_percent = battery_pc fw_ver, unknown2, scale_now, measurement_count = struct.unpack('<LLLL', body[:16]) body = body[16:] scale.fw_version = fw_ver scale.save() for x in range(measurement_count): if len(body) < 32: return HttpResponseBadRequest('Measurement truncated.') id2, imp, weight, ts, uid, fat1, covar, fat2 = \ struct.unpack('<LLLLLLLL', body[:32]) # Record the measurement # Look up the owner of this measurement if uid == 0: measured_user = None else: try: measured_user = User.objects.get(id=uid) except User.NotFound: measured_user = None measurement = Measurement.objects.create( user=measured_user, scale=scale, when=now - timedelta(seconds=scale_now - ts), weight=weight, body_fat=Decimal(fat1) / Decimal(1000), ) body = body[32:] # Formulate a response scale_users = scale.users.all() response = struct.pack('<LBBBL', int(time()), # Fill with current time, to account for processing delay scale.unit, 0x32, # status = configured 0x01, # unknown len(scale_users) ) # Insert user info for profile in scale_users: last_weight = min_var = max_var = 0 last_measurement = profile.latest_measurement() if last_measurement is not None: last_weight = ((last_measurement.weight) // 1000) * 1000 min_var = last_weight - 4000 if min_var < 0: min_var = 0 max_var = last_weight + 4000 response += struct.pack('<L16x20sLLLBLLLLLL', profile.user.id, profile.short_name_formatted(), min_var, max_var, profile.age(), profile.gender, profile.height, 0, # some weight 0, # body fat 0, # covariance 0, # another weight 0, # timestamp ) response = response + struct.pack('<LLL', 0, # always 0 3, # update status: no 0, # unknown ) trailer = 0x19 + (len(scale_users) * 0x4d) response = response + struct.pack('<HH', crc16xmodem(response), # checksum trailer, ) hr = HttpResponse(response) # Content-Length is a required element hr['Content-Length'] = str(len(response)) return hr
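The fixed-size header parsing at the top of ScaleUploadView.post() is easy to sanity-check in isolation: pack a sample 30-byte header with the same '<LL6s16s' layout the view unpacks.

import struct

# protocol version 3, 87% battery, a made-up MAC and a zeroed auth code
header = struct.pack('<LL6s16s', 3, 87, b'\xaa' * 6, b'\x00' * 16)

proto_ver, battery_pc, mac, auth_code = struct.unpack('<LL6s16s', header[:30])
print(proto_ver, battery_pc, mac.hex())  # 3 87 aaaaaaaaaaaa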
sbesson/zeroc-ice
scripts/IceGridAdmin.py
Python
gpl-2.0
11,612
0.007492
#!/usr/bin/env python # ********************************************************************** # # Copyright (c) 2003-2013 ZeroC, Inc. All rights reserved. # # This copy of Ice is licensed to you under the terms described in the # ICE_LICENSE file included in this distribution. # # ********************************************************************** import sys, os, TestUtil, shlex from threading import Thread # # Set nreplicas to a number N to test replication with N replicas. # #nreplicas=0 nreplicas=1 iceGridPort = 12010; nodeOptions = r' --Ice.Warn.Connections=0' + \ r' --IceGrid.Node.Endpoints=default' + \ r' --IceGrid.Node.WaitTime=240' + \ r' --Ice.ProgramName=icegridnode' + \ r' --IceGrid.Node.Trace.Replica=0' + \ r' --IceGrid.Node.Trace.Activator=0' + \ r' --IceGrid.Node.Trace.Adapter=0' + \ r' --IceGrid.Node.Trace.Server=0' + \ r' --IceGrid.Node.ThreadPool.SizeWarn=0' + \ r' --IceGrid.Node.PrintServersReady=node1' + \ r' --Ice.NullHandleAbort' + \ r' --Ice.ThreadPool.Server.Size=0' + \ r' --Ice.ServerIdleTime=0' registryOptions = r' --Ice.Warn.Connections=0' + \ r' --IceGrid.Registry.PermissionsVerifier=IceGrid/NullPermissionsVerifier' + \ r' --IceGrid.Registry.AdminPermissionsVerifier=IceGrid/NullPermissionsVerifier' + \ r' --IceGrid.Registry.SSLPermissionsVerifier=IceGrid/NullSSLPermissionsVerifier' + \ r' --IceGrid.Registry.AdminSSLPermissionsVerifier=IceGrid/NullSSLPermissionsVerifier' + \ r' --IceGrid.Registry.Server.Endpoints=default' + \ r' --IceGrid.Registry.Internal.Endpoints=default' + \ r' --IceGrid.Registry.SessionManager.Endpoints=default' + \ r' --IceGrid.Registry.AdminSessionManager.Endpoints=default' + \ r' --IceGrid.Registry.Trace.Session=0' + \ r' --IceGrid.Registry.Trace.Application=0' + \ r' --IceGrid.Registry.Trace.Node=0' + \ r' --IceGrid.Registry.Trace.Replica=0' + \ r' --IceGrid.Registry.Trace.Adapter=0' + \ r' --IceGrid.Registry.Trace.Object=0' + \ r' --IceGrid.Registry.Trace.Server=0' + \ r' --IceGrid.Registry.Trace.Locator=0' + \ r' --Ice.ThreadPool.Server.Size=0 ' + \ r' --Ice.ThreadPool.Client.SizeWarn=0' + \ r' --IceGrid.Registry.Client.ThreadPool.SizeWarn=0' + \ r' --Ice.ServerIdleTime=0' + \ r' --IceGrid.Registry.DefaultTemplates="' + \ os.path.abspath(os.path.join(TestUtil.toplevel, "cpp", "config", "templates.xml") + '"') def getDefaultLocatorProperty(): i = 0 property = '--Ice.Default.Locator="IceGrid/Locator'; objrefs = "" while i < nreplicas + 1: objrefs = objrefs + ':default -p ' + str(iceGridPort + i) i = i + 1 return ' %s%s"' % (property, objrefs) def startIceGridRegistry(testdir, dynamicRegistration = False): iceGrid = TestUtil.getIceGridRegistry() command = ' --nowarn ' + registryOptions if dynamicRegistration: command += r' --IceGrid.Registry.DynamicRegistration' procs = [] i = 0 while i < (nreplicas + 1): if i == 0: name = "registry" else: name = "replica-" + str(i) dataDir = os.path.join(testdir, "db", name) if not os.path.exists(dataDir): os.mkdir(dataDir) else: cleanDbDir(dataDir) sys.stdout.write("starting icegrid " + name + "... 
") sys.stdout.flush() cmd = command + ' ' + TestUtil.getQtSqlOptions('IceGrid') + \ r' --Ice.ProgramName=' + name + \ r' --IceGrid.Registry.Client.Endpoints="default -p ' + str(iceGridPort + i) + '" ' + \ r' --IceGrid.Registry.Data="' + dataDir + '" ' if i > 0: cmd += r' --IceGrid.Registry.ReplicaName=' + name + ' ' + getDefaultLocatorProperty() driverConfig = TestUtil.DriverConfig("server") driverConfig.lang = "cpp" proc = TestUtil.startServer(iceGrid, cmd, driverConfig, count = 5) procs.append(proc) print("ok") i = i + 1 return procs def shutdownIceGridRegistry(procs): i = nreplicas while i > 0: sys.stdout.write("shutting down icegrid replica-" + str(i) + "...
") sys.stdout.flush() iceGridAdmin("registry shutdown replica-" + str(i)) print("ok") i = i - 1 sys.stdout.write("shutting down icegrid registry... ") sys.std
out.flush() iceGridAdmin("registry shutdown") print("ok") for p in procs: p.waitTestSuccess() def iceGridNodePropertiesOverride(): # # Create property overrides from command line options. # overrideOptions = '' for opt in shlex.split(TestUtil.getCommandLineProperties("", TestUtil.DriverConfig("server"))): opt = opt.strip().replace("--", "") index = opt.find("=") if index == -1: overrideOptions += ("%s=1 ") % opt else: key = opt[0:index] value = opt[index + 1:] if(value.find(' ') == -1): overrideOptions += ("%s=%s ") % (key, value) else: # # NOTE: We need 2 backslash before the quote to run the # C# test/IceGrid/simple test with SSL. # overrideOptions += ("%s=\\\"%s\\\" ") % (key, value.replace('"', '\\\\\\"')) return overrideOptions def startIceGridNode(testdir): iceGrid = TestUtil.getIceGridNode() dataDir = os.path.join(testdir, "db", "node") if not os.path.exists(dataDir): os.mkdir(dataDir) else: cleanDbDir(dataDir) overrideOptions = '" ' + iceGridNodePropertiesOverride() overrideOptions += ' Ice.ServerIdleTime=0 Ice.PrintProcessId=0 Ice.PrintAdapterReady=0"' sys.stdout.write("starting icegrid node... ") sys.stdout.flush() command = r' --nowarn ' + nodeOptions + getDefaultLocatorProperty() + \ r' --IceGrid.Node.Data="' + dataDir + '"' \ r' --IceGrid.Node.Name=localnode' + \ r' --IceGrid.Node.PropertiesOverride=' + overrideOptions driverConfig = TestUtil.DriverConfig("server") driverConfig.lang = "cpp" proc = TestUtil.startServer(iceGrid, command, driverConfig, adapter='node1') print("ok") return proc def iceGridAdmin(cmd, ignoreFailure = False): iceGridAdmin = TestUtil.getIceGridAdmin() user = r"admin1" if cmd == "registry shutdown": user = r"shutdown" command = getDefaultLocatorProperty() + r" --IceGridAdmin.Username=" + user + " --IceGridAdmin.Password=test1 " + \ r' -e "' + cmd + '"' if TestUtil.appverifier: TestUtil.setAppVerifierSettings([TestUtil.getIceGridAdmin()]) driverConfig = TestUtil.DriverConfig("client") driverConfig.lang = "cpp" proc = TestUtil.startClient(iceGridAdmin, command, driverConfig) status = proc.wait() if TestUtil.appverifier: TestUtil.appVerifierAfterTestEnd([TestUtil.getIceGridAdmin()]) if not ignoreFailure and status: print(proc.buf) sys.exit(1) return proc.buf def killNodeServers(): for server in iceGridAdmin("server list"): server = server.strip() iceGridAdmin("server disable " + server, True) iceGridAdmin("server signal " + server + " SIGKILL", True) def iceGridTest(application, additionalOptions = "", applicationOptions = ""): testdir = os.getcwd() if not TestUtil.isWin32() and os.getuid() == 0: print print("*** can't run test as root ***") print return if TestUtil.getDefaultMapping() == "java": os.environ['CLASSPATH'] = os.path.join(os.getcwd(), "classes") + os.pathsep + os.environ.get("CLASSPATH", "") client = TestUtil.getDefaultCli
ildar-band/rd90
rd90.py
Python
mit
18
0.055556
pr
int("My sc
ript")
nigusgirma/https-svn.pjsip.org-repos-pjproject-trunk-
tests/pjsua/scripts-sipp/uas-answer-200-reinvite-without-sdp.py
Python
gpl-2.0
136
0
# $Id$
#
import inc_const as const

PJSUA = ["--null-audio --max-calls=1 $SIPP_URI"]

PJSUA_EXPECTS
= [[0, const.STATE_C
ONFIRMED, "v"]]
iamgyz/remote-system-control
server.py
Python
mit
2,750
0.020364
import paho.mqtt.publish as publish import paho.mqtt.client as mqtt import socket import json from datetime import datetime import configparser ''' Author: GYzheng, guanggyz@gmail.com ###Server side We have two topic, one is from client to server, the other one is from client to server 1. Server->Client : sc_topic 2. Client->Server : cs_topic ''' class command_handler: def __init__(self,host,port,topic): self.host = host self.port = int(port) self.sc_topic = 'sc_'+topic self.cs_topic = 'cs_'+topic self.get_host_info() self.subscribe_msg() def send_command(self,cmd): msg = self.json_generator(cmd,'run')#cmd,status self.send_msg(msg) def get_host_info(self): self.host_name = socket.gethostname() self.host_ip = socket.gethostbyname(socket.gethostname()) def subscribe_msg(self): self.subscriber = mqtt.Client() self.subscriber.on_connect = self.on_connect self.subscriber.on_message = self.on_message self.is_connect = False #using this variable to wait for connect ready self.subscriber.connect(self.host,self.port);#keep_alive=60 self.subscriber.loop_start() while self.is_co
nnect == False:
            pass  # do nothing while waiting for the connection

    def send_msg(self, msg):
        publish.single(self.sc_topic, msg, hostname=self.host, port=self.port)

    def on_connect(self, client, userdata, flags, rc):
        self.is_connect = True
        # subscribe data from server
        client.subscribe(se
lf.cs_topic); def on_message(self,client,user_data,msg): try: tmp = json.loads(msg.payload.decode('utf-8','ignore')) client_name = tmp['name'] client_ip = tmp['ip'] client_status = tmp['status'] client_result = tmp['result'] print(client_name+","+client_ip+","+client_status) print(client_result) except: print("Not Json format!") def json_generator(self,cmd,status): msg = json.dumps({'name':self.host_name,'ip':self.host_ip,'timestamp':datetime.now().strftime('%Y-%m-%d %H:%M:%S'),'status':status,'cmd':cmd}) return msg #main function if __name__ == "__main__": config = configparser.ConfigParser() config.read('server.conf') broker_ip = config['server.conf']['broker_ip'] broker_port = config['server.conf']['broker_port'] topic = config['server.conf']['topic'] ch = command_handler(broker_ip,broker_port,topic); print("Server start! Broker IP = "+broker_ip+", Broker PORT = "+broker_port+", topic = "+topic) while True: cmd = input("Please input command:\n") ch.send_command(cmd) pass
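Broker plumbing aside, the wire protocol between this server and its clients is just a JSON envelope. A broker-free sketch of what json_generator() emits and what on_message() expects to decode:

import json
from datetime import datetime


def json_generator(host_name, host_ip, cmd, status):
    return json.dumps({'name': host_name, 'ip': host_ip,
                       'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                       'status': status, 'cmd': cmd})


msg = json_generator('server-1', '10.0.0.5', 'uptime', 'run')
decoded = json.loads(msg)
print(decoded['name'], decoded['cmd'])  # server-1 uptime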
analogue/pyramid_swagger
tests/includeme_test.py
Python
bsd-3-clause
3,308
0
from bravado_core.spec import Spec import mock from pyramid.config import Configurator from pyramid.registry import Registry import pytest from swagger_spec_validator.common import SwaggerValidationError import pyramid_swagger from pyramid_swagger.model import SwaggerSchema @mock.patch('pyramid_swagger.register_api_doc_endpoints') @mock.patch('pyramid_swagger.get_swagger_schema') @mock.patch('pyramid_swagger.get_swagger_spec') def test_disable_api_doc_views(_1, _2, mock_register): settings = { 'pyramid_swagger.enable_api_doc_views': False, 'pyramid_swagger.enable_swagger_spec_validation': False, } mock_config = mock.Mock( spec=Configurator, registry=mock.Mock(spec=Registry, settings=settings)) pyramid_swagger.includeme(mock_config) assert not mock_register.called def test_bad_schema_validated_on_include(): settings = { 'pyramid_swagger.schema_directory': 'tests/sample_schemas/bad_app/', 'pyramid_swagger.enable_swagger_spec_validation': True, } mock_config = mock.Mock(registry=mock.Mock(settings=settings)) with pytest.raises(SwaggerValidationError): pyramid_swagger.includeme(mock_config) # TODO: Figure out why this assertion fails on travis # assert "'info' is a required property" in str(excinfo.value) @mock.patch('pyramid_swagger.get_swagger_spec') def test_bad_schema_not_validated_if_spec_validation_is_disabled(_): settings = { 'pyramid_swagger.schema_directory': 'tests/sample_schemas/bad_app/', 'pyramid_swagger.enable_swagger_spec_validation': False, } mock_config = mock.Mock( spec=Configurator, registry=mock.Mock(settings=settings)) pyramid_swagger.includeme(mock_config) @mock.patch('pyramid_swagger.register_api_doc_endpoints') def test_swagger_12_only(mock_register): settings = { 'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/', 'pyramid_swagg
er.swagger_versions': ['1.2']
    }
    mock_config = mock.Mock(registry=mock.Mock(settings=settings))
    pyramid_swagger.includeme(mock_config)
    assert isinstance(settings['pyramid_swagger.schema12'], SwaggerSchema)
    assert mock_register.call_count == 1


@mock.patch('pyramid_swagger.register_api_doc_endpoints')
def test_swagger_20_only(mock_register):
    settings = {
        'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/',
        'pyramid
_swagger.swagger_versions': ['2.0'] } mock_config = mock.Mock(registry=mock.Mock(settings=settings)) pyramid_swagger.includeme(mock_config) assert isinstance(settings['pyramid_swagger.schema20'], Spec) assert not settings['pyramid_swagger.schema12'] assert mock_register.call_count == 1 @mock.patch('pyramid_swagger.register_api_doc_endpoints') def test_swagger_12_and_20(mock_register): settings = { 'pyramid_swagger.schema_directory': 'tests/sample_schemas/good_app/', 'pyramid_swagger.swagger_versions': ['1.2', '2.0'] } mock_config = mock.Mock(registry=mock.Mock(settings=settings)) pyramid_swagger.includeme(mock_config) assert isinstance(settings['pyramid_swagger.schema20'], Spec) assert isinstance(settings['pyramid_swagger.schema12'], SwaggerSchema) assert mock_register.call_count == 2
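One trick these tests lean on is worth spelling out: mock.Mock(registry=mock.Mock(settings=settings)) hands includeme() a registry whose settings attribute is a real dict, so the test can inspect what was written into it afterwards. A standalone illustration:

import mock

settings = {'demo.flag': False}
config = mock.Mock(registry=mock.Mock(settings=settings))

config.registry.settings['demo.flag'] = True
print(settings)  # {'demo.flag': True}: the very same dict was mutated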
minhphung171093/GreenERP_V8
openerp/addons/website/controllers/main.py
Python
agpl-3.0
20,190
0.003566
# -*- coding: utf-8 -*- import cStringIO import datetime from itertools import islice import json import xml.etree.ElementTree as ET import logging import re import werkzeug.utils import urllib2 import werkzeug.wrappers from PIL import Image import openerp from openerp.addons.web.controllers.main import WebClient from openerp.addons.web import http from openerp.http import request, STATIC_CACHE from openerp.tools import image_save_for_web logger = logging.getLogger(__name__) # Completely arbitrary limits MAX_IMAGE_WIDTH, MAX_IMAGE_HEIGHT = IMAGE_LIMITS = (1024, 768) LOC_PER_SITEMAP = 45000 SITEMAP_CACHE_TIME = datetime.timedelta(hours=12) class Website(openerp.addons.web.controllers.main.Home): #------------------------------------------------------ # View #------------------------------------------------------ @http.route('/', type='http', auth="public", website=True) def index(self, **kw): page = 'homepage' try: main_menu = request.registry['ir.model.data'].get_object(request.cr, request.uid, 'website', 'main_menu') except Exception: pass else: first_menu = main_menu.child_id and main_menu.child_id[0] if first_menu: if not (first_menu.url.startswith(('/page/', '/?', '/#')) or (first_menu.url=='/')): return request.redirect(first_menu.url) if first_menu.url.startswith('/page/'): return request.registry['ir.http'].reroute(first_menu.url) return self.page(page) @http.route(website=True, auth="public") def web_login(self, *args, **kw): # TODO: can't we just put auth=public, ... in web client ? return super(Website, self).web_login(*args, **kw) @http.route('/website/lang/<lang>', type='http', auth="public", website=True, multilang=False) def change_lang(self, lang, r='/', **kwargs): if lang == 'default': lang = request.website.default_lang_code r = '/%s%s' % (lang, r or '/') redirect = werkzeug.utils.redirect(r or ('/%s' % lang), 303) redirect.set_cookie('website_lang', lang) return redirect @http.route('/page/<page:page>', type='http', auth="public", website=True) def page(self, page, **opt): values = { 'path': page, } # /page/website.XXX --> /page/XXX if page.startswith('website.'): return request.redirect('/page/' + page[8:], code=301) elif '.' 
not in page: page = 'website.%s' % page try: request.website.get_template(page) except ValueError, e: # page not found if request.website.is_publisher(): page = 'website.page_404' else: return request.registry['ir.http']._handle_exception(e, 404) return request.render(page, values) @http.route(['/robots.txt'], type='http', auth="public") def robots(self): return request.render('website.robots', {'url_root': request.httprequest.url_root}, mimetype='text/plain') @http.route('/sitemap.xml', type='http', auth="public", website=True) def sitemap_xml_index(self): cr, uid, context = request.cr, openerp.SUPERUSER_ID, request.context ira = request.registry['ir.attachment'] iuv = request.registry['ir.ui.view'] mimetype ='application/xml;charset=utf-8' content = None def create_sitemap(url, content): ira.create(cr, uid, dict( datas=content.encode('base64'), mimetype=mimetype, type='binary', name=url, url=url, ), context=context) sitemap = ira.search_read(cr, uid, [('url', '=' , '/sitemap.xml'), ('type', '=', 'binary')], ('datas', 'create_date'), context=context) if sitemap: # Check if stored version is still valid server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT create_date = datetime.datetime.strptime(sitemap[0]['create_date'], server_format) delta = datetime.datetime.now() - create_date if delta < SITEMAP_CACHE_TIME: content = sitemap[0]['datas'].decode('base64') if not content: # Remove all sitemaps in ir.attachments as we're going to regenerated them sitemap_ids = ira.search(cr, uid, [('url', '=like' , '/sitemap%.xml'), ('type', '=', 'binary')], context=context) if sitemap_ids: ira.unlink(cr, uid, sitemap_ids, context=context) pages = 0 first_page = None locs = request.website.sudo(user=request.website.user_id.id).enumerate_pages() while True: start = pages * LOC_PER_SITEMAP values = { 'locs': islice(locs, start, start + LOC_PER_SITEMAP), 'url_root': request.httprequest.url_root[:-1], } urls = iuv.render(cr, uid, 'website.sitemap_locs', values, context=context) if urls.strip(): page = iuv.render(cr, uid, 'website.sitemap_xml', dict(content=urls), context=context) if not first_page: first_page = page pages += 1 create_sitemap('/sitemap-%d.xml' % pages, page) else: break if not pages: return request.not_found() elif pages == 1: content = first_page else: # Sitemaps must be split in several smaller files with a sitemap index content = iuv.render(cr, uid, 'website.sitemap_index_xml', dict( pages=range(1, pages + 1), url_root=request.httprequest.url_root, ), context=context) create_sitemap('/sitemap.xml', content) return request.make_response(content, [('Content-Type', mimetype)]) @http.route('/website/info', type='http', auth="public", website=True) def website_info(self): try: request.website.get_template('website.info').name except Exception, e: return request.registry['ir.http']._handle_exception(e, 404) irm = request.env()['ir.module.module'].sudo() apps = irm.search([('state','=','installed'),('application','=',True)]) modules = irm.search([('state','=','installed'),('application','=',False)]) values = {
'apps': apps, 'modules': modules, 'v
ersion': openerp.service.common.exp_version() } return request.render('website.info', values) #------------------------------------------------------ # Edit #------------------------------------------------------ @http.route('/website/add/<path:path>', type='http', auth="user", website=True) def pagenew(self, path, noredirect=False, add_menu=None): xml_id = request.registry['website'].new_page(request.cr, request.uid, path, context=request.context) if add_menu: model, id = request.registry["ir.model.data"].get_object_reference(request.cr, request.uid, 'website', 'main_menu') request.registry['website.menu'].create(request.cr, request.uid, { 'name': path, 'url': "/page/" + xml_id, 'parent_id': id, }, context=request.context) # Reverse action in order to allow shortcut for /page/<website_xml_id> url = "/page/" + re.sub(r"^website\.", '', xml_id) if noredirect: return werkzeug.wrappers.Response(url, mimetype='text/plain') return werkzeug.utils.redirect(url) @http.route('/website/theme_change', type='http', auth="user", website=True) def theme_change(self, theme_id=False, **kwargs): imd = request.registry['ir.model.data'] Views = request.registry['ir.ui.view'] _, theme_template_id = imd.get_object_reference( request.cr, reques
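The sitemap handler above reuses a stored attachment only while it is younger than SITEMAP_CACHE_TIME. A standalone sketch of that freshness check, with the server datetime format inlined (assumed to match OpenERP's DEFAULT_SERVER_DATETIME_FORMAT):

import datetime

SITEMAP_CACHE_TIME = datetime.timedelta(hours=12)
SERVER_FORMAT = '%Y-%m-%d %H:%M:%S'

def is_cached_sitemap_fresh(create_date_str):
    # A stored sitemap is served from cache only while it is younger than
    # SITEMAP_CACHE_TIME, the same comparison made in sitemap_xml_index().
    create_date = datetime.datetime.strptime(create_date_str, SERVER_FORMAT)
    return datetime.datetime.now() - create_date < SITEMAP_CACHE_TIME

one_hour_ago = datetime.datetime.now() - datetime.timedelta(hours=1)
print(is_cached_sitemap_fresh(one_hour_ago.strftime(SERVER_FORMAT)))  # True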
willprice/weboob
weboob/browser/elements.py
Python
agpl-3.0
9,532
0.001154
# -*- coding: utf-8 -*- # Copyright(C) 2014 Romain Bignon # # This file is part of weboob. # # weboob is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # weboob is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with weboob. If not, see <http://www.gnu.org/licenses/>. import re import sys from copy import deepcopy from weboob.tools.log import getLogger, DEBUG_FILTERS from weboob.tools.ordereddict import OrderedDict from weboob.browser.pages import NextPage from .filters.standard import _Filter, CleanText from .filters.html import AttributeNotFound, XPathNotFound __all__ = ['DataError', 'AbstractElement', 'ListElement', 'ItemElement', 'TableElement', 'SkipItem'] class DataError(Exception): """ Returned data from pages are incoherent. """ def method(klass): """ Class-decorator to call it as a method. """ def inner(self, *args, **kwargs): return klass(self)(*args, **kwargs) return inner class AbstractElement(object): _creation_counter = 0 def __init__(self, page, parent=None, el=None): self.page = page self.parent = parent if el is not None: self.el = el elif parent is not None: self.el = parent.el else: self.el = page.doc if parent is not None: self.env = deepcopy(parent.env) else: self.env = deepcopy(page.params) # Used by debug self._random_id = AbstractElement._creation_counter AbstractElement._creation_counter += 1 self.loaders = {} def use_selector(self, func, key=None): if isinstance(func, _Filter): func._obj = self func._key = key value = func(self) elif isinstance(func, type) and issubclass(func, ItemElement): value = func(self.page, self, self.el)() elif callable(func): value = func() else: value = deepcopy(func) return value def parse(self, obj): pass def cssselect(self, *args, **kwargs): return self.el.cssselect(*args, **kwargs) def xpath(self, *args, **kwargs): return self.el.xpath(*args, **kwargs) def handle_loaders(self): for attrname in dir(self): m = re.match('load_(.*)', attrname) if not m: continue name = m.group(1) if name in self.loaders: continue loader = getattr(self, attrname) self.loaders[name] = self.use_selector(loader, key=attrname) class ListElement(AbstractElement): item_xpath = None flush_at_end = False ignore_duplicate = False def __init__(self, *args, **kwargs): super(ListElement, self).__init__(*args, **kwargs) self.logger = getLogger(self.__class__.__name__.lower()) self.objects = OrderedDict() def __call__(self, *args, **kwargs): for key, value in kwargs.iteritems(): self.env[key] = value return self.__iter__() def find_elements(self): """ Get the nodes that will have to be processed. This method can be overridden if xpath filters are not sufficient. 
""" if self.item_xpath is not None: for el in self.el.xpath(self.item_xpath): yield el else: yield self.el def __iter__(self): self.parse(self.el) items = [] for el in self.find_elements(): for attrname in dir(self): attr = getattr(self, attrname) if isinstance(attr, type) and issubclass(attr, AbstractElement) and attr != type(self): item = attr(self.page, self, el) item.handle_loaders() items.append(item) for item in items: for obj in item: obj = self.store(obj) if obj and not self.flush_at_end: yield obj if self.flush_at_end: for obj in self.flush(): yield obj self.check_next_page() def flush(self): for obj in self.objects.itervalues(): yield obj def check_next_page(self): if not hasattr(self, 'next_page'): return next_page = getattr(self, 'next_page') try: value = self.use_selector(next_page) except (AttributeNotFound, XPathNotFound): return if value is None: return raise NextPage(value) def store(self, obj): if obj.id: if obj.id in self.objects: if self.ignore_duplicate: self.logger.warning('There are two objects with the same ID! %s' % obj.id) return else: raise DataError('There are two objects with the same ID! %s' % obj.id) self.objects[obj.id] = obj return obj class SkipItem(Exception): """ Raise this exception in an :class:`ItemElement` subclass to skip an item. """ class _ItemElementMeta(type): """ Private meta-class used to keep order of obj_* attributes in :class:`ItemElement`. """ def __new__(mcs, name, bases, attrs): _attrs = [] for base in bases: if hasattr(base, '_attrs'): _attrs += base._attrs filters = [(re.sub('^obj_', '', attr_name), attrs[attr_name]) for attr_name, obj in attrs.items() if attr_name.startswith('obj_')] # constants first, then filters, then methods filters.sort(key=lambda x: x[1]._creation_counter if hasattr(x[1], '_creation_counter') else (sys.maxsize if callable(x[1]) else 0)) new_class = super(_ItemElementMeta, mcs).__new__(mcs, name, bases, attrs) new_class._attrs = _attrs + [f[0] for f in filters] return new_class class ItemElement(AbstractElement): __metaclass__ = _ItemElementMeta _attrs =
None _loaders = None klass = None condition = None validate = None class Index(object): pass def __init__(self, *args, **kwargs): super(ItemElement, self).__init__(*args, **kwargs) self.logger = getLogger(self.__class__.__name__.lower()) self.obj = None def build_object(self): if self.klass is None: return return self.klass() def __call__(self, obj=None): if obj is not No
ne: self.obj = obj for obj in self: return obj def __iter__(self): if self.condition is not None and not self.condition(): return try: if self.obj is None: self.obj = self.build_object() self.parse(self.el) self.handle_loaders() for attr in self._attrs: self.handle_attr(attr, getattr(self, 'obj_%s' % attr)) except SkipItem: return if self.validate is not None and not self.validate(self.obj): return yield self.obj def handle_attr(self, key, func): try: value = self.use_selector(func, key=key) except Exception as e: # Help debugging as tracebacks do not give us the key self.logger.warning('Attribute %s raises %s' % (key, repr(e))) raise logger = getLogger('b2filters') logger.log(DEBUG_FILTERS, "%s.%s = %r" % (self._random_id, key, value)) setattr(self.obj, key, value) class TableElement(ListElement): head_xpath = None cleaner = CleanText def __init__(self, *args, **kwargs): super(TableElement, self).
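The classes above are designed to be subclassed declaratively: a ListElement names the nodes to walk via item_xpath, and a nested ItemElement maps obj_* attributes to filters. A hedged sketch of that pattern; the page markup, xpaths, and the Book stand-in are invented for illustration.

from weboob.browser.pages import HTMLPage
from weboob.browser.elements import ListElement, ItemElement, method
from weboob.browser.filters.standard import CleanText

class Book(object):
    # Stand-in for a weboob capability object; ListElement.store() only
    # needs a settable `id` attribute on the built objects.
    id = None
    title = None

class BookListPage(HTMLPage):
    @method
    class iter_books(ListElement):
        item_xpath = '//ul[@id="books"]/li'  # hypothetical markup

        class item(ItemElement):
            klass = Book
            obj_id = CleanText('./@data-id')
            obj_title = CleanText('./a')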
TNT-Samuel/Coding-Projects
Machine Learning with Python/Chapter 1/P_15.py
Python
gpl-3.0
90
0
from P_14 import *

# iris_dataset["data"] holds one row per sample and one column per feature.
print("Shape of data: {}".format(iris_dataset["data"].shape))
input()
biggihs/python-pptx
features/steps/chartdata.py
Python
mit
7,361
0
# encoding: utf-8 """Gherkin step implementations for chart data features.""" from __future__ import ( absolute_import, division, print_function, unicode_literals ) import datetime from behave import given, then, when from pptx.chart.data import ( BubbleChartData, Category, CategoryChartData, XyChartData ) from pptx.enum.chart import XL_CHART_TYPE from pptx.util import Inches # given =================================================== @given('a BubbleChartData object with number format {strval}') def given_a_BubbleChartData_object_with_number_format(context, strval): params = {} if strval != 'None': params['number_format'] = int(strval) context.chart_data = BubbleChartData(**params) @given('a Categories object with number format {init_nf}') def g
iven_a_Categories_object_with_number_format_init_nf(context, init_nf): categories = CategoryChartData().categories if init_nf != 'left as default': categories.number_format = init_nf context.categories = categories @given('a Category object') def given_a_Category_object(context): context.category = Category(None, None) @given('a CategoryChartData object')
def given_a_CategoryChartData_object(context): context.chart_data = CategoryChartData() @given('a CategoryChartData object having date categories') def given_a_CategoryChartData_object_having_date_categories(context): chart_data = CategoryChartData() chart_data.categories = [ datetime.date(2016, 12, 27), datetime.date(2016, 12, 28), datetime.date(2016, 12, 29), ] context.chart_data = chart_data @given('a CategoryChartData object with number format {strval}') def given_a_CategoryChartData_object_with_number_format(context, strval): params = {} if strval != 'None': params['number_format'] = int(strval) context.chart_data = CategoryChartData(**params) @given('a XyChartData object with number format {strval}') def given_a_XyChartData_object_with_number_format(context, strval): params = {} if strval != 'None': params['number_format'] = int(strval) context.chart_data = XyChartData(**params) @given('the categories are of type {type_}') def given_the_categories_are_of_type(context, type_): label = { 'date': datetime.date(2016, 12, 22), 'float': 42.24, 'int': 42, 'str': 'foobar', }[type_] context.categories.add_category(label) # when ==================================================== @when('I add a bubble data point with number format {strval}') def when_I_add_a_bubble_data_point_with_number_format(context, strval): series_data = context.series_data params = {'x': 1, 'y': 2, 'size': 10} if strval != 'None': params['number_format'] = int(strval) context.data_point = series_data.add_data_point(**params) @when('I add a data point with number format {strval}') def when_I_add_a_data_point_with_number_format(context, strval): series_data = context.series_data params = {'value': 42} if strval != 'None': params['number_format'] = int(strval) context.data_point = series_data.add_data_point(**params) @when('I add an XY data point with number format {strval}') def when_I_add_an_XY_data_point_with_number_format(context, strval): series_data = context.series_data params = {'x': 1, 'y': 2} if strval != 'None': params['number_format'] = int(strval) context.data_point = series_data.add_data_point(**params) @when('I add an {xy_type} chart having 2 series of 3 points each') def when_I_add_an_xy_chart_having_2_series_of_3_points(context, xy_type): chart_type = getattr(XL_CHART_TYPE, xy_type) data = ( ('Series 1', ((-0.1, 0.5), (16.2, 0.0), (8.0, 0.2))), ('Series 2', ((12.4, 0.8), (-7.5, -0.5), (-5.1, -0.2))) ) chart_data = XyChartData() for series_data in data: series_label, points = series_data series = chart_data.add_series(series_label) for point in points: x, y = point series.add_data_point(x, y) context.chart = context.slide.shapes.add_chart( chart_type, Inches(1), Inches(1), Inches(8), Inches(5), chart_data ).chart @when("I assign ['a', 'b', 'c'] to chart_data.categories") def when_I_assign_a_b_c_to_chart_data_categories(context): chart_data = context.chart_data chart_data.categories = ['a', 'b', 'c'] # then ==================================================== @then("[c.label for c in chart_data.categories] is ['a', 'b', 'c']") def then_c_label_for_c_in_chart_data_categories_is_a_b_c(context): chart_data = context.chart_data assert [c.label for c in chart_data.categories] == ['a', 'b', 'c'] @then('categories.number_format is {value}') def then_categories_number_format_is_value(context, value): expected_value = value number_format = context.categories.number_format assert number_format == expected_value, 'got %s' % number_format @then('category.add_sub_category(name) is a Category object') def 
then_category_add_sub_category_is_a_Category_object(context): category = context.category context.sub_category = sub_category = category.add_sub_category('foobar') assert type(sub_category).__name__ == 'Category' @then('category.sub_categories[-1] is the new category') def then_category_sub_categories_minus_1_is_the_new_category(context): category, sub_category = context.category, context.sub_category assert category.sub_categories[-1] is sub_category @then('chart_data.add_category(name) is a Category object') def then_chart_data_add_category_name_is_a_Category_object(context): chart_data = context.chart_data context.category = category = chart_data.add_category('foobar') assert type(category).__name__ == 'Category' @then('chart_data.add_series(name, values) is a CategorySeriesData object') def then_chart_data_add_series_is_a_CategorySeriesData_object(context): chart_data = context.chart_data context.series = series = chart_data.add_series('Series X', (1, 2, 3)) assert type(series).__name__ == 'CategorySeriesData' @then('chart_data.categories is a Categories object') def then_chart_data_categories_is_a_Categories_object(context): chart_data = context.chart_data assert type(chart_data.categories).__name__ == 'Categories' @then('chart_data.categories[-1] is the category') def then_chart_data_categories_minus_1_is_the_category(context): chart_data, category = context.chart_data, context.category assert chart_data.categories[-1] is category @then('chart_data.number_format is {value_str}') def then_chart_data_number_format_is(context, value_str): chart_data = context.chart_data number_format = value_str if value_str == 'General' else int(value_str) assert chart_data.number_format == number_format @then('chart_data[-1] is the new series') def then_chart_data_minus_1_is_the_new_series(context): chart_data, series = context.chart_data, context.series assert chart_data[-1] is series @then('series_data.number_format is {value_str}') def then_series_data_number_format_is(context, value_str): series_data = context.series_data number_format = value_str if value_str == 'General' else int(value_str) assert series_data.number_format == number_format
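Outside behave, the same chart-data API can be exercised directly. A minimal sketch using only the calls that appear in the step implementations above:

from pptx.chart.data import CategoryChartData

chart_data = CategoryChartData()
chart_data.categories = ['a', 'b', 'c']
series = chart_data.add_series('Series X', (1, 2, 3))

assert [c.label for c in chart_data.categories] == ['a', 'b', 'c']
assert chart_data[-1] is series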
gawel/irc3
tests/test_slack.py
Python
mit
2,123
0
# -*- coding: utf-8 -*- import pytest from irc3.plugins import slack pytestmark = pytest.mark.asyncio async def test_simple_matches(irc3_bot_factory): bot = irc3_bot_factory(includes=['irc3.plugins.slack']) plugin = bot.get_plugin(slack.Slack) setattr(plugin, 'config', {'token': 'xoxp-faketoken'}) assert '' == await plugin.parse_text('\n') assert '' == await plugin.parse_text('\r\n') assert '' == await plugin.parse_text('\r') assert '@channel' == await plugin.parse_text('<!channel>') assert '@group' == await plugin.parse_text('<!group>') assert '@everyone' == await plugin.parse_text('<!everyone>') assert '<' == await plugin.parse_text('&lt') assert '>' == await plugin.parse_text('&gt') assert '&' == await plugin.parse_text('&amp') assert 'daniel' == await plugin.parse_text('<WHATEVER|daniel>') async def test_channel_matches(irc3_bot_factory): bot = irc3_bot_f
actory(includes=['irc3.plugins.slack']) plugin = bot.get_plugin(slack.Slack) setattr(plugin, 'config', {'token': 'xoxp-faketoken'}) async def api_call(self, method, date=None): return ({'channel': {'name': 'testchannel'}}) plugin.api_call = api_call asse
rt '#testchannel' == await plugin.parse_text('<#C12345>') assert 'channel' == await plugin.parse_text('<#C12345|channel>') async def test_user_matches(irc3_bot_factory): bot = irc3_bot_factory(includes=['irc3.plugins.slack']) plugin = bot.get_plugin(slack.Slack) setattr(plugin, 'config', {'token': 'xoxp-faketoken'}) async def api_call(self, method, date=None): return ({'user': {'name': 'daniel'}}) plugin.api_call = api_call assert '@daniel' == await plugin.parse_text('<@U12345>') assert 'user' == await plugin.parse_text('<@U12345|user>') async def test_emoji_matches(irc3_bot_factory): bot = irc3_bot_factory(includes=['irc3.plugins.slack']) plugin = bot.get_plugin(slack.Slack) setattr(plugin, 'config', {'token': 'xoxp-faketoken'}) assert ':-)' == await plugin.parse_text(':smiley:') assert ':@' == await plugin.parse_text(':rage:')
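A sketch of one more test in the same style, combining several token kinds in a single message; it assumes parse_text substitutes tokens in place when they are embedded in longer text, which the individual assertions above do not by themselves guarantee:

async def test_mixed_message(irc3_bot_factory):
    bot = irc3_bot_factory(includes=['irc3.plugins.slack'])
    plugin = bot.get_plugin(slack.Slack)
    setattr(plugin, 'config', {'token': 'xoxp-faketoken'})
    assert 'hey @channel :-)' == await plugin.parse_text(
        'hey <!channel> :smiley:')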
feus4177/socketIO-client-2
socketIO_client/symmetries.py
Python
mit
897
0.001115
import six try: from lo
gging import NullHandler except ImportError: # Python 2.6 from logging import Handler class NullHandler(Handler): def emit(self, record): pass try: from urllib import urlencode as format
_query except ImportError: from urllib.parse import urlencode as format_query try: from urlparse import urlparse as parse_url except ImportError: from urllib.parse import urlparse as parse_url try: memoryview = memoryview except NameError: memoryview = buffer def get_int(*args): try: return int(get_character(*args)) except ValueError: return ord(get_character(*args)) def get_character(x, index): return chr(get_byte(x, index)) def get_byte(x, index): return six.indexbytes(x, index) def encode_string(x): return x.encode('utf-8') def decode_string(x): return x.decode('utf-8')
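A small usage sketch for the shims above; the byte and character accessors behave identically on Python 2 and 3 because they go through six.indexbytes:

from socketIO_client.symmetries import (
    decode_string, encode_string, get_byte, get_character)

data = encode_string(u'abc')          # bytes on both Python versions
assert get_byte(data, 0) == 97        # ord('a')
assert get_character(data, 0) == 'a'
assert decode_string(data) == u'abc'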
f0lie/RogueGame
src/room.py
Python
mit
3,171
0
from random import randint from position import Position, Size from block import Room, Block class Room(object): def __init__(self, pos_row=0, pos_col=0, rows=1, cols=1, fill=Block.empty, left=Room.left, right=Room.right, top=Room.top, bottom=Room.bottom, top_left=Room.top_left, top_right=Room.top_right, bottom_left=Room.bottom_left, bottom_right=Room.bottom_right): self.pos = Position(pos_row, pos_col) self.center = Position(pos_row + (rows // 2), pos_col + (cols // 2)) self.size = Size(rows, cols) self.fill = fill # Specific the block of walls self.left = left self.right = right self.top = top self.bottom = bottom self.top_left = top_left self.top_right = top_right self.bottom_left = bottom_left self.bottom_right = bottom_right @classmethod def from_objects(cls, pos, size, **kwargs): return cls(pos.row, pos.col, size.rows, size.cols, **kwargs) def collision(self, other_room): """ Checks if two rooms intersect each other The logic is clearer as a one dimension line """ pos_2 = Position(self.pos.row + self.size.rows, self.pos.col + self.size.cols) other_room_pos_2 = Position(other_room.pos.row + other_room.size.rows, other_room.pos.col + other_room.size.cols) return (self.pos.col <= other_room_pos_2.col and pos_2.col >= other_room.pos.col and self.pos.row <=
other_room_pos_2.row and pos_2.row >= other_room.pos.row) @class
method def generate(cls, min_pos, max_pos, min_size, max_size): """ Create room from min_size to max_size between min_pos and max_pos """ size = Size(randint(min_size.rows, max_size.rows), randint(min_size.cols, max_size.cols)) pos = Position(randint(min_pos.row, max_pos.row - size.rows), randint(min_pos.col, max_pos.col - size.cols)) return cls.from_objects(pos, size) class RoomList(): def __init__(self): self._room_list = [] def __iter__(self): return iter(self._room_list) def __getitem__(self, key): return self._room_list[key] def __len__(self): return len(self._room_list) def append(self, room): self._room_list.append(room) def generate(self, num, min_pos, max_pos, min_size, max_size): """ Given a number of rooms, generate rooms that don't intersect """ for i in range(num): room = Room.generate(min_pos, max_pos, min_size, max_size) while self.is_collision(room): room = Room.generate(min_pos, max_pos, min_size, max_size) self.append(room) def is_collision(self, room): """ Iterate through the list of rooms to test for collisions """ for other_room in self: if other_room.collision(room): return True return False
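A usage sketch for the generators above, assuming src/ is on the import path and that Position/Size take (row, col) and (rows, cols) exactly as in the code:

from position import Position, Size
from room import Room, RoomList

rooms = RoomList()
# Place five non-overlapping rooms inside a 24x80 grid.
rooms.generate(num=5,
               min_pos=Position(0, 0), max_pos=Position(24, 80),
               min_size=Size(4, 6), max_size=Size(8, 14))

for room in rooms:
    print(room.pos, room.size, room.center)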
V-Paranoiaque/Domoleaf
domomaster/usr/bin/domomaster/domomaster_postinst.py
Python
gpl-3.0
4,613
0.017993
#!/usr/bin/python3 ## @package domomaster # Master daemon for D3 boxes. # # Developed by GreenLeaf. import sys; import os; import random; import string; from hashlib import sha1 from subprocess import * import socket; sys.path.append("/usr/lib/domoleaf"); from DaemonConfigParser import *; MASTER_CONF_FILE_BKP = '/etc/domoleaf/master.conf.save'; MASTER_CONF_FILE_TO = '/etc/domoleaf/master.conf'; SLAVE_CONF_FILE = '/etc/domoleaf/slave.conf'; ## Copies the conf data from a backup file to a new one. def master_conf_copy(): file_from = DaemonConfigParser(MASTER_CONF_FILE_BKP); file_to = DaemonConfigParser(MASTER_CONF_FILE_TO); #listen var = file_from.getValueFromSection('listen', 'port_slave'); file_to.writeValueFromSection('listen', 'port_slave', var); var = file_from.getValueFromSection('listen', 'port_cmd'); file_to.writeValueFromSection('listen', 'port_cmd', var); #connect var = file_from.getValueFromSection('connect', 'port'); file_to.writeValueFromSection('connect', 'port', var); #mysql var = file_from.getValueFromSection('mysql', 'user'); file_to.writeValueFromSection('mysql', 'user', var); var = file_from.getValueFromSection('mysql', 'database_name'); file_to.writeValueFromSection('mysql', 'database_name', var); #greenleaf var = file_from.getValueFromSection('greenleaf', 'commercial'); file_to.writeValueFromSection('greenleaf', 'commercial', var); var = file_from.getValueFromSection('greenleaf', 'admin_addr'); file_to.writeValueFromSection('greenleaf', 'admin_addr', var); ## Initializes the conf in database. def master_conf_initdb(): file = DaemonConfigParser(MASTER_CONF_FILE_TO); #mysql password password = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(128)) password = sha1(password.encode('utf-8')) file.writeValueFromSection('mysql', 'password', password.hexdigest()); os.system('sed -i "s/define(\'DB_PASSWORD\', \'domoleaf\')/define(\'DB_PASSWORD\', \''+password.hexdigest()+'\')/g" /etc/domoleaf/www/config.php') #mysql user query1 = 'DELETE FROM user WHERE User="domoleaf"'; query2 = 'DELETE FROM db WHERE User="domoleaf"'; query3 = 'INSERT INTO user (Host, User, Password) VALUES (\'%\', \'domoleaf\', PASSWORD(\''+password.hexdigest()+'\'));'; query4 = 'INSERT INTO db (Host, Db, User, Select_priv, Insert_priv, Update_priv, Delete_priv, Create_priv, Drop_priv, Grant_priv, References_priv, Index_priv, Alter_priv, Create_tmp_table_priv, Lock_tables_priv, Create_view_priv, Show_view_priv, Create_routine_priv, Alter_routine_priv, Execute_priv, Event_priv, Trigger_priv) VALUES ("%","domoleaf","domoleaf","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y","Y");'; query5 = 'FLUSH PRIVILEGES'; call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'mysql', '-e', query1]); call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'mysql', '-e', query2]); call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'mysql', '-e', query3]); call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'mysql', '-e', query4]); call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'mysql', '-e', query5]); ## Initializes the conf in file. 
def master_conf_init(): file = DaemonConfigParser(SLAVE_CONF_FILE); personnal_key = file.getValueFromSection('personnal_key', 'aes'); hostname = socket.gethostname(); #KNX Interface if os.path.exists('/dev/ttyAMA0'): knx = "tpuarts" knx_interface = 'ttyAMA0'; elif os.path.exists('/dev/ttyS0'): knx = "tpuarts" knx_interface = 'ttyS0'; else: knx = "ipt" knx_interface = '127.0.0.1'; domoslave = os.popen("dpkg-query -W -f='${Version}\n' domoslave").read().split('\n')[0]; query1 = "INSERT INTO daemon (name, serial, secretkey, validation, version) VALUES ('"+hostname+"','"+hostname+"','"+personnal_key+"',1,'"+domoslave+"')" query2 = "INSERT INTO daemon_protocol (daemon_id, protocol_id, interface, interface_arg) VALUES (1,1,'"+knx+"','"+knx_interface+"')" call(['mysql', '--defaults-file=/etc/m
ysql/debian.cnf', 'domoleaf',
'-e', query1]); call(['mysql', '--defaults-file=/etc/mysql/debian.cnf', 'domoleaf', '-e', query2]); if __name__ == "__main__": #Upgrade if os.path.exists(MASTER_CONF_FILE_BKP): master_conf_copy() os.remove(MASTER_CONF_FILE_BKP); else: master_conf_init() master_conf_initdb()
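The initdb step above derives the MySQL password from 128 random characters hashed with SHA-1. That pattern isolated as a stdlib-only sketch (for real credentials, secrets.choice would be the safer source of randomness):

import random
import string
from hashlib import sha1

alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
password = ''.join(random.choice(alphabet) for _ in range(128))
digest = sha1(password.encode('utf-8')).hexdigest()
print(digest)  # 40 hex characters, written into config.php and MySQL above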
VinnieJohns/ggrc-core
src/ggrc/migrations/versions/20170105231037_579239d161e1_create_missing_snapshot_revisions.py
Python
apache-2.0
1,087
0.0046
# Copyright (C) 2017 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> """Create missing snapshot revisions. Create Date: 2017-01-05 23:10:37.257161 """ # disable Invalid constant name pylint warning for mandatory Alembic variables. # pylint: disable=invalid-name from ggrc.migrations.utils.snapshot_revisions import handle_objects # revision identifiers, used by Alembic. revision = '579239d161e1' down_revision = '353e5f281799' def upgrade(): """Create missing revisions for snapshottable objects.""" # copy pasted from ggrc.snapshoter.rules.Types.all snapshot_objects = sorted([ "AccessGroup", "Clause", "Control", "DataAsset", "Facility", "Market", "Objective", "OrgGroup", "Product", "Sec
tion", "Vendor", "Policy", "Regulation", "Standard", "Contract", "System", "
Process", "Risk", "Threat", ]) handle_objects(snapshot_objects) def downgrade(): """Data correction migrations can not be downgraded."""
lueschem/edi
edi/commands/version.py
Python
lgpl-3.0
1,351
0
# -*- coding: utf-8 -*- # Copyright (C) 2017 Matthias Luescher # # Authors: # Matthias Luescher # # This file is part of edi. # # edi is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # edi is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with edi. If not, see <http://www.gnu.org/licenses/>. from edi.lib.edicommand import EdiCommand from edi.lib.versionhelpers import get_edi_version class Version(EdiCommand): @classmethod def advertise(cls, subparsers): help_text = "print the program version" description_text = "Print the program version." subparsers.add_parser(cls._get_short_command_name(), help=help_text, description=description_text) def run_cli(self, _):
ver
sion = self.run() print(version) @staticmethod def run(): return get_edi_version()
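advertise() plugs each command into a shared argparse subparser set. A standalone sketch of that dispatch pattern with plain argparse; the EdiCommand plumbing is not reproduced:

import argparse

parser = argparse.ArgumentParser(prog='edi')
subparsers = parser.add_subparsers(dest='command')
subparsers.add_parser('version', help='print the program version',
                      description='Print the program version.')

args = parser.parse_args(['version'])
assert args.command == 'version'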
Macmod/rgb-remote-tts
remote-gtts/remote2gtts.py
Python
mit
507
0
#!/usr/bin/env python3
import vlc
from tempfile import NamedTemporaryFile
from gtts import gTTS
from remote2text import RGBRemote2Text

parser = RGBRemote2Text(verbose=True)

while True:
    ir_out = input()
    response = parser.process(ir_out)

    if response:
        tts = gTTS(text=response, lang='pt')
        # Flush the synthesized speech to disk before handing it to VLC.
        tmp = NamedTemporaryFile(suffix='.mp3', delete=False)
        tts.write_to_fp(tmp)
        tmp.close()

        # tmp.name is already an absolute path inside the temp directory,
        # so no os.path.join(gettempdir(), ...) round-trip is needed.
        vlc.MediaPlayer(tmp.name).play()
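A trimmed sketch of the speech step above, using gTTS's file-based save() instead of the temp-file/VLC pipeline; the phrase and output path are placeholders:

from gtts import gTTS

tts = gTTS(text='aumentar o volume', lang='pt')
tts.save('/tmp/response.mp3')  # hand the file to any mp3-capable player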
everypony/ponyFiction
ponyFiction/views/chapter.py
Python
gpl-3.0
5,461
0.001492
# -*- coding: utf-8 -*- from django.contrib.auth.decorators import login_required from django.core.exceptions import PermissionDenied from django.db.models import Max, F from django.shortcuts import render, get_object_or_404, redirect from django.utils.decorators import method_decorator from django.views.generic.edit import CreateView, UpdateView, DeleteView from ponyFiction import signals from ponyFiction.forms.chapter import ChapterForm from ponyFiction.models import Story, Chapter, Author from django.views.decorators.csrf import csrf_protect from cacheops import invalidate_obj from .story import get_story def chapter_view(request, story_id=False, chapter_order=False): story = get_story(request, pk=story_id) if chapter_order: chapter = get_object_or_404(story.chapter_set, order=chapter_order) page_title = "{} — {}".format(chapter.title[:80], story.title) prev_chapter = chapter.get_prev_chapter() next_chapter = chapter.get_next_chapter() if request.user.is_authenticated(): signals.story_viewed.send(sender=Author, instance=request.user, story=story, chapter=chapter) data = { 'story': story, 'chapter': chapter, 'prev_chapter': prev_chapter, 'next_chapter': next_chapter, 'page_title': page_title, 'allchapters': False } else: chapters = story.chapter_set.order_by('order').cache() page_title = "{} — все главы".format(story.title) if request.user.is_authenticated(): signals.story_viewed.send(sender=Author, instance=request.user, story=story, chapter=None) data = { 'story': story, 'chapters': chapters, 'page_title': page_title, 'allchapters': True } return render(request, 'chapter_view.html', data) class ChapterAdd(CreateView): model = Chapter form_class = ChapterForm template_name = 'chapter_work.html' initial = {'button_submit': 'Добавить'} story = None @method_decorator(login_required) @method_decorator(csrf_protect) def dispatch(self, request, *args, **kwargs): self.story = get_object_or_404(Story, pk=kwargs['story_id']) if self.story.editable_by(request.user): return CreateView.dispatch(self, request, *args, **kwargs) else: raise PermissionDenied def form_valid(self, form): chapter = form.save(commit=False) chapter.story = self.story chapter.order = (self.story.chapter_set.aggregate(o=Max('order'))['o'] or 0) + 1 chapter.save() return redirect('chapter_edit', chapter.id) def get_context_data(self, **kwargs): context = super(ChapterAdd, self).get_context_data(**kwargs) extra_context = {'page_title': 'Добавить новую главу', 'story': self.story} context.update(extra_context) return context class ChapterEdit(UpdateView): model = Chapter form_class = ChapterForm template_name = 'chapter_work.html' initial = {'button_submit': 'Сохранить изменения'} chapter = None @method_decorator(login_required) @method_decorator(csrf_protect) def dispatch(self, request, *args, **kwargs): return UpdateView.dispatch(self, request, *args, **kwargs) def get_object(self, queryset=None): self.chapter = UpdateView.get_object(self
, queryset=queryset) if self.chapter.story.editable_by(self.request.user): return self.chapter else: raise PermissionDenied def form_valid(self, form): self.chapter = form.save() return redirect('chapter_edit', self.chapter.id) def get_context_data(self, **kwargs): context = super(ChapterEdit, self).get_context_data(**kwargs)
extra_context = {'page_title': 'Редактирование «%s»' % self.chapter.title, 'chapter': self.chapter} context.update(extra_context) return context class ChapterDelete(DeleteView): model = Chapter chapter = None story = None chapter_id = None template_name = 'chapter_confirm_delete.html' @method_decorator(login_required) @method_decorator(csrf_protect) def dispatch(self, request, *args, **kwargs): return DeleteView.dispatch(self, request, *args, **kwargs) def get_object(self, queryset=None): self.chapter = DeleteView.get_object(self, queryset=queryset) self.story = self.chapter.story self.chapter_id = self.chapter.id if self.story.editable_by(self.request.user): return self.chapter else: raise PermissionDenied def delete(self, request, *args, **kwargs): self.chapter = self.get_object() self.story.chapter_set.filter(order__gt=self.chapter.order).update(order=F('order')-1) for chapter in self.story.chapter_set.filter(order__gt=self.chapter.order): invalidate_obj(chapter) self.chapter.delete() return redirect('story_edit', self.story.id) def get_context_data(self, **kwargs): context = super(ChapterDelete, self).get_context_data(**kwargs) extra_context = {'page_title': 'Подтверждение удаления главы', 'story': self.story, 'chapter': self.chapter} context.update(extra_context) return context
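A hedged sketch of URL patterns these views could be wired to, in the Django 1.x url() style the imports imply; only the 'chapter_edit' name is confirmed by the redirect() calls above, the regexes and remaining names are assumptions:

from django.conf.urls import url
from ponyFiction.views import chapter

urlpatterns = [
    url(r'^story/(?P<story_id>\d+)/chapter/(?:(?P<chapter_order>\d+)/)?$',
        chapter.chapter_view, name='chapter_view'),
    url(r'^story/(?P<story_id>\d+)/chapter/add/$',
        chapter.ChapterAdd.as_view(), name='chapter_add'),
    url(r'^chapter/(?P<pk>\d+)/edit/$',
        chapter.ChapterEdit.as_view(), name='chapter_edit'),
    url(r'^chapter/(?P<pk>\d+)/delete/$',
        chapter.ChapterDelete.as_view(), name='chapter_delete'),
]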
beernarrd/gramps
gramps/gen/display/name.py
Python
gpl-2.0
45,844
0.004319
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2004-2007 Donald N. Allingham # Copyright (C) 2010 Brian G. Matherly # Copyright (C) 2014 Paul Franklin # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # """ Class handling language-specific displaying of names. Specific symbols for parts of a name are defined: ====== =============================================================== Symbol Description ====== =============================================================== 't' title 'f' given (first names) 'l' full surname (lastname) 'c' callname 'x' nick name, call, or otherwise first first name (common name) 'i' initials of the first names 'm' primary surname (main) '0m' primary surname prefix '1m' primary surname surname '2m' primary surname connector 'y' pa/matronymic surname (father/mother) - assumed unique '0y' pa/matronymic prefix '1y' pa/matronymic surname '2y' pa/matronymic connector 'o' surnames without pa/matronymic and primary 'r' non primary surnames (rest) 'p' list of all prefixes 'q' surnames without prefixes and connectors 's' suffix 'n' nick name 'g' family nick name ====== =============================================================== """ #------------------------------------------------------------------------- # # Python modules # #------------------------------------------------------------------------- import re import logging LOG = logging.getLogger(".gramps.gen") #------------------------------------------------------------------------- # # Gramps modules # #------------------------------------------------------------------------- from ..const import ARABIC_COMMA, ARABIC_SEMICOLON, GRAMPS_LOCALE as glocale _ = glocale.translation.sgettext from ..lib.name import Name from ..lib.nameorigintype import NameOriginType try: from ..config import config WITH_GRAMPS_CONFIG=True except ImportError: WITH_GRAMPS_CONFIG=False #------------------------------------------------------------------------- # # Constants # #------------------------------------------------------------------------- _FIRSTNAME = 4 _SURNAME_LIST = 5 _SUFFIX = 6 _TITLE = 7 _TYPE = 8 _GROUP = 9 _SORT = 10 _DISPLAY = 11 _CALL = 12 _NICK = 13 _FAMNICK = 14 _SURNAME_IN_LIST = 0 _PREFIX_IN_LIST = 1 _PRIMARY_IN_LIST = 2 _TYPE_IN_LIST = 3 _CONNECTOR_IN_LIST = 4 _ORIGINPATRO = NameOriginType.PATRONYMIC _ORIGINMATRO = NameOriginType.MATRONYMIC _ACT = True _INA = False _F_NAME = 0 # name of the format _F_FMT = 1 # the format string _F_ACT = 2 # if the format is active _F_FN = 3 # name format function _F_RAWFN = 4 # name format raw function PAT_AS_SURN = False #------------------------------------------------------------------------- # # Local functions # #------------------------------------------------------------------------- # Because of occurring in an exec(), this couldn't be in a lambda: # we sort names first on longest first, then last letter first, 
this to # avoid translations of shorter terms which appear in longer ones, eg # namelast may not be mistaken with name, so namelast must first be # converted to %k before name is converted. ##def _make_cmp(a, b): return -cmp((len(a[1]),a[1]), (len(b[1]), b[1])) def _make_cmp_key(a): return (len(a[1]),a[1]) # set reverse to True!! #-----
-------------------------------------------------------------------- # # NameDisplayError class # #--------------
----------------------------------------------------------- class NameDisplayError(Exception): """ Error used to report that the name display format string is invalid. """ def __init__(self, value): Exception.__init__(self) self.value = value def __str__(self): return self.value #------------------------------------------------------------------------- # # Functions to extract data from raw lists (unserialized objects) # #------------------------------------------------------------------------- def _raw_full_surname(raw_surn_data_list): """method for the 'l' symbol: full surnames""" result = "" for raw_surn_data in raw_surn_data_list: result += "%s %s %s " % (raw_surn_data[_PREFIX_IN_LIST], raw_surn_data[_SURNAME_IN_LIST], raw_surn_data[_CONNECTOR_IN_LIST]) return ' '.join(result.split()).strip() def _raw_primary_surname(raw_surn_data_list): """method for the 'm' symbol: primary surname""" global PAT_AS_SURN nrsur = len(raw_surn_data_list) for raw_surn_data in raw_surn_data_list: if raw_surn_data[_PRIMARY_IN_LIST]: #if there are multiple surnames, return the primary. If there #is only one surname, then primary has little meaning, and we #assume a pa/matronymic should not be given as primary as it #normally is defined independently if not PAT_AS_SURN and nrsur == 1 and \ (raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO or raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO): return '' else: result = "%s %s %s" % (raw_surn_data[_PREFIX_IN_LIST], raw_surn_data[_SURNAME_IN_LIST], raw_surn_data[_CONNECTOR_IN_LIST]) return ' '.join(result.split()) return '' def _raw_primary_surname_only(raw_surn_data_list): """method to obtain the raw primary surname data, so this returns a string """ global PAT_AS_SURN nrsur = len(raw_surn_data_list) for raw_surn_data in raw_surn_data_list: if raw_surn_data[_PRIMARY_IN_LIST]: if not PAT_AS_SURN and nrsur == 1 and \ (raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO or raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO): return '' else: return raw_surn_data[_SURNAME_IN_LIST] return '' def _raw_primary_prefix_only(raw_surn_data_list): """method to obtain the raw primary surname data""" global PAT_AS_SURN nrsur = len(raw_surn_data_list) for raw_surn_data in raw_surn_data_list: if raw_surn_data[_PRIMARY_IN_LIST]: if not PAT_AS_SURN and nrsur == 1 and \ (raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO or raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO): return '' else: return raw_surn_data[_PREFIX_IN_LIST] return '' def _raw_primary_conn_only(raw_surn_data_list): """method to obtain the raw primary surname data""" global PAT_AS_SURN nrsur = len(raw_surn_data_list) for raw_surn_data in raw_surn_data_list: if raw_surn_data[_PRIMARY_IN_LIST]: if not PAT_AS_SURN and nrsur == 1 and \ (raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINPATRO or raw_surn_data[_TYPE_IN_LIST][0] == _ORIGINMATRO): return '' else: return raw_surn_data[_CONNECTOR_IN_LIST] return '' def _raw_patro_surname(raw_surn_data_list): """method for the 'y' symbol: patronymic surname""" for raw_surn_data in raw_surn_data_list: if (raw_surn_data[_TYP
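The raw helpers above index into plain list entries rather than Surname objects. A sketch of the entry layout those _*_IN_LIST constants imply, with an invented entry (the origin-type tuple is a stand-in):

from gramps.gen.display.name import (
    _CONNECTOR_IN_LIST, _PREFIX_IN_LIST, _PRIMARY_IN_LIST, _SURNAME_IN_LIST)

# [surname, prefix, primary flag, (origin type, ...), connector]
raw_surname = ['Bernoulli', 'van', True, (0, ''), '']

assert raw_surname[_SURNAME_IN_LIST] == 'Bernoulli'
assert raw_surname[_PREFIX_IN_LIST] == 'van'
assert raw_surname[_PRIMARY_IN_LIST] is True
assert raw_surname[_CONNECTOR_IN_LIST] == ''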
ddalex/python-prompt-toolkit
prompt_toolkit/search_state.py
Python
bsd-3-clause
1,105
0.002715
from .enums import IncrementalSearchDirection from .filters import SimpleFilter, Never __all__ = ( 'SearchState', ) class SearchState(object): """ A search 'query'. """ __slots__ = ('text', 'direction', 'ignore_case') def __init__(self, text='', direction=IncrementalSearchDirection.FORWARD, ignore_case=Never()): assert isinstance(ignore_case, SimpleFilter) self.text = text self.direction = direction self.ignore_case = ignore_case def __repr__(self): return '%s(%r, direction=%r, ignore_case=%r)' % ( self.__class__.__name__, self.text, self.direct
ion, self.ignore_case) def __invert__(self): """ Create a new SearchState where backwards becomes forwards and the other way around. """ if self.direction == IncrementalSearchDirection.BACKWARD: direction = IncrementalSearchDirection.FORWARD else: direction = IncrementalSearchDirection.BACKWARD return SearchState(text=self.text, direction=d
irection, ignore_case=self.ignore_case)
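A small usage sketch for the inversion operator defined above; the defaults come straight from the constructor signature:

from prompt_toolkit.enums import IncrementalSearchDirection
from prompt_toolkit.search_state import SearchState

state = SearchState(text='needle')
flipped = ~state

assert state.direction == IncrementalSearchDirection.FORWARD
assert flipped.direction == IncrementalSearchDirection.BACKWARD
assert flipped.text == 'needle'   # text and ignore_case carry over unchanged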
jcushman/pywb
pywb/utils/loaders.py
Python
gpl-3.0
9,733
0.002055
""" This module provides loaders for local file system and over http local and remote access """ import os import hmac import urllib #import urllib2 import requests import urlparse import time import pkg_resources from io import open, BytesIO try: from boto import connect_s3 s3_avail = True except ImportError: #pragma: no cover s3_avail = False #================================================================= def is_http(filename): return filename.startswith(('http://', 'https://')) #================================================================= def is_s3(filename): return filename.startswith('s3://') #================================================================= def to_file_url(filename): """ Convert a filename to a file:// url """ url = os.path.abspath(filename) url = urlparse.urljoin('file:', urllib.pathname2url(url)) return url #================================================================= def load_yaml_config(config_file): import yaml configdata = BlockLoader().load(config_file) config = yaml.load(configdata) return config #================================================================= def extract_post_query(method, mime, length, stream, buffered_stream=None): """ Extract a url-encoded form POST from stream If not a application/x-www-form-urlencoded, or no missing content length, return None """ if method.upper() != 'POST': return None if ((not mime or not mime.lower().startswith('application/x-www-form-urlencoded'))): return None try: length = int(length) except (ValueError, TypeError): return None if length <= 0: return None #todo: encoding issues? post_query = '' while length > 0: buff = stream.read(length) length -= len(buff) if not buff: break post_query += buff if buffered_stream: buffered_stream.write(post_query) buffered_stream.seek(0) post_query = urllib.unquote_plus(post_query) return post_query #================================================================= def append_post_query(url, post_query): if not post_query: return url if '?' not in url: url += '?' else: url += '&' url += post_query return url #================================================================= def extract_client_cookie(env, cookie_name): cookie_header = env.get('HTTP_COOKIE') if not cookie_header: return None # attempt to extract cookie_name only inx = cookie_header.find(cookie_name) if inx < 0: return None end_inx = cookie_header.find(';', inx) if end_inx > 0: value = cookie_header[inx:end_inx] else: value = cookie_header[inx:] value = value.split('=') if len(value) < 2: return None value = value[1].strip() return value #================================================================= def read_last_line(fh, offset=256): """ Read last line from a seekable file. Start reading from buff before end of file, and double backwards seek until line break is found. If reached beginning of file (no lines), just return whole file """ fh.seek(0, 2) size = fh.tell() while offset < size: fh.seek(-offset, 2) lines = fh.readlines() if len(lines) > 1: return lines[-1] offset *= 2 fh.seek(0, 0) return fh.readlines()[-1] #================================================================= class BlockLoader(object): """ a loader which can stream blocks of content given a uri, offset and optional length. 
Currently supports: http/https and file/local file system """ def __init__(self, cookie_maker=None): self.cookie_maker = cookie_maker self.session = None self.s3conn = None def load(self, url, offset=0, length=-1): """ Determine loading method based on uri """ if is_http(url): return self.load_http(url, offset, length) elif is_s3(url): return self.load_s3(url, offset, length) else: return self.load_
file_or_resource(url, offset, length) def load_file_or_resource(self, url, offset=0, length=-1): """ Load a file-like reader from the local file system """ # if starting with . or /, can only be a file path.. file_only = url.startswith(('/', '.')) # convert to filename if url.startswith('file://'): file_only =
True url = urllib.url2pathname(url[len('file://'):]) try: # first, try as file afile = open(url, 'rb') except IOError: if file_only: raise # then, try as package.path/file pkg_split = url.split('/', 1) if len(pkg_split) == 1: raise afile = pkg_resources.resource_stream(pkg_split[0], pkg_split[1]) if offset > 0: afile.seek(offset) if length >= 0: return LimitReader(afile, length) else: return afile @staticmethod def _make_range_header(offset, length): if length > 0: range_header = 'bytes={0}-{1}'.format(offset, offset + length - 1) else: range_header = 'bytes={0}-'.format(offset) return range_header def load_http(self, url, offset, length): """ Load a file-like reader over http using range requests and an optional cookie created via a cookie_maker """ headers = {} if offset != 0 or length != -1: headers['Range'] = self._make_range_header(offset, length) if self.cookie_maker: if isinstance(self.cookie_maker, basestring): headers['Cookie'] = self.cookie_maker else: headers['Cookie'] = self.cookie_maker.make() if not self.session: self.session = requests.Session() r = self.session.get(url, headers=headers, stream=True) return r.raw def load_s3(self, url, offset, length): if not s3_avail: #pragma: no cover raise IOError('To load from s3 paths, ' + 'you must install boto: pip install boto') if not self.s3conn: try: self.s3conn = connect_s3() except Exception: #pragma: no cover self.s3conn = connect_s3(anon=True) parts = urlparse.urlsplit(url) bucket = self.s3conn.get_bucket(parts.netloc) headers = {'Range': self._make_range_header(offset, length)} key = bucket.get_key(parts.path) result = key.get_contents_as_string(headers=headers) key.close() return BytesIO(result) #================================================================= # Signed Cookie-Maker #================================================================= class HMACCookieMaker(object): """ Utility class to produce signed HMAC digest cookies to be used with each http request """ def __init__(self, key, name, duration=10): self.key = key self.name = name # duration in seconds self.duration = duration def make(self, extra_id=''): expire = str(long(time.time() + self.duration)) if extra_id: msg = extra_id + '-' + expire else: msg = expire hmacdigest = hmac.new(self.key, msg) hexdigest = hmacdigest.hexdigest() if extra_id: cookie = '{0}-{1}={2}-{3}'.format(self.name, extra_id, expire, hexdigest) else: cookie = '{0}={1}-{2}'.format(self.name, expire, hexdigest) return cookie #================================================================= # Limit Reader #================================================================= class LimitRea
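A usage sketch for BlockLoader above, exercising the local-file branch; the path is a placeholder and must exist for the read to succeed:

from pywb.utils.loaders import BlockLoader

loader = BlockLoader()
# Read 100 bytes starting at offset 512 of a local file.
reader = loader.load('file:///tmp/example.warc.gz', offset=512, length=100)
buff = reader.read()
assert len(buff) <= 100  # the returned LimitReader caps reads at `length`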
fin-ger/logitech-m720-config
setup.py
Python
gpl-3.0
2,072
0.02027
""" logitech-m720-config - A config script for Logitech M720 button mappings Copyright (C) 2017 Fin Christensen <christensen.fin@gmail.com> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ from setuptools import setup, find_packages from codecs import open from os import path here = path.abspath (path.dirname (__file__)) # Get the long description from the README.rst file with open (path.join (here, "README.md"), encoding = "utf-8") as readme: long_description = readme.read () setup ( name = "m720-config", version = "0.0.1", description = "A config script for Logitech M720 button mappings.", long_description = long_description, url = "", author = "Fin Christensen", author_email = "christensen.fin@gmail.com", license = "GPLv3+", classifiers = [
"Development Status :: 2 - Pre-Alpha", "Environment :: Console", "Intended Audience :: System Administrators", "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3.5", "Programming Langu
age :: Python :: 3 :: Only", ], keywords = "config logitech m720 hid++", packages = find_packages (), install_requires = ["solaar"], extras_require = {}, package_data = { "m720_config": [], }, data_files = [], entry_points = { "console_scripts": [ "m720-config=m720_config:main" ], }, )
plotly/plotly.py
packages/python/plotly/plotly/validators/layout/ternary/aaxis/title/font/_family.py
Python
mit
553
0
import _plotly_utils.basevalidators class FamilyValidator(_plotly_utils.basevalidators.StringValidator): def __init__( self, plotly_name="famil
y", parent_name="layout.ternary.aaxis.title.font", **kwargs ): super(FamilyValidator, self).__init__( plotly_name=plotly_name, parent_name=parent_name, edit_type=kwargs.pop("edit_type", "plot"), no_blank=kwargs.pop("no_blank", True), strict=kwargs.pop("strict", True),
**kwargs )
TailorDev/django-tailordev-biblio
td_biblio/__init__.py
Python
mit
139
0
"""TailorDev Biblio Bibliography management with Django. """ __version__ = "2.0.0" default_app_conf
ig = "td_biblio.apps
.TDBiblioConfig"