| repo_name | path | language | license | size | score | prefix | middle | suffix |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| stringlengths 5-100 | stringlengths 4-231 | stringclasses 1 value | stringclasses 15 values | int64 6-947k | float64 0-0.34 | stringlengths 0-8.16k | stringlengths 3-512 | stringlengths 0-8.17k |
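Each row below is one fill-in-the-middle sample: a source file split into a prefix, a held-out middle span, and a suffix, alongside the repository, path, language, license, size, and score columns described above. The sketch below shows how such a row can be reassembled into the original file or arranged as a FIM-style prompt; the column names follow the schema above, while the sentinel strings and the toy row are illustrative assumptions, not values defined by this dataset.

```python
# Minimal sketch: rebuild a file (or a FIM prompt) from one row of this dataset.
# Column names match the schema above; the sentinel strings and the toy row are
# illustrative assumptions, not tokens or records defined by the dataset itself.

def reconstruct_file(row):
    """Concatenate prefix + middle + suffix back into the original source text."""
    return row["prefix"] + row["middle"] + row["suffix"]

def fim_prompt(row, pre="<fim_prefix>", suf="<fim_suffix>", mid="<fim_middle>"):
    """Arrange the row in prefix-suffix-middle order, a common fill-in-the-middle layout."""
    return pre + row["prefix"] + suf + row["suffix"] + mid + row["middle"]

if __name__ == "__main__":
    # Toy row (hypothetical values) standing in for a real record.
    row = {"prefix": "def add(a, b):\n    ", "middle": "return a + b", "suffix": "\n"}
    print(reconstruct_file(row))
    print(fim_prompt(row))
```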
JoeGermuska/worblehat | reference/pyarchive/pyarchive/mp3.py | Python | mit | 7,381 | 0.022897
# PyMM - Python MP3 Manager
# Copyright (C) 2000 Pierre Hjalm <pierre.hjalm@dis.uu.se>
#
# Modified by Alexander Kanavin <ak@sensi.org>
# Removed ID tags support and added VBR support
# Used http://home.swipnet.se/grd/mp3info/ for information
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
""" mp3.py
Reads information from an mp3 file.
This is a python port of code taken from the mpg123 input module of xmms.
"""
import struct
def header(buf):
return struct.unpack(">I",buf)[0]
def head_check(head):
if ((head & 0xffe00000L) != 0xffe00000L):
return 0
if (not ((head >> 17) & 3)):
return 0
if (((head >> 12) & 0xf) == 0xf):
return 0
if ( not ((head >> 12) & 0xf)):
return 0
if (((head >> 10) & 0x3) == 0x3):
return 0
if (((head >> 19) & 1) == 1 and ((head >> 17) & 3) == 3 and ((head >> 16) & 1) == 1):
return 0
if ((head & 0xffff0000L) == 0xfffe0000L):
return 0
return 1
def filesize(file):
""" Returns the size of file sans any ID3 tag
"""
f=open(file)
f.seek(0,2)
size=f.tell()
try:
f.seek(-128,2)
except:
f.close()
return 0
buf=f.read(3)
f.close()
if buf=="TAG":
size=size-128
if size<0:
return 0
else:
return size
table=[[
[0, 32, 64, 96, 128, 160, 192, 224, 256, 288, 320, 352, 384, 416, 448],
[0, 32, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, 384],
[0, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320]],
[
[0, 32, 48, 56, 64, 80, 96, 112, 128, 144, 160, 176, 192, 224, 256],
[0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160],
[0, 8, 16, 24, 32, 40, 48, 56, 64, 80, 96, 112, 128, 144, 160]]]
def decode_header(head):
""" Decode the mp3 header and put the information in a frame structure
"""
freqs=[44100, 48000, 32000, 22050, 24000, 16000, 11025, 12000, 8000]
fr={}
if head & (1 << 20):
if head & (1 << 19):
fr["lsf"]=0
else:
fr["lsf"]=1
fr["mpeg25"] = 0
else:
fr["lsf"] = 1
fr["mpeg25"] = 1
fr["lay"] = 4 - ((head >> 17) & 3)
if fr["mpeg25"]:
fr["sampling_frequency"] = freqs[6 + ((head >> 10) & 0x3)]
else:
fr["sampling_frequency"] = freqs[((head >> 10) & 0x3) + (fr["lsf"] * 3)]
fr["error_protection"] = ((head >> 16) & 0x1) ^ 0x1
fr["bitrate_index"] = ((head >> 12) & 0xf)
fr["bitrate"]=table[fr["lsf"]][fr["lay"]-1][fr["bitrate_index"]]
fr["padding"]=((head>>9) & 0x1)
fr["channel_mode"]=((head>>6) & 0x3)
if fr["lay"]==1:
fr["framesize"]=table[fr["lsf"]][0][fr["bitrate_index"]]*12000
fr["framesize"]=fr["framesize"]/fr["sampling_frequency"]
fr["framesize"]=((fr["framesize"]+fr["padding"])<<2)-4
elif fr["lay"]==2:
fr["framesize"]=table[fr["lsf"]][1][fr["bitrate_index"]]*144000
fr["framesize"]=fr["framesize"]/fr["sampling_frequency"]
fr["framesize"]=fr["framesize"]+fr["padding"]-1
elif fr["lay"]==3:
fr["framesize"]=table[fr["lsf"]][2][fr["bitrate_index"]]*144000
fr["framesize"]=fr["framesize"]/fr["sampling_frequency"]<<fr["lsf"]
fr["framesize"]=fr["framesize"]+fr["padding"]-4
pass
else:
return 0
return fr
def decode_vbr(buf):
vbr = {}
if buf[:4] != "Xing":
return 0
frames_flag = ord(buf[7]) & 1
if not frames_flag:
return 0
vbr["frames"] = header(buf[8:])
return vbr
def decode_synch_integer(buf):
return (ord(buf[0])<<21)+(ord(buf[1])<<14)+(ord(buf[2])<<7)+ord(buf[3])
def detect_mp3(filename):
""" Determines whether this is an mp3 file and if so reads information
from it.
"""
try:
f=open(filename,"rb")
except:
return 0
try:
tmp=f.read(4)
except:
f.close()
return 0
if tmp[:3] == 'ID3':
try:
tmp = f.read(6)
f.seek(decode_synch_integer(tmp[2:])+10)
tmp=f.read(4)
except:
f.close()
return 0
try:
head=header(tmp)
except:
return 0
while not head_check(head):
# This is a real time waster, but an mp3 stream can start anywhere
# in a file so we have to search the entire file which can take a
# while for large non-mp3 files.
try:
buf=f.read(1024)
except:
f.close()
return 0
if buf=="":
f.close()
return 0
for i in range(0,len(buf)-1):
head=long(head)<<8;
head=head|ord(buf[i])
if head_check(head):
f.seek(i+1-len(buf),1)
break
mhead=decode_header(head)
if mhead:
# Decode VBR header if there's any.
pos = f.tell()
mhead["vbr"] = 0
if not mhead["lsf"]:
if mhead["channel_mode"] == 3:
vbrpos = 17
else:
vbrpos = 32
else:
if mhead["channel_mode"] == 3:
vbrpos = 9
else:
vbrpos = 17
try:
f.seek(vbrpos,1)
vbr = decode_vbr(f.read(12))
mhead["vbrframes"] = vbr["frames"]
if mhead["vbrframes"] >0:
mhead["vbr"] = 1
except:
pass
# We found something which looks like a MPEG-header
# We check the next frame too, to be sure
if f.seek(pos+mhead["framesize"]):
f.close()
return 0
try:
tmp=f.read(4)
except:
f.close()
return 0
if len(tmp)!=4:
f.close()
return 0
htmp=header(tmp)
if not (head_check(htmp) and decode_header(htmp)):
f.close()
return 0
f.close()
# If we have found a valid mp3 add some more info the head data.
if mhead:
mhead["filesize"]=filesize(filename)
if not mhead["vbr"]:
if mhead["bitrate"] and mhead["filesize"]:
mhead["time"]=int(float(mhead["filesize"])/(mhead["bitrate"]*1000)*8)
else:
mhead["time"]=0
else:
if mhead["filesize"] and mhead["sampling_frequency"]:
medframesize = float(mhead["filesize"])/float(mhead["vbrframes"])
if mhead["lsf"]:
coef = 12
else:
coef = 144
vbrrate = medframesize*mhead["sampling_frequency"]/(1000*coef)
mhead["time"]=int(float(mhead["filesize"])/(vbrrate*1000)*8)
mhead["vbrrate"] = int(vbrrate)
return mhead
else:
return 0
if __name__=="__main__":
import sys
mp3info=detect_mp3(sys.argv[1])
if mp3info:
print mp3info
else:
print "Not an mp3 file."
ellisonleao/pyshorteners | pyshorteners/shorteners/tinyurl.py | Python | gpl-3.0 | 1,019 | 0
from ..base import BaseShortener
from ..exceptions import ShorteningErrorException
class Shortener(BaseShortener):
"""
TinyURL.com shortener implementation
Example:
>>> import pyshorteners
>>> s = pyshorteners.Shortener()
>>> s.tinyurl.short('http://www.google.com')
'http://tinyurl.com/TEST'
>>> s.tinyurl.expand('http://tinyurl.com/test')
'http://www.google.com'
"""
api_url = "http://tinyurl.com/api-create.php"
def short(self, url):
"""Short implementation for TinyURL.com
Args:
url: the URL you want to shorten
Returns:
A string containing the shortened URL
Raises:
ShorteningErrorException: If the API returns an error as response
"""
url = self.clean_url(url)
response = self._get(self.api_url, params=dict(url=url))
if response.ok:
return response.text.strip()
raise ShorteningErrorException(response.content)
dsanders11/django-autocomplete-light | autocomplete_light/example_apps/basic/forms.py | Python | mit | 1,043 | 0.000959
import autocomplete_light.shortcuts as autocomplete_light
from django import VERSION
from .models import *
try:
import genericm2m
except ImportError:
genericm2m = None
try:
import taggit
except ImportError:
taggit = None
class DjangoCompatMeta:
if VERSION >= (1, 6):
fields = '__all__'
class FkModelForm(autocomplete_light.ModelForm):
class Meta(DjangoCompatMeta):
model = FkModel
class OtoModelForm(autocomplete_light.ModelForm):
class Meta(DjangoCompatMeta):
model = OtoModel
class MtmModelForm(autocomplete_light.ModelForm):
class Meta(DjangoCompatMeta):
model = MtmModel
class GfkModelForm(autocomplete_light.ModelForm):
class Meta(DjangoCompatMeta):
model = GfkModel
if genericm2m:
class GmtmModelForm(autocomplete_light.ModelForm):
class Meta(DjangoCompatMeta):
model = GmtmModel
if taggit:
class TaggitModelForm(autocomplete_light.ModelForm):
class Meta(DjangoCompatMeta):
model = TaggitModel
HellTech/NAG_IoE_2016 | 30_HellTech_1602_1/08_Meteostanice_GUI_v2/Meteo2/base64function.py | Python | gpl-3.0 | 119 | 0.05042
import base64
def toBase64(s):
return base64.b64encode(str(s))
def fromBase64(s):
return base64.b64decode(str(s))
vasconcelosfer/odoo-odisea | odisea/models/representative.py | Python | lgpl-3.0 | 4,870 | 0.015606
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
from openerp import models, fields, api, tools, _
class odisea_representative(models.Model):
"""Representative"""
_name = 'odisea.representative'
_description = 'Representative'
@api.multi
def _has_image(self):
return dict((p.id, bool(p.image)) for p in self)
name = fields.Char(string='Name', required=True)
cuit = fields.Char(string='CUIT', size=13)
title = fields.Many2one('res.partner.title', 'Title')
company = fields.Char(string='Company')
ref = fields.Char('Contact Reference', select=1)
website = fields.Char('Website', help="Website of Partner or Company")
comment = fields.Text('Notes')
category_id = fields.Many2many('res.partner.category', id1='partner_id', id2='category_id', string='Tags')
active = fields.Boolean('Active', default=True)
street = fields.Char('Street')
street2 = fields.Char('Street2')
zip = fields.Char('Zip', size=24, change_default=True)
city = fields.Char('City')
state_id = fields.Many2one("res.country.state", 'State', ondelete='restrict')
country_id = fields.Many2one('res.country', 'Country', ondelete='restrict')
email = fields.Char('Email')
phone = fields.Char('Phone')
fax = fields.Char('Fax')
mobile = fields.Char('Mobile')
birthdate = fields.Char('Birthdate')
function = fields.Char('Job Position')
is_company = fields.Boolean('Is a Company', help="Check if the contact is a company, otherwise it is a person")
use_parent_address = fields.Boolean('Use Company Address', help="Select this if you want to set company's address information for this contact")
# image: all image fields are base64 encoded and PIL-supported
image = fields.Binary("Image",
help="This field holds the image used as avatar for this contact, limited to 1024x1024px")
image_medium = fields.Binary(compute="_get_image",
string="Medium-sized image",
store= False,
help="Medium-sized image of this contact. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views.")
image_small = fields.Binary(compute="_get_image",
string="Small-sized image",
store= False,
help="Small-sized image of this contact. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required.")
has_image = fields.Boolean(compute=_has_image)
color = fields.Integer('Color Index')
@api.multi
def onchange_state(self, state_id):
if state_id:
state = self.env['res.country.state'].browse(state_id)
return {'value': {'country_id': state.country_id.id}}
return {}
@api.multi
def onchange_type(self, is_company):
value = {'title': False}
if is_company:
value['use_parent_address'] = False
domain = {'title': [('domain', '=', 'partner')]}
else:
domain = {'title': [('domain', '=', 'contact')]}
return {'value': value, 'domain': domain}
@api.one
@api.depends("image")
def _get_image(self):
""" calculate the images sizes and set the images to the corresponding
fields
"""
image = self.image
# check if the context contains the magic `bin_size` key
if self.env.context.get("bin_size"):
# refetch the image with a clean context
image = self.env[self._name].with_context({}).browse(self.id).image
data = tools.image_get_resized_images(image, return_big=True, avoid_resize_big=False)
self.image_big = data["image"]
self.image_medium = data["image_medium"]
self.image_small = data["image_small"]
return True
madhavsuresh/chimerascan | chimerascan/deprecated/chimerascan_index_v1.py | Python | gpl-3.0 | 5,779 | 0.003634
#!/usr/bin/env python
'''
Created on Jan 5, 2011
@author: mkiyer
chimerascan: chimeric transcript discovery using RNA-seq
Copyright (C) 2011 Matthew Iyer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import logging
import os
import shutil
import subprocess
import sys
from optparse import OptionParser
# local imports
import chimerascan.pysam as pysam
from chimerascan.lib.feature import GeneFeature
from chimerascan.lib.seq import DNA_reverse_complement
from chimerascan.lib.config import JOB_ERROR, JOB_SUCCESS, ALIGN_INDEX, GENE_REF_PREFIX, GENE_FEATURE_FILE
from chimerascan.lib.base import check_executable
BASES_PER_LINE = 50
def split_seq(seq, chars_per_line):
pos = 0
newseq = []
while pos < len(seq):
if pos + chars_per_line > len(seq):
endpos = len(seq)
else:
endpos = pos + chars_per_line
newseq.append(seq[pos:endpos])
pos = endpos
return '\n'.join(newseq)
def bed12_to_fasta(gene_feature_file, reference_seq_file):
ref_fa = pysam.Fastafile(reference_seq_file)
for g in GeneFeature.parse(open(gene_feature_file)):
exon_seqs = []
error_occurred = False
for start, end in g.exons:
seq = ref_fa.fetch(g.chrom, start, end)
if not seq:
logging.warning("gene %s exon %s:%d-%d not found in reference" %
(g.tx_name, g.chrom, start, end))
error_occurred = True
break
exon_seqs.append(seq)
if error_occurred:
continue
# make fasta record
seq = ''.join(exon_seqs)
if g.strand == '-':
seq = DNA_reverse_complement(seq)
# break seq onto multiple lines
seqlines = split_seq(seq, BASES_PER_LINE)
yield (">%s range=%s:%d-%d gene=%s strand=%s\n%s" %
(GENE_REF_PREFIX + g.tx_name, g.chrom, start, end, g.gene_name, g.strand, seqlines))
ref_fa.close()
def create_chimerascan_index(output_dir, genome_fasta_file,
gene_feature_file,
bowtie_build_bin):
# create output dir if it does not exist
if not os.path.exists(output_dir):
os.makedirs(output_dir)
logging.info("Created index directory: %s" % (output_dir))
# create FASTA index file
index_fasta_file = os.path.join(output_dir, ALIGN_INDEX + ".fa")
fh = open(index_fasta_file, "w")
# copy reference fasta file to output dir
logging.info("Adding reference genome to index...")
shutil.copyfileobj(open(genome_fasta_file), fh)
# extract sequences from gene feature file
logging.info("Adding gene models to index...")
for fa_record in bed12_to_fasta(gene_feature_file, genome_fasta_file):
print >>fh, fa_record
fh.close()
# copy gene bed file to index directory
shutil.copyfile(gene_feature_file, os.path.join(output_dir, GENE_FEATURE_FILE))
# index the combined fasta file
logging.info("Indexing FASTA file...")
fh = pysam.Fastafile(index_fasta_file)
fh.close()
# build bowtie index on the combined fasta file
logging.info("Building bowtie index...")
bowtie_index_name = os.path.join(output_dir, ALIGN_INDEX)
args = [bowtie_build_bin, index_fasta_file, bowtie_index_name]
if subprocess.call(args) != os.EX_OK:
logging.error("bowtie-build failed to create alignment index")
return JOB_ERROR
logging.info("chimerascan index created successfully")
return JOB_SUCCESS
def main():
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
parser = OptionParser("usage: %prog [options] <reference_genome.fa> <gene_models.txt> <index_output_dir>")
parser.add_option("--bowtie-build-bin", dest="bowtie_build_bin", default="bowtie-build",
help="Path to 'bowtie-build' program")
options, args = parser.parse_args()
# check command line arguments
if len(args) < 3:
parser.error("Incorrect number of command line arguments")
ref_fasta_file = args[0]
gene_feature_file = args[1]
output_dir = args[2]
# check that input files exist
if not os.path.isfile(ref_fasta_file):
parser.error("Reference fasta file '%s' not found" % (ref_fasta_file))
if not os.path.isfile(gene_feature_file):
parser.error("Gene feature file '%s' not found" % (gene_feature_file))
# check that output dir is not a regular file
if os.path.exists(output_dir) and (not os.path.isdir(output_dir)):
parser.error("Output directory name '%s' exists and is not a valid directory" % (output_dir))
# check that bowtie-build program exists
if check_executable(options.bowtie_build_bin):
logging.debug("Checking for 'bowtie-build' binary... found")
else:
parser.error("bowtie-build binary not found or not executable")
# run main index creation function
retcode = create_chimerascan_index(output_dir, ref_fasta_file, gene_feature_file,
options.bowtie_build_bin)
sys.exit(retcode)
if __name__ == '__main__':
main()
stefanw/froide | froide/georegion/migrations/0002_auto_20180409_1007.py | Python | mit | 617 | 0.001621
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-04-09 08:07
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('georegion', '0001_initial_squashed_0004_auto_20180307_2026'),
]
operations = [
migrations.AlterField(
model_name='georegion',
name='part_of',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='georegion.GeoRegion', verbose_name='Part of'),
),
]
blorenz/btce-api | btceapi/common.py | Python | mit | 5,264 | 0.00057
# Copyright (c) 2013 Alan McIntyre
import httplib
import json
import decimal
import re
decimal.getcontext().rounding = decimal.ROUND_DOWN
exps = [decimal.Decimal("1e-%d" % i) for i in range(16)]
btce_domain = "btc-e.com"
all_currencies = ("btc", "usd", "rur", "ltc", "nmc", "eur", "nvc",
"trc", "ppc", "ftc", "xpm")
all_pairs = ("btc_usd", "btc_rur", "btc_eur", "ltc_btc", "ltc_usd",
"ltc_rur", "ltc_eur", "nmc_btc", "nmc_usd", "nvc_btc",
"nvc_usd", "usd_rur", "eur_usd", "trc_btc", "ppc_btc",
"ppc_usd", "ftc_btc", "xpm_btc")
max_digits = {"btc_usd": 3,
"btc_rur": 5,
"btc_eur": 5,
"ltc_btc": 5,
"ltc_usd": 6,
"ltc_rur": 5,
"ltc_eur": 3,
"nmc_btc": 5,
"nmc_usd": 3,
"nvc_btc": 5,
"nvc_usd": 3,
"usd_rur": 5,
"eur_usd": 5,
"trc_btc": 5,
"ppc_btc": 5,
"ppc_usd": 3,
"ftc_btc": 5,
"xpm_btc": 5}
min_orders = {"btc_usd": decimal.Decimal("0.01"),
"btc_rur": decimal.Decimal("0.1"),
"btc_eur": decimal.Decimal("0.1"),
"ltc_btc": decimal.Decimal("0.1"),
"ltc_usd": decimal.Decimal("0.1"),
"ltc_rur": decimal.Decimal("0.1"),
"ltc_eur": decimal.Decimal("0.1"),
"nmc_btc": decimal.Decimal("0.1"),
"nmc_usd": decimal.Decimal("0.1"),
"nvc_btc": decimal.Decimal("0.1"),
"nvc_usd": decimal.Decimal("0.1"),
"usd_rur": decimal.Decimal("0.1"),
"eur_usd": decimal.Decimal("0.1"),
"trc_btc": decimal.Decimal("0.1"),
"ppc_btc": decimal.Decimal("0.1"),
"ppc_usd": decimal.Decimal("0.1"),
"ftc_btc": decimal.Decimal("0.1"),
"xpm_btc": decimal.Decimal("0.1")}
def parseJSONResponse(response):
def parse_decimal(var):
return decimal.Decimal(var)
try:
r = json.loads(response, parse_float=parse_decimal,
parse_int=parse_decimal)
except Exception as e:
msg = "Error while attempting to parse JSON response:"\
" %s\nResponse:\n%r" % (e, response)
raise Exception(msg)
return r
HEADER_COOKIE_RE = re.compile(r'__cfduid=([a-f0-9]{46})')
BODY_COOKIE_RE = re.compile(r'document\.cookie="a=([a-f0-9]{32});path=/;";')
class BTCEConnection:
def __init__(self, timeout=30):
self.conn = httplib.HTTPSConnection(btce_domain, timeout=timeout)
self.cookie = None
def close(self):
self.conn.close()
def getCookie(self):
self.cookie = ""
self.conn.request("GET", '/')
response = self.conn.getresponse()
setCookieHeader = response.getheader("Set-Cookie")
match = HEADER_COOKIE_RE.search(setCookieHeader)
if match:
self.cookie = "__cfduid=" + match.group(1)
match = BODY_COOKIE_RE.search(response.read())
if match:
if self.cookie != "":
self.cookie += '; '
self.cookie += "a=" + match.group(1)
def makeRequest(self, url, extra_headers=None, params="", with_cookie=False):
headers = {"Content-type": "application/x-www-form-urlencoded"}
if extra_headers is not None:
headers.update(extra_headers)
if with_cookie:
if self.cookie is None:
self.getCookie()
headers.update({"Cookie": self.cookie})
self.conn.request("POST", url, params, headers)
response = self.conn.getresponse().read()
return response
def makeJSONRequest(self, url, extra_headers=None, params=""):
response = self.makeRequest(url, extra_headers, params)
return parseJSONResponse(response)
def validatePair(pair):
if pair not in all_pairs:
if "_" in pair:
a, b = pair.split("_")
swapped_pair = "%s_%s" % (b, a)
if swapped_pair in all_pairs:
msg = "Unrecognized pair: %r (did you mean %s?)"
msg = msg % (pair, swapped_pair)
raise Exception(msg)
raise Exception("Unrecognized pair: %r" % pair)
def validateOrder(pair, trade_type, rate, amount):
validatePair(pair)
if trade_type not in ("buy", "sell"):
raise Exception("Unrecognized trade type: %r" % trade_type)
minimum_amount = min_orders[pair]
formatted_min_amount = formatCurrency(minimum_amount, pair)
if amount < minimum_amount:
msg = "Trade amount too small; should be >= %s" % formatted_min_amount
raise Exception(msg)
def truncateAmountDigits(value, digits):
quantum = exps[digits]
return decimal.Decimal(value).quantize(quantum)
def truncateAmount(value, pair):
return truncateAmountDigits(value, max_digits[pair])
def formatCurrencyDigits(value, digits):
s = str(truncateAmountDigits(value, digits))
dot = s.index(".")
while s[-1] == "0" and len(s) > dot + 2:
s = s[:-1]
return s
def formatCurrency(value, pair):
return formatCurrencyDigits(value, max_digits[pair])
meatcomputer/opencog | opencog/python/examples/test.py | Python | agpl-3.0 | 121 | 0.041322
class a(object):
pass
class b(a):
pass
print a.__subclasses__()
class c(a):
pass
print a.__subclasses__()
jeromecc/doctoctocbot | src/conversation/migrations/0033_twitterusertimeline_last_api_call.py | Python | mpl-2.0 | 423 | 0
# Generated by Django 2.2.17 on 2021-01-31 06:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('conversation', '0032_twitterusertimeline'),
]
operations = [
migrations.AddField(
model_name='twitterusertimeline',
name='last_api_call',
field=models.DateTimeField(blank=True, null=True),
),
]
dawncold/expenditure-application | expenditure_application/__init__.py | Python | apache-2.0 | 90 | 0
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals, print_function, division
schansge/pyfdtd-gui | src/dialogs/__init__.py | Python | gpl-3.0 | 797 | 0
# GUI for pyfdtd using PySide
# Copyright (C) 2012 Patrik Gebhardt
# Contact: grosser.knuff@googlemail.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from newLayer import *
from newSimulation import *
neilhan/tensorflow | tensorflow/python/kernel_tests/rnn_test.py | Python | apache-2.0 | 95,137 | 0.00987
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for rnn module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import time
import timeit
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.util import nest
class Plus1RNNCell(tf.nn.rnn_cell.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1)."""
@property
def output_size(self):
return 5
@property
def state_size(self):
return 5
def __call__(self, input_, state, scope=None):
return (input_ + 1, state + 1)
class DummyMultiDimensionalLSTM(tf.nn.rnn_cell.RNNCell):
"""LSTM Cell generating (output, new_state) = (input + 1, state + 1).
The input to this cell may have an arbitrary number of dimensions that follow
the preceding 'Time' and 'Batch' dimensions.
"""
def __init__(self, dims):
"""Initialize the Multi-dimensional LSTM cell.
Args:
dims: tuple that contains the dimensions of the output of the cell,
without including 'Time' or 'Batch' dimensions.
"""
if not isinstance(dims, tuple):
raise TypeError("The dimensions passed to DummyMultiDimensionalLSTM"
"should be a tuple of ints.")
self._dims = dims
self._output_size = tf.TensorShape(self._dims)
self._state_size = (tf.TensorShape(self._dims), tf.TensorShape(self._dims))
@property
def output_size(self):
return self._output_size
@property
def state_size(self):
return self._state_size
def __call__(self, input_, state, scope=None):
h, c = state
return (input_ + 1, (h + 1, c + 1))
class NestedRNNCell(tf.nn.rnn_cell.RNNCell):
"""RNN Cell generating (output, new_state) = (input + 1, state + 1).
The input, output and state of this cell is a tuple of two tensors.
"""
@property
def output_size(self):
return (5, 5)
@property
def state_size(self):
return (6, 6)
def __call__(self, input_, state, scope=None):
h, c = state
x, y = input_
return ((x + 1, y + 1), (h + 1, c + 1))
class TestStateSaver(object):
def __init__(self, batch_size, state_size):
self._batch_size = batch_size
self._state_size = state_size
self.saved_state = {}
def state(self, name):
if isinstance(self._state_size, dict):
state_size = self._state_size[name]
else:
state_size = self._state_size
if isinstance(state_size, int):
state_size = (state_size,)
elif isinstance(state_size, tuple):
pass
else:
raise TypeError("state_size should either be an int or a tuple")
return tf.zeros((self._batch_size,) + state_size)
def save_state(self, name, state):
self.saved_state[name] = state
return tf.identity(state)
class RNNTest(tf.test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
def testInvalidSequenceLengthShape(self):
cell = Plus1RNNCell()
inputs = [tf.placeholder(tf.float32, shape=(3, 4))]
with self.assertRaisesRegexp(ValueError, "must be a vector"):
tf.nn.rnn(cell, inputs, dtype=tf.float32, sequence_length=4)
with self.assertRaisesRegexp(ValueError, "must be a vector"):
tf.nn.dynamic_rnn(
cell, tf.pack(inputs), dtype=tf.float32, sequence_length=[[4]])
def testRNN(self):
cell = Plus1RNNCell()
batch_size = 2
input_size = 5
max_length = 8 # unrolled up to this length
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
outputs, state = tf.nn.rnn(cell, inputs, dtype=tf.float32)
self.assertEqual(len(outputs), len(inputs))
for out, inp in zip(outputs, inputs):
self.assertEqual(out.get_shape(), inp.get_shape())
self.assertEqual(out.dtype, inp.dtype)
with self.test_session(use_gpu=False) as sess:
input_value = np.random.randn(batch_size, input_size)
values = sess.run(outputs + [state],
feed_dict={inputs[0]: input_value})
# Outputs
for v in values[:-1]:
self.assertAllClose(v, input_value + 1.0)
# Final state
self.assertAllClose(
values[-1],
max_length * np.ones((batch_size, input_size), dtype=np.float32))
def testDropout(self):
cell = Plus1RNNCell()
full_dropout_cell = tf.nn.rnn_cell.DropoutWrapper(
cell, input_keep_prob=1e-12, seed=0)
batch_size = 2
input_size = 5
max_length = 8
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
with tf.variable_scope("share_scope"):
outputs, state = tf.nn.rnn(cell, inputs, dtype=tf.float32)
with tf.variable_scope("drop_scope"):
dropped_outputs, _ = tf.nn.rnn(
full_dropout_cell, inputs, dtype=tf.float32)
self.assertEqual(len(outputs), len(inputs))
for out, inp in zip(outputs, inputs):
self.assertEqual(out.get_shape().as_list(), inp.get_shape().as_list())
self.assertEqual(out.dtype, inp.dtype)
with self.test_session(use_gpu=False) as sess:
input_value = np.random.randn(batch_size, input_size)
values = sess.run(outputs + [state],
feed_dict={inputs[0]: input_value})
full_dropout_values = sess.run(dropped_outputs,
feed_dict={inputs[0]: input_value})
for v in values[:-1]:
self.assertAllClose(v, input_value + 1.0)
for d_v in full_dropout_values[:-1]: # Add 1.0 to dropped_out (all zeros)
self.assertAllClose(d_v, np.ones_like(input_value))
def _testDynamicCalculation(self, use_gpu):
cell = Plus1RNNCell()
sequence_length = tf.placeholder(tf.int64)
batch_size = 2
input_size = 5
max_length = 8
inputs = max_length * [
tf.placeholder(tf.float32, shape=(batch_size, input_size))]
with tf.variable_scope("drop_scope"):
dynamic_outputs, dynamic_state = tf.nn.rnn(
cell, inputs, sequence_length=sequence_length, dtype=tf.float32)
self.assertEqual(len(dynamic_outputs), len(inputs))
with self.test_session(use_gpu=use_gpu) as sess:
input_value = np.random.randn(batch_size, input_size)
dynamic_values = sess.run(dynamic_outputs,
feed_dict={inputs[0]: input_value,
sequence_length: [2, 3]})
dynamic_state_value = sess.run([dynamic_state],
feed_dict={inputs[0]: input_value,
sequence_length: [2, 3]})
# outputs are fully calculated for t = 0, 1
for v in dynamic_values[:2]:
self.assertAllClose(v, input_value + 1.0)
# outputs at t = 2 are zero for entry 0, calculated for entry 1
self.assertAllClose(
dynamic_values[2],
np.vstack((
np.zeros((input_size)),
1.0 + input_value[1, :])))
# outputs at t = 3+ are zero
for v in dynamic_values[3:]:
self.assertAllEqual(v, np.zeros_like(input_value))
# the final states are:
# entry 0: the values from the calculation at t=1
# entry 1: the values from the calculation at t=2
self.assertAllEqual(
dynamic_state_value[0],
np.vstack((
1.0 * (1 + 1) * np.ones((input_size)),
1.0 * (2 + 1) * np.ones((input_size)))))
def testDynamicCalculation(self):
self._testDynam
stonebig/numba | numba/roc/hsadrv/driver.py | Python | bsd-2-clause | 51,876 | 0.001292
"""
HSA driver bridge implementation
"""
from collections.abc import Sequence
import sys
import atexit
import os
import ctypes
import struct
import traceback
import weakref
import logging
from contextlib import contextmanager
from collections import defaultdict, deque
from functools import total_ordering
from numba import mviewbuf
from numba.core import utils, config
from .error import HsaSupportError, HsaDriverError, HsaApiError
from numba.roc.hsadrv import enums, enums_ext, drvapi
import numpy as np
_logger = logging.getLogger(__name__)
class HsaKernelTimedOut(HsaDriverError):
pass
def _device_type_to_string(device):
try:
return ['CPU', 'GPU', 'DSP'][device]
except IndexError:
return 'Unknown'
DEFAULT_HSA_DRIVER = '/opt/rocm/lib/libhsa-runtime64.so'
def _find_driver():
envpath = os.environ.get('NUMBA_HSA_DRIVER', DEFAULT_HSA_DRIVER)
if envpath == '0':
# Force fail
_raise_driver_not_found()
# Determine DLL type
if (struct.calcsize('P') != 8
or sys.platform == 'win32'
or sys.platform == 'darwin'):
_raise_platform_not_supported()
else:
# Assume to be *nix like and 64 bit
dlloader = ctypes.CDLL
dldir = ['/usr/lib', '/usr/lib64']
dlname = 'libhsa-runtime64.so'
if envpath is not None:
try:
envpath = os.path.abspath(envpath)
except ValueError:
raise HsaSupportError("NUMBA_HSA_DRIVER %s is not a valid path" %
envpath)
if not os.path.isfile(envpath):
raise HsaSupportError("NUMBA_HSA_DRIVER %s is not a valid file "
"path. Note it must be a filepath of the .so/"
".dll/.dylib or the driver" % envpath)
candidates = [envpath]
else:
# First search for the name in the default library path.
# If that is not found, try the specific path.
candidates = [dlname] + [os.path.join(x, dlname) for x in dldir]
# Load the driver; Collect driver error information
path_not_exist = []
driver_load_error = []
for path in candidates:
try:
dll = dlloader(path)
except OSError as e:
# Problem opening the DLL
path_not_exist.append(not os.path.isfile(path))
driver_load_error.append(e)
else:
return dll
# Problem loading driver
if all(path_not_exist):
_raise_driver_not_found()
else:
errmsg = '\n'.join(str(e) for e in driver_load_error)
_raise_driver_error(errmsg)
PLATFORM_NOT_SUPPORTED_ERROR = """
HSA is not currently supported on this platform ({0}).
"""
def _raise_platform_not_supported():
raise HsaSupportError(PLATFORM_NOT_SUPPORTED_ERROR.format(sys.platform))
DRIVER_NOT_FOUND_MSG = """
The HSA runtime library cannot be found.
If you are sure that the HSA is installed, try setting environment
variable NUMBA_HSA_DRIVER with the file path of the HSA runtime shared
library.
"""
def _raise_driver_not_found():
raise HsaSupportError(DRIVER_NOT_FOUND_MSG)
DRIVER_LOAD_ERROR_MSG = """
A HSA runtime library was found, but failed to load with error:
%s
"""
def _raise_driver_error(e):
raise HsaSupportError(DRIVER_LOAD_ERROR_MSG % e)
MISSING_FUNCTION_ERRMSG = """driver missing function: %s.
"""
class Recycler(object):
def __init__(self):
self._garbage = []
self.enabled = True
def free(self, obj):
self._garbage.append(obj)
self.service()
def _cleanup(self):
for obj in self._garbage:
obj._finalizer(obj)
del self._garbage[:]
def service(self):
if self.enabled:
if len(self._garbage) > 10:
self._cleanup()
def drain(self):
self._cleanup()
self.enabled = False
# The Driver ###########################################################
class Driver(object):
"""
Driver API functions are lazily bound.
"""
_singleton = None
_agent_map = None
_api_prototypes = drvapi.API_PROTOTYPES # avoid premature GC at exit
_hsa_properties = {
'version_major': (enums.HSA_SYSTEM_INFO_VERSION_MAJOR, ctypes.c_uint16),
'version_minor': (enums.HSA_SYSTEM_INFO_VERSION_MINOR, ctypes.c_uint16),
'timestamp': (enums.HSA_SYSTEM_INFO_TIMESTAMP, ctypes.c_uint64),
'timestamp_frequency': (enums.HSA_SYSTEM_INFO_TIMESTAMP_FREQUENCY, ctypes.c_uint16),
'signal_max_wait': (enums.HSA_SYSTEM_INFO_SIGNAL_MAX_WAIT, ctypes.c_uint64),
}
def __new__(cls):
obj = cls._singleton
if obj is not None:
return obj
else:
obj = object.__new__(cls)
cls._singleton = obj
return obj
def __init__(self):
try:
if config.DISABLE_HSA:
raise HsaSupportError("HSA disabled by user")
self.lib = _find_driver()
self.is_initialized = False
self.initialization_error = None
except HsaSupportError as e:
self.is_initialized = True
self.initialization_error = e
self._agent_map = None
self._programs = {}
self._recycler = Recycler()
self._active_streams = weakref.WeakSet()
def _initialize_api(self):
if self.is_initialized:
return
self.is_initialized = True
try:
self.hsa_init()
except HsaApiError as e:
self.initialization_error = e
raise HsaDriverError("Error at driver init: \n%s:" % e)
else:
@atexit.register
def shutdown():
try:
for agent in self.agents:
agent.release()
except AttributeError:
# this is because no agents initialised
# so self.agents isn't present
pass
else:
self._recycler.drain()
def _initialize_agents(self):
if self._agent_map is not None:
return
self._initialize_api()
agent_ids = []
def on_agent(agent_id, ctxt):
agent_ids.append(agent_id)
return enums.HSA_STATUS_SUCCESS
callback = drvapi.HSA_ITER_AGENT_CALLBACK_FUNC(on_agent)
self.hsa_iterate_agents(callback, None)
agent_map = dict((agent_id, Agent(agent_id)) for agent_id in agent_ids)
self._agent_map = agent_map
@property
def is_available(self):
self._initialize_api()
return self.initialization_error is None
@property
def agents(self):
self._initialize_agents()
return self._agent_map.values()
def create_program(self, model=enums.HSA_MACHINE_MODEL_LARGE,
profile=enums.HSA_PROFILE_FULL,
rounding_mode=enums.HSA_DEFAULT_FLOAT_ROUNDING_MODE_DEFAULT,
options=None):
program = drvapi.hsa_ext_program_t()
assert options is None
self.hsa_ext_program_create(model, profile, rounding_mode,
options, ctypes.byref(program))
return Program(program)
def create_signal(self, initial_value, consumers=None):
if consumers is None:
consumers = tuple(self.agents)
consumers_len = len(consumers)
consumers_type = drvapi.hsa_agent_t * consumers_len
consumers = consumers_type(*[c._id for c in consumers])
result = drvapi.hsa_signal_t()
self.hsa_signal_create(initial_value, consumers_len, consumers,
ctypes.byref(result))
return Signal(result.value)
def __getattr__(self, fname):
# Initialize driver
self._initialize_api()
# First try if it is an hsa property
try:
enum, typ = self._hsa_properties[fname]
result = typ()
self.hsa_system_get_info(enum, ctypes.byref(result))
return result.value
except KeyError:
pass
# if not a prop
markashleybell/ExpandTabsOnLoad | ExpandTabsOnLoad.py | Python | mit | 630 | 0
import os
import re
import sublime
import sublime_plugin
class ExpandTabsOnLoad(sublime_plugin.EventListener):
# Run ST's 'expand_tabs' command when opening a file,
# only if there are any tab characters in the file
def on_load(self, view):
expand_tabs = view.settings().get("expand_tabs_on_load", False)
if expand_tabs and view.find("\t", 0):
view.run_command("expand_tabs", {"set_translate_tabs": True})
tab_size = view.settings().get("tab_size", 0)
message = "Converted tab characters to {0} spaces".format(tab_size)
sublime.status_message(message)
ARMmbed/yotta_osx_installer | workspace/lib/python2.7/site-packages/cryptography/hazmat/primitives/constant_time.py | Python | apache-2.0 | 798 | 0
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import hmac
from cryptography.hazmat.bindings._constant_time import lib
if hasattr(hmac, "compare_digest"):
def bytes_eq(a, b):
if not isinstance(a, bytes) or not isinstance(b, bytes):
raise TypeError("a and b must be bytes.")
return hmac.compare_digest(a, b)
else:
def bytes_eq(a, b):
if not isinstance(a, bytes) or not isinstance(b, bytes):
raise TypeError("a and b must be bytes.")
return lib.Cryptography_constant_time_bytes_eq(
a, len(a), b, len(b)
) == 1
bnaul/scikit-learn | sklearn/utils/tests/test_extmath.py | Python | bsd-3-clause | 26,768 | 0
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis Engemann <denis-alexander.engemann@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from scipy.special import expit
import pytest
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_allclose_dense_sparse
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import assert_warns_message
from sklearn.utils._testing import skip_if_32bit
from sklearn.utils.extmath import density
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _incremental_mean_and_var
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.utils.extmath import softmax
from sklearn.utils.extmath import stable_cumsum
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.datasets import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert density(X_) == density(X)
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis=axis)
assert_array_equal(mode, mode2)
assert_array_equal(score, score2)
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def check_randomized_svd_low_rank(dtype):
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
decimal = 5 if dtype == np.float32 else 7
dtype = np.dtype(dtype)
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0).astype(dtype, copy=False)
assert X.shape == (n_samples, n_features)
# compute the singular values of X using the slow exact method
U, s, Vt = linalg.svd(X, full_matrices=False)
# Convert the singular values to the specific dtype
U = U.astype(dtype, copy=False)
s = s.astype(dtype, copy=False)
Vt = Vt.astype(dtype, copy=False)
for normalizer in ['auto', 'LU', 'QR']: # 'none' would not be stable
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(
X, k, power_iteration_normalizer=normalizer, random_state=0)
# If the input dtype is float, then the output dtype is float of the
# same bit size (f32 is not upcast to f64)
# But if the input dtype is int, the output dtype is float64
if dtype.kind == 'f':
assert Ua.dtype == dtype
assert sa.dtype == dtype
assert Va.dtype == dtype
else:
assert Ua.dtype == np.float64
assert sa.dtype == np.float64
assert Va.dtype == np.float64
assert Ua.shape == (n_samples, k)
assert sa.shape == (k,)
assert Va.shape == (k, n_features)
# ensure that the singular values of both methods are equal up to the
# real rank of the matrix
assert_almost_equal(s[:k], sa, decimal=decimal)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], Vt[:k, :]), np.dot(Ua, Va),
decimal=decimal)
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = \
randomized_svd(X, k, power_iteration_normalizer=normalizer,
random_state=0)
if dtype.kind == 'f':
assert Ua.dtype == dtype
assert sa.dtype == dtype
assert Va.dtype == dtype
else:
assert Ua.dtype.kind == 'f'
assert sa.dtype.kind == 'f'
assert Va.dtype.kind == 'f'
assert_almost_equal(s[:rank], sa[:rank], decimal=decimal)
@pytest.mark.parametrize('dtype',
(np.int32, np.int64, np.float32, np.float64))
def test_randomized_svd_low_rank_all_dtypes(dtype):
check_randomized_svd_low_rank(dtype)
@pytest.mark.parametrize('dtype',
(np.float32, np.float64))
def test_row_norms(dtype):
X = np.random.RandomState(42).randn(100, 100)
if dtype is np.float32:
precision = 4
else:
precision = 5
X = X.astype(dtype, copy=False)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X), precision)
for csr_index_dtype in [np.int32, np.int64]:
Xcsr = sparse.csr_matrix(X, dtype=dtype)
# csr_matrix will use int32 indices by default,
# up-casting those to int64 when necessary
if csr_index_dtype is np.int64:
Xcsr.indptr = Xcsr.indptr.astype(csr_index_dtype, copy=False)
Xcsr.indices = Xcsr.indices.astype(csr_index_dtype, copy=False)
assert Xcsr.indices.dtype == csr_index_dtype
assert Xcsr.indptr.dtype == csr_index_dtype
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True),
precision)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr),
precision)
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X wity structure approximate rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.1,
random_state=0)
assert X.shape == (n_samples, n_features)
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
for normalizer in ['auto', 'none', 'LU', 'QR']:
# compute the singular values of X using the fast approximate
# method without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0,
power_iteration_normalizer=normalizer,
random_state=0)
# the approximation does not tolerate the noise:
assert np.abs(s[:k] - sa).max() > 0.01
# compute the singular values of X using the fast approximate
# method with iterated power method
_, sap, _ = randomized_svd(X, k,
power_iteration_normalizer=normalizer,
cmtcoin/wallet | contrib/pyminer/pyminer.py | Python | mit | 6,434 | 0.034815
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 7046
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
anandology/pyjamas | pyjs/src/pyjs/translator_dict.py | Python | apache-2.0 | 117,085 | 0.001196
#!/usr/bin/env python
import sys
import os
import re
from lib2to3.pgen2.driver import Driver
from lib2to3 import pygram, pytree
from lib2to3.pytree import Node, Leaf, type_repr
from lib2to3.pygram import python_symbols
def sym_type(name):
return getattr(python_symbols, name)
def new_node(name):
return Node(sym_type(name), [])
import __Pyjamas__
from __Future__ import __Future__
# This is taken from the django project.
# Escape every ASCII character with a value less than 32.
JS_ESCAPES = (
('\\', r'\x5C'),
('\'', r'\x27'),
('"', r'\x22'),
('>', r'\x3E'),
('<', r'\x3C'),
('&', r'\x26'),
(';', r'\x3B')
) + tuple([('%c' % z, '\\x%02X' % z) for z in range(32)])
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
for bad, good in JS_ESCAPES:
value = value.replace(bad, good)
return value
re_js_string_escape = ''.join([chr(i) for i in range(32)])
re_js_string_escape += '''\\\\"'<>&;'''
re_js_string_escape = re.compile("[%s]" % re_js_string_escape)
re_int = re.compile('^[-+]?[0-9]+$')
re_long = re.compile('^[-+]?[0-9]+[lL]$')
re_hex_int = re.compile('^[-+]?0x[0-9a-fA-F]+$')
re_hex_long = re.compile('^[-+]?0x[0-9a-fA-F]+[lL]$')
re_oct_int = re.compile('^[-+]?0[0-8]+$')
re_oct_long = re.compile('^[-+]?0[0-8]+[lL]$')
builtin_names = [
'ArithmeticError',
'AssertionError',
'AttributeError',
'BaseException',
'BufferError',
'BytesWarning',
'DeprecationWarning',
'EOFError',
'Ellipsis',
'EnvironmentError',
'Exception',
'False',
'FloatingPointError',
'FutureWarning',
'GeneratorExit',
'IOError',
'ImportError',
'ImportWarning',
'IndentationError',
'IndexError',
'KeyError',
'KeyboardInterrupt',
'LookupError',
'MemoryError',
'NameError',
'None',
'NotImplemented',
'NotImplementedError',
'OSError',
'OverflowError',
'PendingDeprecationWarning',
'ReferenceError',
'RuntimeError',
'RuntimeWarning',
'StandardError',
'StopIteration',
'SyntaxError',
'SyntaxWarning',
'SystemError',
'SystemExit',
'TabError',
'True',
'TypeError',
'UnboundLocalError',
'UnicodeDecodeError',
'UnicodeEncodeError',
'UnicodeError',
'UnicodeTranslateError',
'UnicodeWarning',
'UserWarning',
'ValueError',
'Warning',
'ZeroDivisionError',
'_',
'__debug__',
'__doc__',
'__import__',
'__name__',
'__package__',
'abs',
'all',
'any',
'apply',
'basestring',
'bin',
'bool',
'buffer',
'bytearray',
'bytes',
'callable',
'chr',
'classmethod',
'cmp',
'coerce',
'compile',
'complex',
'copyright',
'credits',
'delattr',
'dict',
'dir',
'divmod',
'enumerate',
'eval',
'execfile',
'exit',
'file',
'filter',
'float',
'format',
'frozenset',
'getattr',
'globals',
'hasattr',
'hash',
'help',
'hex',
'id',
'input',
'int',
'intern',
'isinstance',
'issubclass',
'iter',
'len',
'license',
'list',
'locals',
'long',
'map',
'max',
'min',
'next',
'object',
'oct',
'open',
'ord',
'pow',
'print',
'property',
'quit',
'range',
'raw_input',
'reduce',
'reload',
'repr',
'reversed',
'round',
'set',
'setattr',
'slice',
'sorted',
'staticmethod',
'str',
'sum',
'super',
'tuple',
'type',
'unichr',
'unicode',
'vars',
'xrange',
'zip',
]
class TranslateOptions(object):
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except:
return None
class Name(object):
def __init__(self, name, reflineno, glob=False, to_js=None):
self.name = name
self.reflineno = reflineno
self.glob = glob
self.to_js = to_js
self.depth = None
self.builtin = False
def __str__(self):
return "<Name %s %s %s>" % (self.name, self.reflineno, self.glob)
def __repr__(self):
return "<Name %s %s %s>" % (self.name, self.reflineno, self.glob)
class Names(dict):
pass
class ClassNames(Names):
pass
class AstNode(object):
pass
class Argument(AstNode):
def __init__(self, name, value=None):
self.name = name
self.value = value
class Attribute(AstNode):
def __init__(self, name):
self.name = name
class Code(AstNode):
def __new__(cls, code, lineno):
if code is None:
return None
return object.__new__(cls)
def __init__(self, code, lineno):
self.code = code
self.lineno = lineno
def __str__(self):
if self.code is None:
return None
return str(self.code)
def __repr__(self):
if self.code is None:
return None
return repr(self.code)
class Decorator(AstNode):
def __init__(self, name, lineno):
self.name = name
self.lineno = lineno
class Import(AstNode):
def __init__(self, modname, assname=None, fromlist=None):
self.modname = '.'.join(modname)
if assname is None:
self.assname = modname[0]
else:
self.assname = assname
self.fromlist = fromlist
class Parameters(AstNode):
def __init__(self, args, star_args, dstar_args, defaults):
assert isinstance(args, list)
self.args = []
self.named_args = {}
for arg in args:
if not isinstance(arg, Argument):
self.args.append(arg)
else:
if arg.name == '*':
assert star_args is None
star_args = arg.value
continue
if arg.name == '**':
assert dstar_args is None
dstar_args = arg.value
continue
self.named_args[arg.name] = arg.value
if not self.named_args:
self.named_args = None
self.star_args = star_args
self.dstar_args = dstar_args
self.all_args = args[:]
if star_args is not None:
self.all_args.append(star_args)
if dstar_args is not None:
self.all_args.append(dstar_args)
self.defaults = defaults
class Slice(AstNode):
def __init__(self, items):
assert isinstance(items, tuple)
self.items = items
def __str__(self):
return 'Slice%s' % (self.items,)
def __repr__(self):
return 'Slice%s' % (self.items,)
leaf_type = {
1: 'name',
2: 'number',
3: 'str',
}
# TODO: import this from mkbuiltin.py
func_type = {
'function': 1,
'staticmethod': 2,
'classmethod': 3,
}
# TODO: import this from mkbuiltin.py
short_names = {
'module': 'm$',
'globals': 'g$',
'locals': 'l$',
'namestack': 'n$',
'funcbase': 'f$',
'builtin': 'B$',
'constants': 'C$',
'None': 'N$',
'True': 'T$',
'False': 'F$',
}
op_names1 = {
'inv': 'op_inv',
'neg': 'op_neg',
'not': 'op_not',
}
op_names2 = {
'+': 'op_add',
'-': 'op_sub',
'*': 'op_mul',
'/': 'op_div', # set to op_truediv with 'from __future__ import division'
'//': 'op_floordiv',
'%': 'op_mod',
'**': 'op_pow',
'&': 'op_bitand',
'|': 'op_bitor',
'^': 'op_bitxor',
'<<': 'op_bitlshift',
'>>': 'op_bitrshift',
'+=': 'op_add',
'-=': 'op_sub',
'*=': 'op_mul',
'/=': 'op_div',
'//=': 'op_floordiv',
'%=': 'op_mod',
'**=': 'op_pow',
'&=': 'op_bitand',
'|=': 'op_bitor',
'^=': 'op_bitxor',
'<<=': 'op_bitlshift',
'>>=': 'op_bitrshift',
}
op_compare = {
'is': 'op_is',
'is not': 'op_is_not',
'==': 'op_eq',
'!=': 'op_ne',
'<': 'op_lt',
'<=': 'op_le',
'>': 'op_gt',
'>=': 'op_ge',
'in': 'op_in',
'not in': 'op_not_in'
}
|
marcel-goldschen-ohm/ModelViewPyQt
|
FileDialogDelegateQt.py
|
Python
|
mit
| 2,023 | 0.001483 |
""" FileDialogDelegateQt.py: Delegate that pops up a file dialog when double clicked.
Sets the model data to the selected file name.
"""
import os.path
try:
from PyQt5.QtCore import Qt, QT_VERSION_STR
from PyQt5.QtWidgets import QStyledItemDelegate, QFileDialog
except ImportError:
try:
from PyQt4.QtCore import Qt, QT_VERSION_STR
from PyQt4.QtGui import QStyledItemDelegate, QFileDialog
except ImportError:
raise ImportError("FileDialogDelegateQt: Requires PyQt5 or PyQt4.")
__author__ = "Marcel Goldschen-Ohm <marcel.goldschen@gmail.com>"
class FileDialogDelegateQt(QStyledItemDelegate):
""" Delegate that pops up a file dialog when double clicked.
Sets the model data to the selected file name.
"""
def __init__(self, parent=None):
QStyledItemDelegate.__init__(self, parent)
def createEditor(self, parent, option, index):
""" Instead of creating an editor, just popup a modal file dialog
and set the model data to the selected file name, if any.
"""
pathToFileName = ""
if QT_VERSION_STR[0] == '4':
pathToFileName = QFileDialog.getOpenFileName(None,
"Open")
elif QT_VERSION_STR[0] == '5':
pathToFileName, temp = QFileDialog.getOpenFileName(None, "Open")
pathToFileName = str(pathToFileName) # QString ==> str
if len(pathToFileName):
index.model().setData(index, pathToFileName, Qt.EditRole)
index.model().dataChanged.emit(index, index) # Tell model to update cell display.
return None
def displayText(self, value, locale):
""" Show file name without path.
"""
try:
if QT_VERSION_STR[0] == '4':
pathToFileName = str(value.toString()) # QVariant ==> str
elif QT_VERSION_STR[0] == '5':
pathToFileName = str(value)
path, fileName = os.path.split(pathToFileName)
return fileName
except:
return ""
|
pmlrsg/PySpectra
|
PySpectra/dart.py
|
Python
|
mit
| 2,368 | 0.000845 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# This file has been created by ARSF Data Analysis Node and
# is licensed under the MIT Licence. A copy of this
# licence is available to download with this file.
#
# Author: Robin Wilson
# Created: 2015-11-16
import sys
import numpy as np
import pandas as pd
# Python 2/3 imports
try:
from StringIO import StringIO
except ImportError:
if sys.version_info[0] >= 3:
from io import StringIO
else:
raise
from . import spectra_reader
class DARTFormat(spectra_reader.SpectraReader):
"""
Class to read spectra from DART format files
"""
def get_spectra(self, filename):
"""
Extract spectra from a DART format file
Requires:
* filename - the filename to the DART format file to read
Returns:
* Spectra object with values, radiance, pixel and line
"""
f = open(filename, 'r')
s = StringIO()
within_comment = False
while True:
try:
line = next(f)
except:
break
if "*" in line and within_comment:
within_comment = False
continue
elif "*" in line and not within_comment:
within_comment = True
if not within_comment and not line.isspace():
s.write(line)
s.seek(0)
df = pd.read_table(s, header=None, names=["wavelength", "reflectance",
"refractive_index", "A", "Alpha",
"wHapke", "AHapkeSpec",
"AlphaHapkeSpec", "TDirect",
"TDiffuse"])
df.reflectance = df.reflectance / 100
wavelengths = np.array(df.wavelength)
reflectance = np.array(df.reflectance)
self.spectra.file_name = filename
self.spectra.wavelengths = wavelengths
self.spectra.values = reflectance
self.spectra.pixel = None
self.spectra.line = None
self.spectra.latitude = None
self.spectra.longitude = None
self.spectra.wavelength_units = "nm"
self.spectra.value_units = "reflectance"
self.spectra.value_scaling = 1
return self.spectra
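# Illustrative usage sketch (assumption, not in the original module): read a
# DART output file and inspect the parsed reflectance spectrum.
#   reader = DARTFormat()
#   spectra = reader.get_spectra("dart_output.txt")   # hypothetical file name
#   print(spectra.wavelengths[:5], spectra.values[:5])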
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_04_01/aio/operations/_nat_gateways_operations.py
|
Python
|
mit
| 26,918 | 0.004681 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NatGatewaysOperations:
"""NatGatewaysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
nat_gateway_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-04-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
nat_gateway_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified nat gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
nat_gateway_name=nat_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'natGatewayName': self._serialize.url("nat_gateway_name", nat_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/natGateways/{natGatewayName}'} # type: ignore
async def get(
self,
resource_group_name: str,
nat_gateway_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.NatGateway":
"""Gets the specified nat gateway in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param nat_gateway_name: The name of the nat gateway.
:type nat_gateway_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NatGateway, or the result of cls(response)
:rtype:
|
TheBraveWarrior/pyload
|
module/plugins/crypter/Movie2KTo.py
|
Python
|
gpl-3.0
| 472 | 0 |
# -*- coding: utf-8 -*-
from ..internal.DeadCrypter import DeadCrypter
class Movie2KTo(DeadCrypter):
__name__ = "Movie2KTo"
__type__ = "crypter"
__version__ = "0.56"
__status__ = "stable"
__pattern__ = r'http://(?:www\.)?movie2k\.to/(.+)\.html'
__config__ = [("activated", "bool", "Activated", True)]
__description__ = """Movie2k.to decrypter plugin"""
__license__ = "GPLv3"
__authors__ = [("4Christopher", "4Christopher@gmx.de")]
|
jonparrott/google-cloud-python
|
error_reporting/google/cloud/errorreporting_v1beta1/gapic/report_errors_service_client_config.py
|
Python
|
apache-2.0
| 987 | 0 |
config = {
"interfaces": {
"google.devtools.clouderrorreporting.v1beta1.ReportErrorsService": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"],
"non_idempotent": []
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout
|
_millis": 20000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 20000,
"total_timeout_millis": 600000
}
},
"methods": {
"ReportErrorEvent": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default"
}
}
}
}
}
|
Fitzgibbons/Cryptograpy
|
rabinmiller.py
|
Python
|
mit
| 526 | 0.001901 |
import random
rand = random.SystemRandom()
def rabinMiller(num):
if num % 2 == 0:
return False
s = num - 1
t = 0
while s % 2 == 0:
s = s // 2
t += 1
for trials in range(64):
a = rand.randrange(2, num - 1)
v = pow(a, s, num)
if v != 1:
i = 0
while v != (num - 1):
if i == t - 1:
return False
else:
i = i + 1
v = (v ** 2) % num
return True
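# Illustrative usage (assumption, not in the original file): rabinMiller is a
# probabilistic primality test for odd integers.
#   rabinMiller(101)  # -> True  (101 is prime)
#   rabinMiller(100)  # -> False (even numbers are rejected immediately)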
| |
johnnoone/salt-targeting
|
src/salt/targeting/__init__.py
|
Python
|
mit
| 816 | 0.006127 |
'''
salt.targeting
~~~~~~~~~~~~~~
'''
import logging
log = logging.getLogger(__name__)
from .parser import *
from .query import *
from .rules import *
from .subjects import *
#: defines minion targeting
minion_targeting = Query(default_rule=GlobRule)
minion_targeting.register(GlobRule, None, 'glob')
minion_targeting.register(GrainRule, 'G', 'grain')
minion_targeting.register(PillarRule, 'I', 'pillar')
minion_targeting.register(PCRERule, 'E', 'pcre')
minion_targeting.register(GrainPCRERule, 'P', 'grain_pcre')
minion_targeting.register(SubnetIPRule, 'S')
minion_targeting.register(ExselRule, 'X', 'exsel')
minion_targeting.register(LocalStoreRule, 'D')
minion_targeting.register(YahooRangeRule, 'R')
minion_targeting.register(ListEvaluator, 'L', 'list')
minion_targeting.register(NodeGroupEvaluator, 'N')
|
citrix-openstack-build/oslo.messaging
|
oslo/messaging/notify/_impl_noop.py
|
Python
|
apache-2.0
| 811 | 0 |
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.messaging.notify import notifier
class NoOpDriver(notifier._Driver):
def notify(self, ctxt, message, priority):
pass
|
uingei/mm
|
merklemaker.py
|
Python
|
agpl-3.0
| 19,767 | 0.034704 |
# Eloipool - Python Bitcoin pool server
# Copyright (C) 2011-2012 Luke Dashjr <luke-jr+eloipool@utopios.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from binascii import b2a_hex
import bitcoin.script
from bitcoin.script import countSigOps
from bitcoin.txn import Txn
from bitcoin.varlen import varlenEncode, varlenDecode
from collections import deque
from copy import deepcopy
from queue import Queue
import jsonrpc
import logging
from math import log
from merkletree import MerkleTree
from struct import pack
import threading
from time import sleep, time
import traceback
import config
_makeCoinbase = [0, 0]
def MakeBlockHeader(MRD):
(merkleRoot, merkleTree, coinbase, prevBlock, bits) = MRD[:5]
timestamp = pack('<L', int(time()))
hdr = b'\2\0\0\0' + prevBlock + merkleRoot + timestamp + bits + b'iolE'
return hdr
def assembleBlock(blkhdr, txlist):
payload = blkhdr
payload += varlenEncode(len(txlist))
for tx in txlist:
payload += tx.data
return payload
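# Illustrative usage sketch (assumption, not in the original file): a serialized
# block is the 80-byte header followed by a varint transaction count and each
# Txn's raw bytes, e.g.
#   block = assembleBlock(MakeBlockHeader(MRD), txn_list)   # hypothetical MRD/txn_list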
class merkleMaker(threading.Thread):
OldGMP = None
GBTCaps = [
'coinbasevalue',
'coinbase/append',
'coinbase',
'generation',
'time',
'transactions/remove',
'prevblock',
]
GBTReq = {
'capabilities': GBTCaps,
}
GMPReq = {
'capabilities': GBTCaps,
'tx': 'obj',
}
def __init__(self, *a, **k):
super().__init__(*a, **k)
self.daemon = True
self.logger = logging.getLogger('merkleMaker')
self.CoinbasePrefix = b'Mined by Multicoin.co'
self.CoinbaseAux = {}
self.isOverflowed = False
self.lastWarning = {}
self.MinimumTxnUpdateWait = 1
self.overflowed = 0
self.DifficultyChangeMod = 1
def _prepare(self):
self.access = jsonrpc.ServiceProxy(self.UpstreamURI)
self.MinimumTxnUpdateWait = 1
self.IdleSleepTime = 1
self.TxnUpdateRetryWait = 1
self.ready = False
self.readyCV = threading.Condition()
self.currentBlock = (None, None, None)
self.lastBlock = (None, None, None)
self.currentMerkleTree = None
self.merkleRoots = deque(maxlen=self.WorkQueueSizeRegular[1])
self.LowestMerkleRoots = self.WorkQueueSizeRegular[1]
if not hasattr(self, 'WorkQueueSizeClear'):
self.WorkQueueSizeClear = self.WorkQueueSizeLongpoll
self._MaxClearSize = max(self.WorkQueueSizeClear[1], self.WorkQueueSizeLongpoll[1])
self.clearMerkleRoots = Queue(self._MaxClearSize)
self.LowestClearMerkleRoots = self.WorkQueueSizeClear[1]
self.nextMerkleRoots = Queue(self._MaxClearSize)
if not hasattr(self, 'WarningDelay'):
self.WarningDelay = max(15, self.MinimumTxnUpdateWait * 2)
if not hasattr(self, 'WarningDelayTxnLongpoll'):
self.WarningDelayTxnLongpoll = self.WarningDelay
if not hasattr(self, 'WarningDelayMerkleUpdate'):
self.WarningDelayMerkleUpdate = self.WarningDelay
self.lastMerkleUpdate = 0
self.nextMerkleUpdate = 0
def createClearMerkleTree(self, height):
subsidy = self.access.getblocktemplate()['coinbasevalue']
cbtxn = self.makeCoinbaseTxn(subsidy, False)
cbtxn.assemble()
return MerkleTree([cbtxn])
def updateBlock(self, newBlock, height = None, bits = None, _HBH = None):
if newBlock == self.currentBlock[0]:
if height in (None, self.currentBlock[1]) and bits in (None, self.currentBlock[2]):
return
if not self.currentBlock[2] is None:
self.logger.error('Was working on block with wrong specs: %s (height: %d->%d; bits: %s->%s' % (
b2a_hex(newBlock[::-1]).decode('utf8'),
self.currentBlock[1],
height,
b2a_hex(self.currentBlock[2][::-1]).decode('utf8'),
b2a_hex(bits[::-1]).decode('utf8'),
))
# Old block is invalid
if self.currentBlock[0] != newBlock:
self.lastBlock = self.currentBlock
lastHeight = self.currentBlock[1]
if height is None:
height = self.currentBlock[1] + 1
if bits is None:
if height % self.DifficultyChangeMod == 1 or self.currentBlock[2] is None:
self.logger.warning('New block: %s (height %d; bits: UNKNOWN)' % (b2a_hex(newBlock[::-1]).decode('utf8'), height))
# Pretend to be 1 lower height, so we possibly retain nextMerkleRoots
self.currentBlock = (None, height - 1, None)
self.clearMerkleRoots = Queue(0)
self.merkleRoots.clear()
self.ready = False
return
else:
bits = self.currentBlock[2]
if _HBH is None:
_HBH = (b2a_hex(newBlock[::-1]).decode('utf8'), b2a_hex(bits[::-1]).decode('utf8'))
self.logger.info('New block: %s (height: %d; bits: %s)' % (_HBH[0], height, _HBH[1]))
self.currentBlock = (newBlock, height, bits)
if lastHeight != height:
# TODO: Perhaps reuse clear merkle trees more intelligently
if lastHeight == height - 1:
self.curClearMerkleTree = self.nextMerkleTree
self.clearMerkleRoots = self.nextMerkleRoots
self.logger.debug('Adopting next-height clear merkleroots :)')
else:
if lastHeight:
self.logger.warning('Change from height %d->%d; no longpoll merkleroots available!' % (lastHeight, height))
self.curClearMerkleTree = self.createClearMerkleTree(height)
self.clearMerkleRoots = Queue(self.WorkQueueSizeClear[1])
self.nextMerkleTree = self.createClearMerkleTree(height + 1)
self.nextMerkleRoots = Queue(self._MaxClearSize)
else:
self.logger.debug('Already using clear merkleroots for this height')
self.currentMerkleTree = self.curClearMerkleTree
self.merkleRoots.clear()
if not self.ready:
self.ready = True
with self.readyCV:
self.readyCV.notify_all()
self.needMerkle = 2
self.onBlockChange()
def _trimBlock(self, MP, txnlist, txninfo, floodn, msgf):
fee = txninfo[-1].get('fee', None)
if fee is None:
raise self._floodCritical(now, floodn, doin=msgf('fees unknown'))
if fee:
# FIXME: coinbasevalue is *not* guaranteed to exist here
MP['coinbasevalue'] -= fee
txnlist[-1:] = ()
txninfo[-1:] = ()
return True
# Aggressive "Power Of Two": Remove transactions even with fees to reach our goal
def _APOT(self, txninfopot, MP, POTInfo):
feeTxnsTrimmed = 0
feesTrimmed = 0
for txn in txninfopot:
if txn.get('fee') is None:
self._floodWarning(now, 'APOT-No-Fees', doin='Upstream didn\'t provide fee information required for aggressive POT', logf=self.logger.info)
return
if not txn['fee']:
continue
feesTrimmed += txn['fee']
feeTxnsTrimmed += 1
MP['coinbasevalue'] -= feesTrimmed
POTInfo[2] = [feeTxnsTrimmed, feesTrimmed]
self._floodWarning(now, 'POT-Trimming-Fees', doin='Aggressive POT trimming %d transactions with %d.%08d BTC total fees' % (feeTxnsTrimmed, feesTrimmed//100000000, feesTrimmed % 100000000), logf=self.logger.debug)
return True
def _makeBlockSafe(self, MP, txnlist, txninfo):
blocksize = sum(map(len, txnlist)) + 80
while blocksize > 934464: # 1 "MB" limit - 64 KB breathing room
txnsize = len(txnlist[-1])
self._trimBlock(MP, txnlist, txninfo, 'SizeLimit', lambda x: 'Making blocks over 1 MB size limit (%d bytes; %s)' % (blocksize, x))
blocksize -= txnsize
# NOTE: This check doesn't work at all without BIP22 transaction obj format
blocksigops = sum(a.get('sigops', 0) for a in txninfo)
while blocksigops > 19488: # 20k limit - 0x200 breathing room
txnsigops = txninfo[-1]['sigops']
self._trimBlock(MP, txnlist, txninfo, 'SigOpLimit', lambda x: 'Making blocks over 20k SigOp limit (%d; %s)' % (blocksigops, x))
blocksigops -= txnsigops
# Aim to produce blocks with "Power Of Two" transaction counts
# This helps avoid any chance of someone abusing CVE-2012-2459 with them
POTMode = getattr(self, 'POT', 0)
txncount = len(txnlist) + 1
if POTMode:
feetxncount = txncount
|
IntelligentVisibility/ztpserver
|
ztpserver/__init__.py
|
Python
|
bsd-3-clause
| 1,638 | 0.000611 |
#
# Copyright (c) 2014, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
|
FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
__version__ = '1.1.0'
__author__ = 'Arista Networks'
|
Lightning3105/Legend-Of-Aiopa-RPG
|
Updater.py
|
Python
|
gpl-2.0
| 9,598 | 0.007293 |
import urllib.request
import pickle
import sys
try:
import Variables as v
except:
class var():
def __init__(self):
self.screen = None
v = var()
import pygame as py
class textLabel(py.sprite.Sprite):
def __init__(self, text, pos, colour, font, size, variable = False, centred = False):
super().__init__()
self.text = text
self.pos = pos
self.colour = colour
self.font = font
self.size = size
self.variable = variable
self.centred = centred
def update(self):
pos = self.pos
font = py.font.Font(self.font, self.size)
if not self.variable:
label = font.render(self.text, 1, self.colour)
if self.variable:
label = font.render(str(getattr(v, self.text)), 1, self.colour)
if self.centred:
pos = list(self.pos)
pos[0] -= font.size(self.text)[0] / 2
pos[1] -= font.size(self.text)[1] / 2
pos = tuple(pos)
v.screen.blit(label, pos)
class Button(py.sprite.Sprite):
def __init__(self, text, pos, size, hovercolour, normalcolour, font, ID, centred = False, bsize=(0,0)):
super().__init__()
self.ID = ID
self.hovered = False
self.text = text
self.pos = pos
self.hcolour = hovercolour
self.ncolour = normalcolour
self.font = font
self.font = py.font.Font(font, int(size))
self.centred = centred
self.size = bsize
self.set_rect()
def update(self):
self.set_rend()
py.draw.rect(v.screen, self.get_color(), self.rect)
v.screen.blit(self.rend, self.rect)
if self.rect.collidepoint(v.mouse_pos):
self.hovered = True
else:
self.hovered = False
def set_rend(self):
self.rend = self.font.render(self.text, True, (0,0,0))
def get_color(self):
if self.hovered:
return self.hcolour
else:
return self.ncolour
def set_rect(self):
self.set_rend()
self.rect = self.rend.get_rect()
if not self.centred:
self.rect.topleft = self.pos
if self.centred:
self.rect.center = self.pos
if not self.size[0] == 0:
self.rect.width = self.size[0]
if not self.size[1] == 0:
self.rect.height = self.size[1]
def pressed(self):
mouse = v.mouse_pos
if mouse[0] > self.rect.topleft[0]:
if mouse[1] > self.rect.topleft[1]:
if mouse[0] < self.rect.bottomright[0]:
if mouse[1] < self.rect.bottomright[1]:
return True
else: return False
else: return False
else: return False
else: return False
import os, shutil
try:
shutil.copyfile("Resources/Fonts/Vecna.otf", "Update/Vecna.otf")
theFont = "Update/Vecna.otf"
except:
theFont = None
py.init()
v.screen = py.display.set_mode((640, 480))
v.screen.fill((20, 20, 20))
textLabel("Checking For Updates...", (320, 240), (255, 255, 255), theFont, 50, False, True).update()
py.display.flip()
tries = 0
def reporthook(count, blockSize, totalSize):
if totalSize == -1:
print("FAILED TOTALSIZE")
raise Exception()
#Shows percentage of download
py.event.pump()
for event in py.event.get():
if event.type == py.QUIT:
sys.exit()
percent = int(count*blockSize*100/totalSize)
rect = py.Rect(100, 240, percent*4.4, 30)
v.screen.fill((20, 20, 20))
py.draw.rect(v.screen, (255, 0, 0), rect)
py.draw.rect(v.screen, (0, 0, 0), rect, 2)
py.draw.rect(v.screen, (0, 0, 0), (100, 240, 440, 30), 2)
#font = py.font.Font(theFont, 25)
#title = font.render("Downloading...", 1, (255, 255, 255))
#progress = font.render(str(percent) + "%", 1, (255, 255, 255))
#v.screen.blit(title, (200, 200))
#v.screen.blit(progress, (200, 250))
textLabel("Downloading...", (320, 150), (255, 255, 255), theFont, 50, False, True).update()
textLabel(str(percent) + "%", (320, 255), (255, 255, 255), theFont, 20, False, True).update()
py.display.flip()
#sys.stdout.write("\r" + "...%d%%" % percent)
#sys.stdout.flush()
def recursive_overwrite(src, dest, ignore=None):
if os.path.isdir(src):
if not os.path.isdir(dest):
os.makedirs(dest)
files = os.listdir(src)
if ignore is not None:
ignored = ignore(src, files)
else:
ignored = set()
for f in files:
if f not in ignored:
recursive_overwrite(os.path.join(src, f),
os.path.join(dest, f),
ignore)
else:
shutil.copyfile(src, dest)
def updateCheck():
global latest
page = urllib.request.urlopen('https://github.com/Lightning3105/Legend-Of-Aiopa-RPG/commits/master')
page = str(page.read())
ind = page.find('class="sha btn btn-outline"')
latest = page[ind + 38:ind + 45]
print(latest)
#CHECK IF LATEST IS PROPER
try:
f = open("Saves/current.version", "rb")
current = pickle.load(f)
f.close()
except:
print("create new file")
try:
os.mkdir("Saves")
except:
pass
f = open("Saves/current.version", "wb")
current = 0000
pickle.dump(current, f)
f.close()
print(current, "vs", latest)
if current != latest:
from os import remove
try:
remove("Update/download.zip")
except:
pass
print("downloading latest")
buttons = py.sprite.Group()
buttons.add(Button("Update", (220, 240), 60, (100, 100, 100), (255, 255, 255), theFont, "Y", centred=True))
buttons.add(Button("Ignore", (420, 240), 60, (100, 100, 100), (255, 255, 255), theFont, "N", centred=True))
buttons.add(Button("Skip Update", (320, 300), 40, (100, 100, 100),
|
(255, 255, 255), theFont, "S", centred=True))
|
labels = py.sprite.Group()
labels.add(textLabel("An Update Is Available:", (320, 150), (255, 255, 255), theFont, 50, False, True))
labels.add(textLabel(str(str(current) + " ==> " + str(latest)), (320, 180), (255, 255, 255), theFont, 20, False, True))
while True:
py.event.pump()
v.screen.fill((20, 20, 20))
buttons.update()
labels.update()
for event in py.event.get():
if event.type == py.QUIT:
sys.exit()
elif event.type == py.MOUSEBUTTONDOWN:
for button in buttons:
if button.pressed():
id = button.ID
if id == "Y":
global tries
tries = 0
download()
return
if id == "N":
return
if id == "S":
f = open("Saves/current.version", "wb")
current = latest
pickle.dump(current, f)
f.close()
return
py.display.flip()
else:
v.screen.fill((20, 20, 20))
t = textLabel("No Update!", (320, 250), (255, 0, 0), theFont, 70, False, True)
v.current = current
t2 = textLabel("current", (320, 300), (255, 200, 200), theFont, 50, True, True)
t.update()
t2.update()
py.display.update()
if __name__ == "__main__":
py.time.wait(2000)
def download():
global tries
try:
|
icucinema/madcow
|
madcow/modules/election.py
|
Python
|
gpl-3.0
| 1,577 | 0.001902 |
"""Predicted Electoral Vote Count"""
import re
from madcow.util.http import getsoup
from madcow.util.color import ColorLib
from madcow.util import Module, strip_html
class Main(Module):
pattern = re.compile(r'^\s*(election|ev)\s*$', re.I)
help = u'ev - current election 2008 vote prediction'
baseurl = u'http://www.electoral-vote.com/'
def init(self):
if self.madcow is None:
self.colorlib = ColorLib('ansi')
else:
self.colorlib = self.madcow.colorlib
def colorize(self, color, key, val):
return u'%s: %s' % (key, val)
def render(self, node):
pass
def response(self, nick, args, kwargs):
soup = getsoup(self.baseurl)
out = []
for box in soup.find('div', 'score-box').findAll('div', 'box'):
score = []
for key in 'name', 'score':
val = strip_html(box.find('span', key).renderContents()).replace(u'\xa0', u'').strip()
if key == 'name':
if val == u'Obama':
color = 'blue'
elif val == 'Romney':
color = 'red'
else:
color = None
if color:
val = self.colorlib.get_color(color, text=val)
if val:
score.append(val)
if score:
out.append(u'%s: %s' % tuple(score))
return u'%s: %s' % (nick, u', '.join(out))
#from IPython.Shell import IPShellEmbed as S; #S()()
|
simonwydooghe/ansible
|
lib/ansible/modules/network/fortios/fortios_firewall_ssl_server.py
|
Python
|
gpl-3.0
| 15,241 | 0.001706 |
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_ssl_server
short_description: Configure SSL servers in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and ssl_server category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_ssl_server:
description:
- Configure SSL servers.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
add_header_x_forwarded_proto:
description:
- Enable/disable adding an X-Forwarded-Proto header to forwarded requests.
type: str
choices:
- enable
- disable
ip:
description:
- IPv4 address of the SSL server.
type: str
mapped_port:
description:
- Mapped server service port (1 - 65535).
type: int
name:
description:
- Server name.
required: true
type: str
port:
description:
- Server service port (1 - 65535).
type: int
ssl_algorithm:
description:
- Relative strength of encryption algorithms accepted in negotiation.
type: str
choices:
- high
- medium
- low
ssl_cert:
description:
- Name of certificate for SSL connections to this server. Source vpn.certificate.local.name.
type: str
ssl_client_renegotiation:
description:
- Allow or block client renegotiation by server.
type: str
choices:
- allow
- deny
- secure
ssl_dh_bits:
description:
- Bit-size of Diffie-Hellman (DH) prime used in DHE-RSA negotiation.
type: str
choices:
- 768
- 1024
- 1536
- 2048
ssl_max_version:
description:
- Highest SSL/TLS version to negotiate.
type: str
choices:
- tls-1.0
- tls-1.1
- tls-1.2
ssl_min_version:
description:
- Lowest SSL/TLS version to negotiate.
type: str
choices:
- tls-1.0
- tls-1.1
- tls-1.2
ssl_mode:
description:
- SSL/TLS mode for encryption and decryption of traffic.
type: str
choices:
- half
- full
ssl_send_empty_frags:
description:
- Enable/disable sending empty fragments to avoid attack on CBC IV.
type: str
choices:
- enable
- disable
url_rewrite:
description:
- Enable/disable rewriting the URL.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure SSL servers.
fortios_firewall_ssl_server:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_ssl_server:
add_header_x_forwarded_proto: "enable"
ip: "<your_own_value>"
mapped_port: "5"
name: "default_name_6"
port: "7"
ssl_algorithm: "high"
ssl_cert: "<your_own_value> (source vpn.certificate.local.name)"
ssl_client_renegotiation: "allow"
ssl_dh_bits: "768"
ssl_max_version: "tls-1.0"
ssl_min_version: "tls-1.0"
ssl_mode: "half"
ssl_send_empty_frags: "enable"
url_rewrite: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used t
|
akhilaananthram/nupic.fluent
|
fluent/encoders/language_encoder.py
|
Python
|
agpl-3.0
| 7,974 | 0.005769 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import math
import numpy
import random
from nupic.encoders.utils import bitsToString
class LanguageEncoder(object):
"""
An encoder converts a value to a sparse distributed representation (SDR).
The Encoder superclass implements:
- bitmapToSDR() returns binary SDR of a bitmap
- bitmapFromSDR() returns the bitmap rep of an SDR
- pprintHeader() prints a header describing the encoding to the terminal
- pprint() prints an encoding to the terminal
- decodedToStr() returns pretty print string of decoded SDR
Methods/properties that must be implemented by subclasses:
- encode() returns a numpy array encoding the input
- decode() returns a list of strings representing a decoded SDR
- getWidth() returns the output width, in bits
- getDescription() returns a dict describing the encoded output
"""
def __init__(self, n=16384, w=328):
"""The SDR dimensions are standard for Cortical.io fingerprints."""
self.n = n
self.w = w
self.targetSparsity = 5.0
def encode(self, inputText):
"""
Encodes inputText and puts the encoded value into the numpy output array,
which is a 1-D array of length returned by getWidth().
Note: The numpy output array is reused, so clear it before updating it.
@param inputData (str) Data to encode. This should be validated by
the encoder subclass.
@param output (numpy) 1-D array of same length returned by
getWidth().
"""
raise NotImplementedError
def encodeIntoArray(self, inputText, output):
"""
Encodes inputData and puts the encoded value into the numpy output array,
which is a 1-D array of length returned by getWidth().
Note: The numpy output array is reused, so clear it before updating it.
@param inputData Data to encode. This should be validated by the encoder.
@param output numpy 1-D array of same length returned by getWidth()
"""
raise NotImplementedError
def decode(self, encoded):
"""
Decodes the SDR encoded. See subclass implementation for details; the
decoding approaches and return objects vary depending on the encoder.
To pretty print the return value from this method, use decodedToStr().
@param encoded (numpy) Encoded 1-d array (an SDR).
"""
raise NotImplementedError
def getWidth(self):
"""
Get an encoding's output width in bits. See subclass implementation for
details.
"""
raise NotImplementedError()
def getDescription(self):
"""
Returns a tuple, each containing (name, offset).
The name is a string description of each sub-field, and offset is the bit
offset of the sub-field for that encoder; should be 0.
"""
raise NotImplementedError()
def bitmapToSDR(self, bitmap):
"""Convert SDR encoding from bitmap to binary numpy array."""
sdr = numpy.zeros(self.n)
sdr[bitmap] = 1
return sdr
def bitmapFromSDR(self, sdr):
"""Convert SDR encoding from binary numpy array to bitmap."""
return numpy.array([i for i in range(len(sdr)) if sdr[i]==1])
def encodeRandomly(self, text):
"""Return a random bitmap representation of the sample."""
random.seed(text)
return numpy.sort(random.sample(xrange(self.n), self.w))
def compare(self, bitmap1, bitmap2):
"""
Compare bitmaps, returning a dict of similarity measures.
@param bitmap1 (list) Indices of ON bits.
@param bitmap2 (list) Indices of ON bits.
@return distances (dict) Key-values of distance metrics and values.
Example return dict:
{
"cosineSimilarity": 0.6666666666666666,
"euclideanDistance": 0.3333333333333333,
"jaccardDistance": 0.5,
"overlappingAll": 6,
"overlappingLeftRight": 0.6666666666666666,
"overlappingRightLeft": 0.6666666666666666,
"sizeLeft": 9,
"sizeRight": 9
}
"""
if not len(bitmap1) > 0 or not len(bitmap2) > 0:
raise ValueError("Bitmaps must have ON bits to compare.")
sdr1 = self.bitmapToSDR(bitmap1)
sdr2 = self.bitmapToSDR(bitmap2)
distances = {
"sizeLeft": float(len(bitmap1)),
"sizeRight": float(len(bitmap2)),
"overlappingAll": float(len(numpy.intersect1d(bitmap1, bitmap2))),
"euclideanDistance": numpy.linalg.norm(sdr1 - sdr2)
}
distances["overlappingLeftRight"] = (distances["overlappingAll"] /
distances["sizeLeft"])
distances["overlappingRightLeft"] = (distances["overlappingAll"] /
distances["sizeRight"])
distances["cosineSimilarity"] = (distances["overlappingAll"] /
(math.sqrt(distances["sizeLeft"]) * math.sqrt(distances["sizeRight"])))
distances["jaccardDistance"] = 1 - (distances["overlappingAll"] /
len(numpy.union1d(bitmap1, bitmap2)))
return distances
def sparseUnion(self, counts):
"""
Bits from the input patterns are unionized and then sparsified.
@param counts (Counter) A count of the ON bits for the union bitmap.
@return (list) A sparsified union bitmap.
"""
max_sparsity = int((self.targetSparsity / 100) * self.n)
w = min(len(counts), max_sparsity)
return [c[0] for c in counts.most_common(w)]
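# Illustrative usage sketch (assumption, not in the original module): build the
# union of two bitmaps and sparsify it to the encoder's target sparsity.
#   from collections import Counter
#   counts = Counter(list(bitmap1) + list(bitmap2))
#   union_bitmap = encoder.sparseUnion(counts)   # 'encoder' is a subclass instance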
def pprintHeader(self, prefix=""):
"""
Pretty-print a header that labels the sub-fields of the encoded output.
This can be used in conjuction with pprint().
@param prefix printed before the header if specified
"""
print prefix,
description = self.getDescription() + [("end", self.getWidth())]
for i in xrange(len(description) - 1):
name = description[i][0]
width = description[i+1][1] - description[i][1]
formatStr = "%%-%ds |" % width
if len(name) > width:
pname = name[0:width]
else:
pname = name
print formatStr % pname,
print
print prefix, "-" * (self.getWidth() + (len(description) - 1)*3 - 1)
def pprint(self, output, prefix=""):
"""
Pretty-print the encoded output using ascii art.
@param output to print
@param prefix printed before the header if specified
"""
print prefix,
description = self.getDescription() + [("end", self.getWidth())]
for i in xrange(len(description) - 1):
offset = description[i][1]
nextoffset = description[i+1][1]
print "%s |" % bitsToString(output[offset:nextoffset]),
print
def decodedToStr(self, decodeResults):
"""
Return a pretty print string representing the return value from decode().
"""
(fieldsDict, fieldsOrder) = decodeResults
desc = ''
for fieldName in fieldsOrder:
(ranges, rangesStr) = fieldsDict[fieldName]
if len(desc) > 0:
desc += ", %s:" % (fieldName)
else:
desc += "%s:" % (fieldName)
desc += "[%s]" % (rangesStr)
return desc
|
rahulunair/nova
|
nova/virt/libvirt/volume/quobyte.py
|
Python
|
apache-2.0
| 7,552 | 0 |
# Copyright (c) 2015 Quobyte Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import fileutils
import psutil
import six
import nova.conf
from nova import exception as nova_exception
from nova.i18n import _
import nova.privsep.libvirt
from nova import utils
from nova.virt.libvirt.volume import fs
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
SOURCE_PROTOCOL = 'quobyte'
SOURCE_TYPE = 'file'
DRIVER_CACHE = 'none'
DRIVER_IO = 'native'
VALID_SYSD_STATES = ["starting", "running", "degraded"]
SYSTEMCTL_CHECK_PATH = "/run/systemd/system"
_is_systemd = None
def is_systemd():
"""Checks if the host is running systemd"""
global _is_systemd
if _is_systemd is not None:
return _is_systemd
tmp_is_systemd = False
if psutil.Process(1).name() == "systemd" or os.path.exists(
SYSTEMCTL_CHECK_PATH):
# NOTE(kaisers): exit code might be >1 in theory but in practice this
# is hard coded to 1. Due to backwards compatibility and systemd
# CODING_STYLE this is unlikely to change.
sysdout, sysderr = processutils.execute("systemctl",
"is-system-running",
check_exit_code=[0, 1])
for state in VALID_SYSD_STATES:
if state == sysdout.strip():
tmp_is_systemd = True
break
_is_systemd = tmp_is_systemd
return _is_systemd
def mount_volume(volume, mnt_base, configfile=None):
"""Wraps execute calls for mounting a Quobyte volume"""
fileutils.ensure_tree(mnt_base)
# Note(kaisers): with systemd this requires a separate CGROUP to
# prevent Nova service stop/restarts from killing the mount.
if is_systemd():
LOG.debug('Mounting volume %s at mount point %s via systemd-run',
volume, mnt_base)
nova.privsep.libvirt.systemd_run_qb_mount(volume, mnt_base,
cfg_file=configfile)
else:
LOG.debug('Mounting volume %s at mount point %s via mount.quobyte',
volume, mnt_base, cfg_file=configfile)
nova.privsep.libvirt.unprivileged_qb_mount(volume, mnt_base,
cfg_file=configfile)
LOG.info('Mounted volume: %s', volume)
def umount_volume(mnt_base):
"""Wraps execute calls for unmouting a Quobyte volume"""
try:
if is_systemd():
nova.privsep.libvirt.umount(mnt_base)
else:
nova.privsep.libvirt.unprivileged_umount(mnt_base)
except processutils.ProcessExecutionError as exc:
if 'Device or resource busy' in six.text_type(exc):
LOG.error("The Quobyte volume at %s is still in use.", mnt_base)
else:
LOG.exception(_("Couldn't unmount the Quobyte Volume at %s"),
mnt_base)
def validate_volume(mount_path):
"""Determine if the volume is a valid Quobyte mount.
Runs a number of tests to be sure this is a (working) Quobyte mount
"""
partitions = psutil.disk_partitions(all=True)
for p in partitions:
if mount_path != p.mountpoint:
continue
if p.device.startswith("quobyte@") or p.fstype == "fuse.quobyte":
statresult = os.stat(mount_path)
# Note(kaisers): Quobyte always shows mount points with size 0
if statresult.st_size == 0:
# client looks healthy
return # we're happy here
else:
msg = (_("The mount %(mount_path)s is not a "
"valid Quobyte volume. Stale mount?")
% {'mount_path': mount_path})
raise nova_exception.StaleVolumeMount(msg, mount_path=mount_path)
else:
msg = (_("The mount %(mount_path)s is not a valid "
"Quobyte volume according to partition list.")
% {'mount_path': mount_path})
raise nova_exception.InvalidVolume(msg)
msg = (_("No matching Quobyte mount entry for %(mount_path)s"
" could be found for validation in partition list.")
% {'mount_path': mount_path})
raise nova_exception.InvalidVolume(msg)
class LibvirtQuobyteVolumeDriver(fs.LibvirtBaseFileSystemVolumeDriver):
"""Class implements libvirt part of volume driver for Quobyte."""
def _get_mount_point_base(self):
return CONF.libvirt.quobyte_mount_point_base
def get_config(self, connection_info, disk_info):
conf = super(LibvirtQuobyteVolumeDriver,
self).get_config(connection_info, disk_info)
data = connection_info['data']
conf.source_protocol = SOURCE_PROTOCOL
conf.source_type = SOURCE_TYPE
conf.driver_cache = DRIVER_CACHE
conf.driver_io = DRIVER_IO
conf.driver_format = data.get('format', 'raw')
conf.source_path = self._get_device_path(connection_info)
return conf
@utils.synchronized('connect_qb_volume')
def connect_volume(self, connection_info, instance):
"""Connect the volume."""
if is_systemd():
LOG.debug("systemd detected.")
else:
LOG.debug("No systemd detected.")
data = connection_info['data']
quobyte_volume = self._normalize_export(data['export'])
mount_path = self._get_mount_path(connection_info)
try:
validate_volume(mount_path)
mounted = True
except nova_exception.StaleVolumeMount:
mounted = False
LOG.info('Fixing previous mount %s which was not '
'unmounted correctly.', mount_path)
umount_volume(mount_path)
except nova_exception.InvalidVolume:
mounted = False
if not mounted:
mount_volume(quobyte_volume,
mount_path,
CONF.libvirt.quobyte_client_cfg)
try:
validate_volume(mount_path)
except (nova_exception.InvalidVolume,
nova_exception.StaleVolumeMount) as nex:
LOG.error("Could not mount Quobyte volume: %s", nex)
@utils.synchronized('connect_qb_volume')
def disconnect_volume(self, connection_info, instance):
"""Disconnect the volume."""
mount_path = self._get_mount_path(connection_info)
try:
validate_volume(mount_path)
except (nova_exception.InvalidVolume,
nova_exception.StaleVolumeMount) as exc:
LOG.warning("Could not disconnect Quobyte volume mount: %s", exc)
else:
umount_volume(mount_path)
def _normalize_export(self, export):
protocol = SOURCE_PROTOCOL + "://"
if export.startswith(protocol):
export = export[len(protocol):]
return export
|
flp9001/clevenus
|
clevenus/config/wsgi.py
|
Python
|
gpl-3.0
| 395 | 0.002532 |
"""
WSGI config for astrology project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.prod")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
DeastinY/srpdfcrawler
|
pdf_search.py
|
Python
|
gpl-3.0
| 2,775 | 0.000721 |
import os
import sys
import sqlite3
import logging
from tqdm import tqdm
from pathlib import Path
from whoosh.index import create_in, open_dir
from whoosh.fields import Schema, TEXT, NUMERIC
from whoosh.qparser import QueryParser
from whoosh.spelling import ListCorrector
from whoosh.highlight import UppercaseFormatter
logging.basicConfig(level=logging.INFO)
if getattr(sys, 'frozen', False):
APPLICATION_PATH = os.path.dirname(sys.executable)
elif __file__:
APPLICATION_PATH = os.path.dirname(__file__)
PATH = APPLICATION_PATH
PATH_DATA = Path(PATH) / 'data'
FILE_DB = PATH_DATA / "data.db"
class Searcher:
def __init__(self):
self.scope = 20
self.terms = set()
self.index_path = "index"
self.common_terms = set()
self.schema = Schema(
title=TEXT(stored=True),
path=TEXT(stored=True),
page=NUMERIC(stored=True),
content=TEXT(stored=True))
self.ix = None
self.index_files = False
if not os.path.exists(self.index_path):
os.mkdir(self.index_path)
self.ix = create_in(self.index_path, self.schema)
self.index_files = True
else:
self.ix = open_dir(self.index_path)
self.writer = self.ix.writer()
self.read()
self.writer.commit()
self.searcher = self.ix.searcher()
self.corrector = ListCorrector(sorted(list(self.common_terms)))
self.parser = QueryParser("content", self.ix.schema)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.searcher.close()
def search(self, term):
results = []
suggestions = [term]+(self.corrector.suggest(term, limit=5))
for t in suggestions:
query = self.parser.parse(t)
query_res = self.searcher.search(query, limit=100)
query_res.fragmenter.maxchars = 300
query_res.fragmenter.surround = 100
query_res.formatter = UppercaseFormatter()
results.append((t, query_res))
return results
def read(self):
logging.info("Indexing")
con = sqlite3.connect(str(FILE_DB))
cur = con.cursor()
cur.execute(r"SELECT BOOKS.NAME, PAGE, CONTENT "
r"FROM TEXT, BOOKS "
r"WHERE BOOK = BOOKS.ID "
r"ORDER BY BOOKS.NAME, PAGE")
for row in tqdm(cur):
book, page, content = row
book, page, content = str(book), str(page), str(content)
for i in content.split(' '):
self.common_terms.add(i)
if self.index_files:
self.writer.add_document(title=book, content=content, path=book, page=page)
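# Illustrative usage sketch (assumption, not in the original file): the Searcher
# indexes the SQLite-backed page texts on first run and can then be queried as a
# context manager.
#   with Searcher() as s:
#       for suggestion, hits in s.search("dragon"):   # hypothetical search term
#           print(suggestion, len(hits))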
|
jakobharlan/avango
|
examples/simple_example/main.py
|
Python
|
lgpl-3.0
| 3,625 | 0.000552 |
import avango
import avango.script
import avango.gua
from examples_common.GuaVE import GuaVE
class TimedRotate(avango.script.Script):
TimeIn = avango.SFFloat()
MatrixOut = avango.gua.SFMatrix4()
def evaluate(self):
self.MatrixOut.value = avango.gua.make_rot_mat(
self.TimeIn.value * 2.0, 0.0, 1.0, 0.0)
def start():
# setup scenegraph
graph = avango.gua.nodes.SceneGraph(Name="scenegraph")
loader = avango.gua.nodes.TriMeshLoader()
monkey1 = loader.create_geometry_from_file(
"monkey", "data/objects/monkey.obj",
avango.gua.LoaderFlags.NORMALIZE_SCALE)
monkey2 = loader.create_geometry_from_file(
"monkey", "data/objects/monkey.obj",
avango.gua.LoaderFlags.NORMALIZE_SCALE)
monkey1.Material.value.set_uniform(
"Color", avango.gua.Vec4(1.0, 0.766, 0.336, 1.0))
monkey1.Material.value.set_uniform("Roughness", 0.3)
monkey1.Material.value.set_uniform("Metalness", 1.0)
monkey2.Material.value.set_uniform(
"Color", avango.gua.Vec4(1.0, 0.266, 0.136, 1.0))
monkey2.Material.value.set_uniform("Roughness", 0.6)
monkey2.Material.value.set_uniform("Metalness", 0.0)
transform1 = avango.gua.nodes.TransformNode(Children=[monkey1])
transform2 = avango.gua.nodes.TransformNode(
        Transform=avango.gua.make_trans_mat(-0.5, 0.0, 0.0),
Children=[monkey2])
light = avango.gua.nodes.LightNode(
Type=avango.gua.LightType.POINT,
Name="light",
Color=avango.gua.Color(1.0, 1.0, 1.0),
Brightness=100.0,
Transform=(avango.gua.make_trans_mat(1, 1, 5) *
avango.gua.make_scale_mat(30, 30, 30)))
size = avango.gua.Vec2ui(1024, 768)
window = avango.gua.nodes.GlfwWindow(Size=size, LeftResolution=size)
avango.gua.register_window("window", window)
cam = avango.gua.nodes.CameraNode(
LeftScreenPath="/screen",
SceneGraph="scenegraph",
Resolution=size,
OutputWindowName="window",
Transform=avango.gua.make_trans_mat(0.0, 0.0, 3.5))
res_pass = avango.gua.nodes.ResolvePassDescription()
res_pass.EnableSSAO.value = True
res_pass.SSAOIntensity.value = 4.0
res_pass.SSAOFalloff.value = 10.0
res_pass.SSAORadius.value = 7.0
#res_pass.EnableScreenSpaceShadow.value = True
res_pass.EnvironmentLightingColor.value = avango.gua.Color(0.1, 0.1, 0.1)
res_pass.ToneMappingMode.value = avango.gua.ToneMappingMode.UNCHARTED
res_pass.Exposure.value = 1.0
res_pass.BackgroundColor.value = avango.gua.Color(0.45, 0.5, 0.6)
anti_aliasing = avango.gua.nodes.SSAAPassDescription()
pipeline_description = avango.gua.nodes.PipelineDescription(
Passes=[
avango.gua.nodes.TriMeshPassDescription(),
avango.gua.nodes.LightVisibilityPassDescription(),
res_pass,
anti_aliasing,
])
cam.PipelineDescription.value = pipeline_description
screen = avango.gua.nodes.ScreenNode(
Name="screen",
Width=2,
Height=1.5,
Children=[cam])
graph.Root.value.Children.value = [transform1, transform2, light, screen]
#setup viewer
viewer = avango.gua.nodes.Viewer()
viewer.SceneGraphs.value = [graph]
viewer.Windows.value = [window]
monkey_updater = TimedRotate()
timer = avango.nodes.TimeSensor()
monkey_updater.TimeIn.connect_from(timer.Time)
transform1.Transform.connect_from(monkey_updater.MatrixOut)
guaVE = GuaVE()
guaVE.start(locals(), globals())
viewer.run()
if __name__ == '__main__':
start()
|
lemonad/molnetbot
|
molnetbot/xep0012.py
|
Python
|
mit
| 2,344 | 0.000427 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
The MIT license
Copyright (c) 2010 Jonas Nockert
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
---
XEP-0012: Last Activity handler
"""
from datetime import datetime
import time
from twisted.words.protocols.jabber.xmlstream import toResponse
from wokkel.subprotocols import IQHandlerMixin, XMPPHandler
NS_LAST_ACTIVITY = 'jabber:iq:last'
LAST_ACTIVITY = '/iq[@type="get"]/query[@xmlns="' + NS_LAST_ACTIVITY +'"]'
class LastActivityHandler(XMPPHandler, IQHandlerMixin):
"""
XMPP subprotocol handler for Last Activity extension.
This protocol is described in
U{XEP-0012<http://www.xmpp.org/extensions/xep-0012.html>}.
"""
iqHandlers = {LAST_ACTIVITY: 'onLastActivityGet'}
def __init__(self, get_last=lambda: 0):
self.get_last = get_last
def connectionInitialized(self):
self.xmlstream.addObserver(LAST_ACTIVITY, self.handleRequest)
def onLastActivityGet(self, iq):
"""Handle a request for last activity."""
response = toResponse(iq, 'result')
# TODO: Replace 'hello world!' string with something proper.
query = response.addElement((NS_LAST_ACTIVITY, 'query'),
content="Hello world!")
query['seconds'] = str(self.get_last())
self.send(response)
iq.handled = True
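# Example wiring (sketch, not part of the original module; `xmpp_client` and
# `get_last_activity_seconds` are hypothetical names):
#
# handler = LastActivityHandler(get_last=get_last_activity_seconds)
# handler.setHandlerParent(xmpp_client)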
|
SebastianoF/LabelsManager
|
tests/tools/test_image_colors_manip_relabeller.py
|
Python
|
mit
| 6,757 | 0.00222 |
import numpy as np
import pytest
from nilabels.tools.image_colors_manipulations.relabeller import relabeller, permute_labels, erase_labels, \
assign_all_other_labels_the_same_value, keep_only_one_label, relabel_half_side_one_label
def test_relabeller_basic():
data = np.array(range(10)).reshape(2, 5)
relabelled_data = relabeller(data, range(10), range(10)[::-1])
np.testing.assert_array_equal(relabelled_data, np.array(range(10)[::-1]).reshape(2,5))
def test_relabeller_one_element():
data = np.array(range(10)).reshape(2, 5)
relabelled_data = relabeller(data, 0, 1, verbose=1)
expected_output = data[:]
expected_output[0, 0] = 1
np.testing.assert_array_equal(relabelled_data, expected_output)
def test_relabeller_one_element_not_in_array():
data = np.array(range(10)).reshape(2, 5)
relabelled_data = relabeller(data, 15, 1, verbose=1)
np.testing.assert_array_equal(relabelled_data, data)
def test_relabeller_wrong_input():
data = np.array(range(10)).reshape(2, 5)
with np.testing.assert_raises(IOError):
relabeller(data, [1, 2], [3, 4, 4])
def test_permute_labels_invalid_permutation():
invalid_permutation = [[3, 3, 3], [1, 1, 1]]
with pytest.raises(IOError):
permute_labels(np.zeros([3, 3]), invalid_permutation)
def test_permute_labels_valid_permutation():
data = np.array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
valid_permutation = [[1, 2, 3], [1, 3, 2]]
perm_data = permute_labels(data, valid_permutation)
expected_data = np.array([[1, 3, 2],
[1, 3, 2],
[1, 3, 2]])
np.testing.assert_equal(perm_data, expected_data)
def test_erase_label_simple():
data = np.array(range(10)).reshape(2, 5)
data_erased_1 = erase_labels(data, 1)
expected_output = data[:]
expected_output[0, 1] = 0
np.testing.assert_array_equal(data_erased_1, expected_output)
def test_assign_all_other_labels_the_same_values_simple():
data = np.array(range(10)).reshape(2, 5)
data_erased_1 = erase_labels(data, 1)
data_labels_to_keep = assign_all_other_labels_the_same_value(data, range(2, 10), same_value_label=0)
np.testing.assert_array_equal(data_erased_1, data_labels_to_keep)
def test_assign_all_other_labels_the_same_values_single_value():
data = np.array(range(10)).reshape(2, 5)
data_erased_1 = np.zeros_like(data)
data_erased_1[0, 1] = 1
data_labels_to_keep = assign_all_other_labels_the_same_value(data, 1, same_value_label=0)
np.testing.assert_array_equal(data_erased_1, data_labels_to_keep)
def test_keep_only_one_label_label_simple():
data = np.array(range(10)).reshape(2, 5)
new_data = keep_only_one_label(data, 1)
expected_data = np.zeros([2, 5])
expected_data[0, 1] = 1
np.testing.assert_array_equal(new_data, expected_data)
def test_keep_only_one_label_label_not_present():
data = np.array(range(10)).reshape(2, 5)
new_data = keep_only_one_label(data, 120)
np.testing.assert_array_equal(new_data, data)
def test_relabel_half_side_one_label_wrong_input_shape():
data = np.array(range(10)).reshape(2, 5)
with np.testing.assert_raises(IOError):
relabel_half_side_one_label(data, label_old=[1, 2], label_new=[2, 1], side_to_modify='above',
axis='x', plane_intercept=2)
def test_relabel_half_side_one_label_wrong_input_side():
data = np.array(range(27)).reshape(3, 3, 3)
with np.testing.assert_raises(IOError):
relabel_half_side_one_label(data, label_old=[1, 2], label_new=[2, 1], side_to_modify='spam',
axis='x', plane_intercept=2)
def test_relabel_half_side_one_label_wrong_input_axis():
data = np.array(range(27)).reshape(3, 3, 3)
with np.testing.assert_raises(IOError):
relabel_half_side_one_label(data, label_old=[1, 2], label_new=[2, 1], side_to_modify='above',
axis='spam', plane_intercept=2)
def test_relabel_half_side_one_label_wrong_input_simple():
    data = np.array(range(3 ** 3)).reshape(3, 3, 3)
# Z above
new_data = relabel_half_side_one_label(data, label_old=1, label_new=100, side_to_modify='above',
axis='z', plane_intercept=1)
expected_data = data[:]
expected_data[0, 0, 1] = 100
    np.testing.assert_array_equal(new_data, expected_data)
# Z below
new_data = relabel_half_side_one_label(data, label_old=3, label_new=300, side_to_modify='below',
axis='z', plane_intercept=2)
expected_data = data[:]
expected_data[0, 1, 0] = 300
np.testing.assert_array_equal(new_data, expected_data)
# Y above
new_data = relabel_half_side_one_label(data, label_old=8, label_new=800, side_to_modify='above',
axis='y', plane_intercept=1)
expected_data = data[:]
expected_data[0, 2, 2] = 800
np.testing.assert_array_equal(new_data, expected_data)
# Y below
new_data = relabel_half_side_one_label(data, label_old=6, label_new=600, side_to_modify='below',
axis='y', plane_intercept=2)
expected_data = data[:]
expected_data[0, 2, 0] = 600
np.testing.assert_array_equal(new_data, expected_data)
# X above
new_data = relabel_half_side_one_label(data, label_old=18, label_new=180, side_to_modify='above',
axis='x', plane_intercept=1)
expected_data = data[:]
expected_data[2, 0, 0] = 180
np.testing.assert_array_equal(new_data, expected_data)
# X below
new_data = relabel_half_side_one_label(data, label_old=4, label_new=400, side_to_modify='below',
axis='x', plane_intercept=2)
expected_data = data[:]
expected_data[0, 1, 1] = 400
np.testing.assert_array_equal(new_data, expected_data)
if __name__ == '__main__':
test_relabeller_basic()
test_relabeller_one_element()
test_relabeller_one_element_not_in_array()
test_relabeller_wrong_input()
test_permute_labels_invalid_permutation()
test_permute_labels_valid_permutation()
test_erase_label_simple()
test_assign_all_other_labels_the_same_values_simple()
test_assign_all_other_labels_the_same_values_single_value()
test_keep_only_one_label_label_simple()
test_keep_only_one_label_label_not_present()
test_relabel_half_side_one_label_wrong_input_shape()
test_relabel_half_side_one_label_wrong_input_side()
test_relabel_half_side_one_label_wrong_input_axis()
test_relabel_half_side_one_label_wrong_input_simple()
|
RobCranfill/weewx
|
bin/weewx/drivers/ws1.py
|
Python
|
gpl-3.0
| 9,124 | 0.000986 |
#!/usr/bin/env python
#
# Copyright 2014 Matthew Wall
# See the file LICENSE.txt for your rights.
"""Driver for ADS WS1 weather stations.
Thanks to Steve (sesykes71) for the testing that made this driver possible.
Thanks to Jay Nugent (WB8TKL) and KRK6 for weather-2.kr6k-V2.1
http://server1.nuge.com/~weather/
"""
from __future__ import with_statement
import serial
import syslog
import time
import weewx.drivers
DRIVER_NAME = 'WS1'
DRIVER_VERSION = '0.19'
def loader(config_dict, _):
return WS1Driver(**config_dict[DRIVER_NAME])
def confeditor_loader():
return WS1ConfEditor()
INHG_PER_MBAR = 0.0295333727
METER_PER_FOOT = 0.3048
MILE_PER_KM = 0.621371
DEFAULT_PORT = '/dev/ttyS0'
DEBUG_READ = 0
def logmsg(level, msg):
syslog.syslog(level, 'ws1: %s' % msg)
def logdbg(msg):
logmsg(syslog.LOG_DEBUG, msg)
def loginf(msg):
logmsg(syslog.LOG_INFO, msg)
def logerr(msg):
logmsg(syslog.LOG_ERR, msg)
class WS1Driver(weewx.drivers.AbstractDevice):
"""weewx driver that communicates with an ADS-WS1 station
port - serial port
[Required. Default is /dev/ttyS0]
max_tries - how often to retry serial communication before giving up
[Optional. Default is 5]
retry_wait - how long to wait, in seconds, before retrying after a failure
[Optional. Default is 10]
"""
def __init__(self, **stn_dict):
self.port = stn_dict.get('port', DEFAULT_PORT)
self.max_tries = int(stn_dict.get('max_tries', 5))
self.retry_wait = int(stn_dict.get('retry_wait', 10))
self.last_rain = None
loginf('driver version is %s' % DRIVER_VERSION)
        loginf('using serial port %s' % self.port)
global DEBUG_READ
DEBUG_READ = int(stn_dict.get('debug_read', DEBUG_READ))
self.station = Station(self.port)
self.station.open()
def closePort(self):
if self.station is not None:
self.station.close()
self.station = None
@property
def hardware_name(self):
return "WS1"
def genLoopPackets(self):
while True:
packet = {'dateTime': int(time.time() + 0.5),
'usUnits': weewx.US}
readings = self.station.get_readings_with_retry(self.max_tries,
self.retry_wait)
data = Station.parse_readings(readings)
packet.update(data)
self._augment_packet(packet)
yield packet
def _augment_packet(self, packet):
# calculate the rain delta from rain total
if self.last_rain is not None:
packet['rain'] = packet['long_term_rain'] - self.last_rain
else:
packet['rain'] = None
self.last_rain = packet['long_term_rain']
# no wind direction when wind speed is zero
if 'windSpeed' in packet and not packet['windSpeed']:
packet['windDir'] = None
class Station(object):
def __init__(self, port):
self.port = port
self.baudrate = 2400
self.timeout = 3
self.serial_port = None
def __enter__(self):
self.open()
return self
def __exit__(self, _, value, traceback):
self.close()
def open(self):
logdbg("open serial port %s" % self.port)
self.serial_port = serial.Serial(self.port, self.baudrate,
timeout=self.timeout)
def close(self):
if self.serial_port is not None:
logdbg("close serial port %s" % self.port)
self.serial_port.close()
self.serial_port = None
# FIXME: use either CR or LF as line terminator. apparently some ws1
# hardware occasionally ends a line with only CR instead of the standard
# CR-LF, resulting in a line that is too long.
def get_readings(self):
buf = self.serial_port.readline()
if DEBUG_READ:
logdbg("bytes: '%s'" % ' '.join(["%0.2X" % ord(c) for c in buf]))
buf = buf.strip()
return buf
def get_readings_with_retry(self, max_tries=5, retry_wait=10):
for ntries in range(0, max_tries):
try:
buf = self.get_readings()
Station.validate_string(buf)
return buf
except (serial.serialutil.SerialException, weewx.WeeWxIOError), e:
loginf("Failed attempt %d of %d to get readings: %s" %
(ntries + 1, max_tries, e))
time.sleep(retry_wait)
else:
msg = "Max retries (%d) exceeded for readings" % max_tries
logerr(msg)
raise weewx.RetriesExceeded(msg)
@staticmethod
def validate_string(buf):
if len(buf) != 50:
raise weewx.WeeWxIOError("Unexpected buffer length %d" % len(buf))
if buf[0:2] != '!!':
raise weewx.WeeWxIOError("Unexpected header bytes '%s'" % buf[0:2])
return buf
@staticmethod
def parse_readings(raw):
"""WS1 station emits data in PeetBros format:
http://www.peetbros.com/shop/custom.aspx?recid=29
Each line has 50 characters - 2 header bytes and 48 data bytes:
!!000000BE02EB000027700000023A023A0025005800000000
SSSSXXDDTTTTLLLLPPPPttttHHHHhhhhddddmmmmRRRRWWWW
SSSS - wind speed (0.1 kph)
XX - wind direction calibration
DD - wind direction (0-255)
TTTT - outdoor temperature (0.1 F)
LLLL - long term rain (0.01 in)
PPPP - pressure (0.1 mbar)
tttt - indoor temperature (0.1 F)
HHHH - outdoor humidity (0.1 %)
hhhh - indoor humidity (0.1 %)
dddd - date (day of year)
mmmm - time (minute of day)
RRRR - daily rain (0.01 in)
WWWW - one minute wind average (0.1 kph)
"""
# FIXME: peetbros could be 40 bytes or 44 bytes, what about ws1?
# FIXME: peetbros uses two's complement for temp, what about ws1?
# FIXME: for ws1 is the pressure reading 'pressure' or 'barometer'?
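        # Worked example (illustration only) using the sample line from the
        # docstring: buf = raw[2:], so buf[0:4] = '0000' -> windSpeed 0.0 mph,
        # buf[6:8] = 'BE' -> 190 -> windDir ~268 deg, buf[8:12] = '02EB' -> 747
        # -> outTemp 74.7 degree_F, and buf[16:20] = '2770' -> 10096 ->
        # pressure ~29.82 inHg (1009.6 mbar).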
buf = raw[2:]
data = dict()
data['windSpeed'] = Station._decode(buf[0:4], 0.1 * MILE_PER_KM) # mph
data['windDir'] = Station._decode(buf[6:8], 1.411764) # compass deg
data['outTemp'] = Station._decode(buf[8:12], 0.1) # degree_F
data['long_term_rain'] = Station._decode(buf[12:16], 0.01) # inch
data['pressure'] = Station._decode(buf[16:20], 0.1 * INHG_PER_MBAR) # inHg
data['inTemp'] = Station._decode(buf[20:24], 0.1) # degree_F
data['outHumidity'] = Station._decode(buf[24:28], 0.1) # percent
data['inHumidity'] = Station._decode(buf[28:32], 0.1) # percent
data['day_of_year'] = Station._decode(buf[32:36])
data['minute_of_day'] = Station._decode(buf[36:40])
data['daily_rain'] = Station._decode(buf[40:44], 0.01) # inch
data['wind_average'] = Station._decode(buf[44:48], 0.1 * MILE_PER_KM) # mph
return data
@staticmethod
def _decode(s, multiplier=None, neg=False):
v = None
try:
v = int(s, 16)
if neg:
bits = 4 * len(s)
if v & (1 << (bits - 1)) != 0:
v -= (1 << bits)
if multiplier is not None:
v *= multiplier
except ValueError, e:
if s != '----':
logdbg("decode failed for '%s': %s" % (s, e))
return v
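        # e.g. (illustration only): _decode('02EB', 0.1) returns 74.7, and with
        # neg=True a field such as 'FFFE' is read as two's complement, i.e. -2.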
class WS1ConfEditor(weewx.drivers.AbstractConfEditor):
@property
def default_stanza(self):
return """
[WS1]
# This section is for the ADS WS1 series of weather stations.
# Serial port such as /dev/ttyS0, /dev/ttyUSB0, or /dev/cuaU0
port = /dev/ttyUSB0
# The driver to use:
driver = weewx.drivers.ws1
"""
def prompt_for_settings(self):
print "Specify the serial port on which the station is connected, for"
print "example /dev/ttyUSB0 or /dev/ttyS0."
port = self._prompt('port', '/dev/ttyUSB0')
return {'port': port}
# define a main entry point for basic testing of the station without weewx
|
aaronzhang1990/workshare
|
test/python/addClasses.py
|
Python
|
gpl-2.0
| 1,111 | 0.032403 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import MySQLdb as mdb
import uuid, pprint
def generate(data):
gdata = []
	for grade in range(1,4):
for clazz in range(1,10):
			if grade != data['grade_number'] or clazz != data['class_number']:
gdata.append("insert into classes(uuid, grade_number, class_number, school_uuid) values('%s', %d, %d, '%s');
|
" % (unicode(uuid.uuid4()), grade, clazz, data['school_uuid']))
return gdata
def main():
config = {'user': 'root', 'passwd': 'oseasy_db', 'db': 'banbantong', 'use_unicode': True, 'charset': 'utf8'}
conn = mdb.connect(**config)
if not conn: return
cursor = conn.cursor()
cursor.execute('select grade_number, class_number, school_uuid from classes;')
base = {}
desc = cursor.description
data = cursor.fetchone()
for i, x in enumerate(data):
base[desc[i][0]] = data[i]
moreData = generate(base)
#cursor.executemany('insert into classes(uuid, grade_number, class_number, school_uuid) values(%s, %d, %d, %s)', moreData)
for sql in moreData:
cursor.execute(sql)
conn.commit()
cursor.close()
conn.close()
if __name__ == "__main__":
main()
|
junneyang/simplepbrpc
|
homedir/items.py
|
Python
|
mit
| 8,104 | 0.006674 |
#!/usr/bin/env python
# coding=utf-8
import errno
import os
import sys
import fileinput
import string
import logging
import traceback
import hashlib
import time
import re
from datetime import date, timedelta
import datetime
from subprocess import call
import redis
from datasource import DataSource
class Items(DataSource):
def __init__(self, redisClientManager, config, act):
DataSource.__init__(self, config, act)
self.redisClientManager = redisClientManager
self.downloadedDir = ""
self.key = ""
if os.path.exists(self.dir + "/downloaded.txt"):
with open(self.dir + "/downloaded.txt", 'r') as content_file:
self.downloadedDir = content_file.read()
def saveDownloadedDir(self, dir):
self.downloadedDir = dir
with open(self.dir + "/downloaded.txt", "w") as text_file:
text_file.write(dir)
def isOkFloatString(self, value):
for c in value:
if c == '.':
continue
if ord(c) <48 or ord(c) > 57:
return False
return True
def download(self):
try:
cmd = "rm -rf " + self.dir + "/*"
call(cmd, shell=True)
cmd = "hadoop fs -get " + self.download_url + " " + self.dir
logging.info("[" + self.name + "]" + "Downloading file:" + self.download_url)
retcode = call(cmd, shell=True)
if retcode != 0:
logging.error("Child was terminated by signal:" + str(retcode) + " for cmd:" + cmd)
return False
else:
self.saveDownloadedDir(self.datedir)
return True
except:
tb = traceback.format_exc()
logging.error("Some error occured:\n" + tb)
return False
def __parseImport(self, filename, name):
file = open(filename, 'r')
count = 0
ff = name.split('_')
prefix= self.config["prefix"] + ":"
while 1:
lines = file.readlines(10000)
if not lines:
break
for line in lines:
line = line.strip()
if count % 100000 == 0 and count != 0:
logging.info("[" + self.name + "]" + str(count) + " lines parsed and imported to redis for file:" + filename)
count = count + 1
#ss = re.split(r'\t+', line.rstrip('\t'))
line = line.rstrip("\n")
ss = line.split("\t")
if len(ss) != 11:
print "fxxk you man!"
exit(1)
#poi_id = ss[0]
                # abnormal strings: discard poi_ids containing non-ASCII characters
poi_id = ss[0]
if not all(ord(c) < 128 for c in poi_id):
logging.error("[" + self.name + "]Discard invalid line:" + line + "\n")
continue
if len(poi_id) > 50:
logging.error("filename:" + filename + ", line:" + str(count) +", cuid too long!")
continue
key = prefix + poi_id
value = ""
i = 1
tag = 0
while i < len(ss):
# if not self.isOkFloatString(ss[i]):
# tag = 1
# break
if i == 1:
value = ss[i]
else:
value = value + ":" + ss[i]
i = i+1
# if tag == 1:
# logging.error("filename:" + filename + ", line:" + str(count) +", not all nums are right")
# continue
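                # Data layout (hypothetical example, for illustration): a line
                # "123<TAB>v1<TAB>...<TAB>v10" yields key "<prefix>:123" and
                # value "v1:v2:...:v10", which is SADD-ed into every shard
                # client selected for this poi_id below.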
clients = self.redisClientManager.getClientsByShardKey("items", poi_id)
self.cnt+=1
if self.key != key:
for client in clients:
client.pipeline.delete(key)
self.key = key
for client in clients:
client.pipeline.sadd(key, value)
client.IncrPipeCount()
if client.pipecount >= 100:
client.commit()
file.close()
return True
def parseImport(self):
fs = os.listdir(self.dir)
for file in fs:
if file == "status.txt" or file == "downloaded.txt":
continue
while True:
try:
logging.info("[" + self.name + "]Start parsing import data from file:" + file)
self.__parseImport(self.dir + "/" + file, file)
self.redisClientManager.commitClients("items")
break
except:
tb = traceback.format_exc()
logging.error("Some error occured to parsing import file:" + file + "\n" + tb)
time.sleep(60)
return True
def __delete(self, filename):
fi = open(filename, 'r')
count = 0
prefix= self.config["prefix"] + ":"
while 1:
lines = fi.readlines(10000)
if not lines:
break
for line in lines:
line = line.strip()
if count % 100000 == 0 and count != 0:
logging.info("[" + self.name + "]" + str(count) + " lines parsed and deleted from redis for file:" + filename)
count = count + 1
ss = re.split(r'\t+', line.rstrip('\t'))
#poi_id = ss[0]
poi_id = ss[0]
if not all(ord(c) < 128 for c in poi_id):
logging.error("[" + self.name + "]Discard invalid line:" + line + "\n")
continue
if len(poi_id) > 50:
logging.error("filename:" + filename + ", line:" + str(count) +", cuid too long!")
continue
key = prefix + poi_id
clients = self.redisClientManager.getClientsByShardKey("items", poi_id)
for client in clients:
client.pipeline.delete(key)
client.IncrPipeCount()
if client.pipecount >= 100:
client.commit()
fi.close()
return True
def delete(self):
fs = os.listdir(self.dir)
for fi in fs:
if fi == "status.txt" or fi == "downloaded.txt":
continue
while True:
try:
logging.info("[" + self.name + "]Start parsing delete data from file:" + fi)
self.__delete(self.dir + "/" + fi)
self.redisClientManager.commitClients("items")
break
except:
tb = traceback.format_exc()
logging.error("Some error occured to parsing delete file:" + fi + "\n" + tb)
time.sleep(60)
return True
def checkAvailable(self):
try:
if self.action == "import":
yesterday = date.today() - timedelta(1)
self.datedir = yesterday.strftime('%Y%m%d')
#self.datedir = "."
if self.datedir == self.downloadedDir:
return 0
elif self.action == "delete":
self.datedir = self.del_date
if self.datedir == self.downloadedDir:
return 2
self.download_url = self.config["url"].replace("${date}", self.datedir)
donefile = self.config["checkfile"].replace("${date}", self.datedir)
cmd = "hadoop fs -test -e " + donefile
retcode = call(cmd, shell=True)
if retcode == 0:
return 1
return 0
except:
tb = traceback.format_exc()
logging.error("Some error occured:\n" + tb)
return 0
return 0
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/r-rngtools/package.py
|
Python
|
lgpl-2.1
| 2,067 | 0.000484 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RRngtools(RPackage):
"""This package contains a set of functions for working with Random Number
Generators (RNGs). In particular, it defines a generic S4 framework for
getting/setting the current RNG, or RNG data that are embedded into objects
for reproducibility. Notably, convenient default methods greatly facilitate
the way current RNG settings can be changed."""
homepage = "https://renozao.github.io/rngtools"
url = "https://cran.r-project.org/src/contrib/rngtools_1.2.4.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/rngtools"
version('1.2.4', '715967f8b3af2848a76593a7c718c1cd')
depends_on('r-pkgmaker', type=('build', 'run'))
depends_on('r-stringr', type=('build', 'run'))
depends_on('r-digest', type=('build', 'run'))
|
GenericStudent/home-assistant
|
tests/components/roon/test_config_flow.py
|
Python
|
apache-2.0
| 5,782 | 0.000346 |
"""Test the roon config flow."""
from homeassistant import config_entries, setup
from homeassistant.components.roon.const import DOMAIN
from homeassistant.const import CONF_HOST
from tests.async_mock import patch
from tests.common import MockConfigEntry
class RoonApiMock:
"""Mock to handle returning tokens for testing the RoonApi."""
def __init__(self, token):
"""Initialize."""
self._token = token
@property
def token(self):
"""Return the auth token from the api."""
return self._token
def stop(self): # pylint: disable=no-self-use
"""Close down the api."""
return
async def test_form_and_auth(hass):
"""Test we get the form."""
    await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] ==
|
"form"
assert result["errors"] == {}
with patch("homeassistant.components.roon.config_flow.TIMEOUT", 0,), patch(
"homeassistant.components.roon.const.AUTHENTICATE_TIMEOUT",
0,
), patch(
"homeassistant.components.roon.config_flow.RoonApi",
return_value=RoonApiMock("good_token"),
), patch(
"homeassistant.components.roon.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.roon.async_setup_entry",
return_value=True,
) as mock_setup_entry:
await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "1.1.1.1"}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result2["type"] == "create_entry"
assert result2["title"] == "Roon Labs Music Player"
assert result2["data"] == {"host": "1.1.1.1", "api_key": "good_token"}
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_no_token(hass):
"""Test we handle no token being returned (timeout or not authorized)."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch("homeassistant.components.roon.config_flow.TIMEOUT", 0,), patch(
"homeassistant.components.roon.const.AUTHENTICATE_TIMEOUT",
0,
), patch(
"homeassistant.components.roon.config_flow.RoonApi",
return_value=RoonApiMock(None),
):
await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "1.1.1.1"}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_unknown_exception(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.roon.config_flow.RoonApi",
side_effect=Exception,
):
await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "1.1.1.1"}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
async def test_form_host_already_exists(hass):
"""Test we add the host if the config exists and it isn't a duplicate."""
MockConfigEntry(domain=DOMAIN, data={CONF_HOST: "existing_host"}).add_to_hass(hass)
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch("homeassistant.components.roon.config_flow.TIMEOUT", 0,), patch(
"homeassistant.components.roon.const.AUTHENTICATE_TIMEOUT",
0,
), patch(
"homeassistant.components.roon.config_flow.RoonApi",
return_value=RoonApiMock("good_token"),
), patch(
"homeassistant.components.roon.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.roon.async_setup_entry",
return_value=True,
) as mock_setup_entry:
await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "1.1.1.1"}
)
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result2["type"] == "create_entry"
assert result2["title"] == "Roon Labs Music Player"
assert result2["data"] == {"host": "1.1.1.1", "api_key": "good_token"}
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 2
async def test_form_duplicate_host(hass):
"""Test we don't add the host if it's a duplicate."""
MockConfigEntry(domain=DOMAIN, data={CONF_HOST: "existing_host"}).add_to_hass(hass)
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], {"host": "existing_host"}
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "duplicate_entry"}
|
engineer0x47/SCONS
|
engine/SCons/Tool/aixf77.py
|
Python
|
mit
| 2,681 | 0.001865 |
"""engine.SCons.Tool.aixf77
Tool-specific initialization for IBM Visual Age f77 Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/aixf77.py 2014/08/24 12:12:31 garyo"
import os.path
#import SCons.Platform.aix
import f77
# It would be good to look for the AIX F77 package the same way we're now
# looking for the C and C++ packages. This should be as easy as supplying
# the correct package names in the following list and uncommenting the
# SCons.Platform.aix_get_xlc() call the in the function below.
packages = []
def get_xlf77(env):
xlf77 = env.get('F77', 'xlf77')
xlf77_r = env.get('SHF77', 'xlf77_r')
#return SCons.Platform.aix.get_xlc(env, xlf77, xlf77_r, packages)
return (None, xlf77, xlf77_r, None)
def generate(env):
"""
Add Builders and construction variables for the Visual Age FORTRAN
compiler to an Environment.
"""
path, _f77, _shf77, version = get_xlf77(env)
if path:
_f77 = os.path.join(path, _f77)
_shf77 = os.path.join(path, _shf77)
f77.generate(env)
env['F77'] = _f77
env['SHF77'] = _shf77
def exists(env):
path, _f77, _shf77, version = get_xlf77(env)
if path and _f77:
xlf77 = os.path.join(path, _f77)
        if os.path.exists(xlf77):
return xlf77
return None
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
luzfcb/django_documentos
|
django_documentos/autocomplete_light_registry.py
|
Python
|
bsd-3-clause
| 1,391 | 0.000719 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import autocomplete_light
from django.utils.encoding import force_text
from .settings import USER_MODEL
from .utils.module_loading import get_real_model_class
class UserAutocomplete(autocomplete_light.AutocompleteModelBase):
search_fields = [
'^first_name',
'last_name',
'username'
]
model = get_real_model_class(USER_MODEL)
order_by = ['first_name', 'last_name']
# choice_template = 'django_documentos/user_choice_autocomplete.html'
limit_choices = 10
attrs = {
'data-autcomplete-minimum-characters': 0,
'placeholder': 'Pessoa que irá assinar',
}
# widget_attrs = {'data-widget-maximum-values': 3}
def choice_value(self, choice):
"""
Return the pk of the choice by default.
"""
return choice.pk
def choice_label(self, choice):
"""
Return the textual representation of the choice by default.
"""
# return force_text("{}-{}".format(choice.pk, choice.get_full_name().title()))
return force_text(choice.get_full_name().title())
# def choice_label(self, choice):
    # return choice.get_full_name().title()
def choices_for_request(self):
        return super(UserAutocomplete, self).choices_for_request()
autocomplete_light.register(UserAutocomplete)
|
iMuduo/FastSync
|
setup.py
|
Python
|
apache-2.0
| 690 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Muduo
'''
FastSync
'''
from setuptools import setup, find_packages
setup(
name='FastSync',
version='0.2.0.3',
packages=find_packages(),
install_requires=[
'requests',
        'watchdog',
'pycrypto',
'future',
'web.py'
],
entry_points={
'console_scripts': [
'fsnd = sync:sending',
'frcv = sync:receiving',
],
},
license='Apache License',
author='Muduo',
    author_email='imuduo@163.com',
url='https://github.com/iMuduo/FastSync',
description='Event driven fast synchronization tool',
keywords=['sync'],
)
|
glemaitre/scikit-learn
|
sklearn/mixture/tests/test_gaussian_mixture.py
|
Python
|
bsd-3-clause
| 40,311 | 0 |
# Author: Wei Xue <xuewei4d@gmail.com>
# Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import re
import sys
import copy
import warnings
import pytest
import numpy as np
from scipy import stats, linalg
from sklearn.covariance import EmpiricalCovariance
from sklearn.datasets import make_spd_matrix
from io import StringIO
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.mixture import GaussianMixture
from sklearn.mixture._gaussian_mixture import (
_estimate_gaussian_covariances_full,
_estimate_gaussian_covariances_tied,
_estimate_gaussian_covariances_diag,
_estimate_gaussian_covariances_spherical,
_compute_precision_cholesky,
_compute_log_det_cholesky,
)
from sklearn.exceptions import ConvergenceWarning, NotFittedError
from sklearn.utils.extmath import fast_logdet
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import ignore_warnings
COVARIANCE_TYPE = ['full', 'tied', 'diag', 'spherical']
def generate_data(n_samples, n_features, weights, means, precisions,
covariance_type):
rng = np.random.RandomState(0)
X = []
if covariance_type == 'spherical':
for _, (w, m, c) in enumerate(zip(weights, means,
precisions['spherical'])):
X.append(rng.multivariate_normal(m, c * np.eye(n_features),
int(np.round(w * n_samples))))
if covariance_type == 'diag':
for _, (w, m, c) in enumerate(zip(weights, means,
precisions['diag'])):
X.append(rng.multivariate_normal(m, np.diag(c),
int(np.round(w * n_samples))))
if covariance_type == 'tied':
for _, (w, m) in enumerate(zip(weights, means)):
X.append(rng.multivariate_normal(m, precisions['tied'],
int(np.round(w * n_samples))))
if covariance_type == 'full':
for _, (w, m, c) in enumerate(zip(weights, means,
precisions['full'])):
X.append(rng.multivariate_normal(m, c,
int(np.round(w * n_samples))))
X = np.vstack(X)
return X
class RandomData:
def __init__(self, rng, n_samples=200, n_components=2, n_features=2,
scale=50):
self.n_samples = n_samples
self.n_components = n_components
self.n_features = n_features
self.weights = rng.rand(n_components)
self.weights = self.weights / self.weights.sum()
self.means = rng.rand(n_components, n_features) * scale
self.covariances = {
'spherical': .5 + rng.rand(n_components),
'diag': (.5 + rng.rand(n_components, n_features)) ** 2,
'tied': make_spd_matrix(n_features, random_state=rng),
'full': np.array([
make_spd_matrix(n_features, random_state=rng) * .5
for _ in range(n_components)])}
self.precisions = {
            'spherical': 1. / self.covariances['spherical'],
'diag': 1. / self.covariances['diag'],
'tied': linalg.inv(self.covariances['tied']),
            'full': np.array([linalg.inv(covariance)
for covariance in self.covariances['full']])}
self.X = dict(zip(COVARIANCE_TYPE, [generate_data(
n_samples, n_features, self.weights, self.means, self.covariances,
covar_type) for covar_type in COVARIANCE_TYPE]))
self.Y = np.hstack([np.full(int(np.round(w * n_samples)), k,
dtype=int)
for k, w in enumerate(self.weights)])
def test_gaussian_mixture_attributes():
# test bad parameters
rng = np.random.RandomState(0)
X = rng.rand(10, 2)
n_components_bad = 0
gmm = GaussianMixture(n_components=n_components_bad)
msg = (
f"Invalid value for 'n_components': {n_components_bad} "
"Estimation requires at least one component"
)
with pytest.raises(ValueError, match=msg):
gmm.fit(X)
# covariance_type should be in [spherical, diag, tied, full]
covariance_type_bad = 'bad_covariance_type'
gmm = GaussianMixture(covariance_type=covariance_type_bad)
msg = (
f"Invalid value for 'covariance_type': {covariance_type_bad} "
"'covariance_type' should be in ['spherical', 'tied', 'diag', 'full']"
)
with pytest.raises(ValueError):
gmm.fit(X)
tol_bad = -1
gmm = GaussianMixture(tol=tol_bad)
msg = (
f"Invalid value for 'tol': {tol_bad:.5f} "
"Tolerance used by the EM must be non-negative"
)
with pytest.raises(ValueError, match=msg):
gmm.fit(X)
reg_covar_bad = -1
gmm = GaussianMixture(reg_covar=reg_covar_bad)
msg = (
f"Invalid value for 'reg_covar': {reg_covar_bad:.5f} "
"regularization on covariance must be non-negative"
)
with pytest.raises(ValueError, match=msg):
gmm.fit(X)
max_iter_bad = 0
gmm = GaussianMixture(max_iter=max_iter_bad)
msg = (
f"Invalid value for 'max_iter': {max_iter_bad} "
"Estimation requires at least one iteration"
)
with pytest.raises(ValueError, match=msg):
gmm.fit(X)
n_init_bad = 0
gmm = GaussianMixture(n_init=n_init_bad)
msg = (
f"Invalid value for 'n_init': {n_init_bad} "
"Estimation requires at least one run"
)
with pytest.raises(ValueError, match=msg):
gmm.fit(X)
init_params_bad = 'bad_method'
gmm = GaussianMixture(init_params=init_params_bad)
msg = (
f"Unimplemented initialization method '{init_params_bad}'"
)
with pytest.raises(ValueError, match=msg):
gmm.fit(X)
# test good parameters
n_components, tol, n_init, max_iter, reg_covar = 2, 1e-4, 3, 30, 1e-1
covariance_type, init_params = 'full', 'random'
gmm = GaussianMixture(n_components=n_components, tol=tol, n_init=n_init,
max_iter=max_iter, reg_covar=reg_covar,
covariance_type=covariance_type,
init_params=init_params).fit(X)
assert gmm.n_components == n_components
assert gmm.covariance_type == covariance_type
assert gmm.tol == tol
assert gmm.reg_covar == reg_covar
assert gmm.max_iter == max_iter
assert gmm.n_init == n_init
assert gmm.init_params == init_params
def test_check_weights():
rng = np.random.RandomState(0)
rand_data = RandomData(rng)
n_components = rand_data.n_components
X = rand_data.X['full']
g = GaussianMixture(n_components=n_components)
# Check bad shape
weights_bad_shape = rng.rand(n_components, 1)
g.weights_init = weights_bad_shape
msg = re.escape(
"The parameter 'weights' should have the shape of "
f"({n_components},), but got {str(weights_bad_shape.shape)}"
)
with pytest.raises(ValueError, match=msg):
g.fit(X)
# Check bad range
weights_bad_range = rng.rand(n_components) + 1
g.weights_init = weights_bad_range
msg = re.escape(
"The parameter 'weights' should be in the range [0, 1], but got"
f" max value {np.min(weights_bad_range):.5f}, "
f"min value {np.max(weights_bad_range):.5f}"
)
with pytest.raises(ValueError, match=msg):
g.fit(X)
# Check bad normalization
weights_bad_norm = rng.rand(n_components)
weights_bad_norm = weights_bad_norm / (weights_bad_norm.sum() + 1)
g.weights_init = weights_bad_norm
msg = re.escape(
"The parameter 'weights' should be normalized, "
f"but got sum(weights) = {np.sum(weights_bad_norm):.5f}"
)
with pytest.raises(ValueError, match=msg):
g.fit(X)
# Check good weights matrix
weights = rand_data.weights
g = GaussianMixt
|
pupboss/xndian
|
deploy/site-packages/py2exe/samples/advanced/MyService.py
|
Python
|
mit
| 2,011 | 0.001989 |
#
# A sample service to be 'compiled' into an exe-file with py2exe.
#
# See also
# setup.py - the distutils' setup script
# setup.cfg - the distutils' config file for this
# README.txt - detailed usage notes
#
# A minimal service, doing nothing else than
# - write 'start' and 'stop' entries into the NT event log
# - when started, waits to be stopped again.
#
import win32serviceutil
import win32service
import win32event
import win32evtlogutil
class MyService(win32serviceutil.ServiceFramework):
_svc_name_ = "MyService"
_svc_display_name_ = "My Service"
_svc_deps_ = ["EventLog"]
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
def SvcStop(self):
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        win32event.SetEvent(self.hWaitStop)
def SvcDoRun(self):
import servicemanager
# Write a 'started' event to the event log...
win32evtlogutil.ReportEvent(self._svc_name_,
servicemanager.PYS_SERVICE_STARTED,
0, # category
servicemanager.EVENTLOG_INFORMATION_TYPE,
(self._svc_name_, ''))
        # wait for being stopped...
win32event.WaitForSingleObject(self.hWaitStop, win32event.INFINITE)
# and write a 'stopped' event to the event log.
win32evtlogutil.ReportEvent(self._svc_name_,
servicemanager.PYS_SERVICE_STOPPED,
0, # category
servicemanager.EVENTLOG_INFORMATION_TYPE,
(self._svc_name_, ''))
if __name__ == '__main__':
# Note that this code will not be run in the 'frozen' exe-file!!!
win32serviceutil.HandleCommandLine(MyService)
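    # Typical invocations handled by HandleCommandLine (sketch; requires the
    # pywin32 extensions when run as a plain script):
    #   python MyService.py install
    #   python MyService.py start
    #   python MyService.py stop
    #   python MyService.py remove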
|
jrbourbeau/cr-composition
|
processing/legacy/anisotropy/random_trials/process_kstest.py
|
Python
|
mit
| 7,627 | 0.002098 |
#!/usr/bin/env python
import os
import argparse
import numpy as np
import pandas as pd
import pycondor
import comptools as comp
if __name__ == "__main__":
p = argparse.ArgumentParser(
description='Extracts and saves desired information from simulation/data .i3 files')
p.add_argument('-c', '--config', dest='config',
default='IC86.2012',
choices=['IC79', 'IC86.2012', 'IC86.2013', 'IC86.2014', 'IC86.2015'],
help='Detector configuration')
p.add_argument('--low_energy', dest='low_energy',
default=False, action='store_true',
help='Only use events with energy < 10**6.75 GeV')
p.add_argument('--n_side', dest='n_side', type=int,
default=64,
help='Number of times to split the DataFrame')
p.add_argument('--chunksize', dest='chunksize', type=int,
default=1000,
                   help='Number of lines used when reading in DataFrame')
p.add_argument('--n_batches', dest='n_batches', type=int,
default=50,
help='Number batches running in parallel for each ks-test trial')
p.add_argument('--ks_trials', dest='ks_trials', type=int,
default=100,
help='Number of random maps to generate')
p.add_argument('--overwrite', dest='overwrite',
default=False, action='store_true',
help='Option to overwrite reference map file, '
'if it alreadu exists')
p.add_argument('--test', dest='test',
default=False, action='store_true',
help='Option to run small test version')
args = p.parse_args()
if args.test:
args.ks_trials = 20
args.n_batches = 10000
args.chunksize = 100
# Define output directories
error = comp.paths.condor_data_dir + '/ks_test_{}/error'.format(args.config)
output = comp.paths.condor_data_dir + '/ks_test_{}/output'.format(args.config)
log = comp.paths.condor_scratch_dir + '/ks_test_{}/log'.format(args.config)
submit = comp.paths.condor_scratch_dir + '/ks_test_{}/submit'.format(args.config)
# Define path to executables
make_maps_ex = os.path.join(comp.paths.project_home,
'processing/anisotropy/ks_test_multipart',
'make_maps.py')
merge_maps_ex = os.path.join(comp.paths.project_home,
'processing/anisotropy/ks_test_multipart',
'merge_maps.py')
save_pvals_ex = os.path.join(comp.paths.project_home,
'processing/anisotropy/ks_test_multipart',
'save_pvals.py')
# Create Dagman instance
dag_name = 'anisotropy_kstest_{}'.format(args.config)
if args.test:
dag_name += '_test'
dagman = pycondor.Dagman(dag_name, submit=submit, verbose=1)
# Create Job for saving ks-test p-values for each trial
save_pvals_name = 'save_pvals_{}'.format(args.config)
if args.low_energy:
save_pvals_name += '_lowenergy'
save_pvals_job = pycondor.Job(save_pvals_name, save_pvals_ex,
error=error, output=output,
log=log, submit=submit,
verbose=1)
save_pvals_infiles_0 = []
save_pvals_infiles_1 = []
dagman.add_job(save_pvals_job)
outdir = os.path.join(comp.paths.comp_data_dir, args.config + '_data',
'anisotropy', 'random_splits')
if args.test:
outdir = os.path.join(outdir, 'test')
for trial_num in range(args.ks_trials):
# Create map_maps jobs for this ks_trial
make_maps_name = 'make_maps_{}_trial-{}'.format(args.config, trial_num)
if args.low_energy:
make_maps_name += '_lowenergy'
make_maps_job = pycondor.Job(make_maps_name, make_maps_ex,
error=error, output=output,
log=log, submit=submit,
verbose=1)
dagman.add_job(make_maps_job)
merge_maps_infiles_0 = []
merge_maps_infiles_1 = []
for batch_idx in range(args.n_batches):
if args.test and batch_idx > 2:
break
outfile_sample_1 = os.path.join(outdir,
'random_split_1_trial-{}_batch-{}.fits'.format(trial_num, batch_idx))
outfile_sample_0 = os.path.join(outdir,
'random_split_0_trial-{}_batch-{}.fits'.format(trial_num, batch_idx))
make_maps_arg_list = []
make_maps_arg_list.append('--config {}'.format(args.config))
make_maps_arg_list.append('--n_side {}'.format(args.n_side))
make_maps_arg_list.append('--chunksize {}'.format(args.chunksize))
make_maps_arg_list.append('--n_batches {}'.format(args.n_batches))
make_maps_arg_list.append('--batch_idx {}'.format(batch_idx))
make_maps_arg_list.append('--outfile_sample_0 {}'.format(outfile_sample_0))
make_maps_arg_list.append('--outfile_sample_1 {}'.format(outfile_sample_1))
make_maps_arg = ' '.join(make_maps_arg_list)
if args.low_energy:
make_maps_arg += ' --low_energy'
make_maps_job.add_arg(make_maps_arg)
# Add this outfile to the list of infiles for merge_maps_job
merge_maps_infiles_0.append(outfile_sample_0)
merge_maps_infiles_1.append(outfile_sample_1)
for sample_idx, input_file_list in enumerate([merge_maps_infiles_0,
merge_maps_infiles_1]):
merge_maps_name = 'merge_maps_{}_trial-{}_split-{}'.format(args.config, trial_num, sample_idx)
if args.low_energy:
merge_maps_name += '_lowenergy'
merge_maps_job = pycondor.Job(merge_maps_name, merge_maps_ex,
error=error, output=output,
log=log, submit=submit,
verbose=1)
# Ensure that make_maps_job completes before merge_maps_job begins
make_maps_job.add_child(merge_maps_job)
merge_maps_job.add_child(save_pvals_job)
dagman.add_job(merge_maps_job)
merge_infiles_str = ' '.join(input_file_list)
# Assemble merged output file path
merge_outfile = os.path.join(outdir, 'random_split_{}_trial-{}.fits'.format(sample_idx, trial_num))
merge_maps_arg = '--infiles {} --outfile {}'.format(merge_infiles_str, merge_outfile)
merge_maps_job.add_arg(merge_maps_arg)
if sample_idx == 0:
save_pvals_infiles_0.append(merge_outfile)
else:
save_pvals_infiles_1.append(merge_outfile)
save_pvals_infiles_0_str = ' '.join(save_pvals_infiles_0)
save_pvals_infiles_1_str = ' '.join(save_pvals_infiles_1)
if args.low_energy:
outfile_basename = 'ks_test_dataframe_lowenergy.hdf'
else:
outfile_basename = 'ks_test_dataframe.hdf'
outfile = os.path.join(outdir, outfile_basename)
save_pvals_arg = '--infiles_sample_0 {} --infiles_sample_1 {} ' \
'--outfile {}'.format(save_pvals_infiles_0_str, save_pvals_infiles_1_str, outfile)
save_pvals_job.add_arg(save_pvals_arg)
dagman.build_submit(fancyname=True)
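    # Resulting DAG shape (summary of the dependencies declared above): for each
    # ks-test trial one make_maps job fans out over --n_batches argument groups,
    # the two merge_maps jobs (one per random split) run after it, and the single
    # save_pvals job runs last, after every merge_maps job has finished.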
|
adobe-research/spark-cluster-deployment
|
initial-deployment-puppet/modules/spark/files/spark/python/pyspark/mllib/classification.py
|
Python
|
apache-2.0
| 7,307 | 0.001505 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy
from numpy import array, shape
from pyspark import SparkContext
from pyspark.mllib._common import \
_dot, _get_unmangled_rdd, _get_unmangled_double_vector_rdd, \
_serialize_double_matrix, _deserialize_double_matrix, \
_serialize_double_vector, _deserialize_double_vector, \
_get_initial_weights, _serialize_rating, _regression_train_wrapper, \
_linear_predictor_typecheck, _get_unmangled_labeled_point_rdd
from pyspark.mllib.linalg import SparseVector
from pyspark.mllib.regression import LabeledPoint, LinearModel
from math import exp, log
class LogisticRegressionModel(LinearModel):
"""A linear binary classification model derived from logistic regression.
>>> data = [
... LabeledPoint(0.0, [0.0]),
... LabeledPoint(1.0, [1.0]),
... LabeledPoint(1.0, [2.0]),
... LabeledPoint(1.0, [3.0])
... ]
>>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(data))
>>> lrm.predict(array([1.0])) > 0
True
>>> lrm.predict(array([0.0])) <= 0
True
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>> lrm = LogisticRegressionWithSGD.train(sc.parallelize(sparse_data))
>>> lrm.predict(array([0.0, 1.0])) > 0
True
>>> lrm.predict(array([0.0, 0.0])) <= 0
True
>>> lrm.predict(SparseVector(2, {1: 1.0})) > 0
True
>>> lrm.predict(SparseVector(2, {1: 0.0})) <= 0
True
"""
    def predict(self, x):
        _linear_predictor_typecheck(x, self._coeff)
margin = _dot(x, self._coeff) + self._intercept
prob = 1/(1 + exp(-margin))
return 1 if prob > 0.5 else 0
class LogisticRegressionWithSGD(object):
@classmethod
def train(cls, data, iterations=100, step=1.0, miniBatchFraction=1.0, initialWeights=None):
"""Train a logistic regression model on the given data."""
sc = data.context
train_func = lambda d, i: sc._jvm.PythonMLLibAPI().trainLogisticRegressionModelWithSGD(
d._jrdd, iterations, step, miniBatchFraction, i)
return _regression_train_wrapper(sc, train_func, LogisticRegressionModel, data,
initialWeights)
class SVMModel(LinearModel):
"""A support vector machine.
>>> data = [
... LabeledPoint(0.0, [0.0]),
... LabeledPoint(1.0, [1.0]),
... LabeledPoint(1.0, [2.0]),
... LabeledPoint(1.0, [3.0])
... ]
>>> svm = SVMWithSGD.train(sc.parallelize(data))
>>> svm.predict(array([1.0])) > 0
True
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {0: -1.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(0.0, SparseVector(2, {0: 0.0})),
... LabeledPoint(1.0, SparseVector(2, {1: 2.0}))
... ]
>>> svm = SVMWithSGD.train(sc.parallelize(sparse_data))
>>> svm.predict(SparseVector(2, {1: 1.0})) > 0
True
>>> svm.predict(SparseVector(2, {0: -1.0})) <= 0
True
"""
def predict(self, x):
_linear_predictor_typecheck(x, self._coeff)
margin = _dot(x, self._coeff) + self._intercept
return 1 if margin >= 0 else 0
class SVMWithSGD(object):
@classmethod
def train(cls, data, iterations=100, step=1.0, regParam=1.0,
miniBatchFraction=1.0, initialWeights=None):
"""Train a support vector machine on the given data."""
sc = data.context
train_func = lambda d, i: sc._jvm.PythonMLLibAPI().trainSVMModelWithSGD(
d._jrdd, iterations, step, regParam, miniBatchFraction, i)
return _regression_train_wrapper(sc, train_func, SVMModel, data, initialWeights)
class NaiveBayesModel(object):
"""
Model for Naive Bayes classifiers.
Contains two parameters:
- pi: vector of logs of class priors (dimension C)
- theta: matrix of logs of class conditional probabilities (CxD)
>>> data = [
... LabeledPoint(0.0, [0.0, 0.0]),
... LabeledPoint(0.0, [0.0, 1.0]),
... LabeledPoint(1.0, [1.0, 0.0]),
... ]
>>> model = NaiveBayes.train(sc.parallelize(data))
>>> model.predict(array([0.0, 1.0]))
0.0
>>> model.predict(array([1.0, 0.0]))
1.0
>>> sparse_data = [
... LabeledPoint(0.0, SparseVector(2, {1: 0.0})),
... LabeledPoint(0.0, SparseVector(2, {1: 1.0})),
... LabeledPoint(1.0, SparseVector(2, {0: 1.0}))
... ]
>>> model = NaiveBayes.train(sc.parallelize(sparse_data))
>>> model.predict(SparseVector(2, {1: 1.0}))
0.0
>>> model.predict(SparseVector(2, {0: 1.0}))
1.0
"""
def __init__(self, labels, pi, theta):
self.labels = labels
self.pi = pi
self.theta = theta
def predict(self, x):
"""Return the most likely class for a data vector x"""
return self.labels[numpy.argmax(self.pi + _dot(x, self.theta.transpose()))]
class NaiveBayes(object):
@classmethod
def train(cls, data, lambda_=1.0):
"""
Train a Naive Bayes model given an RDD of (label, features) vectors.
This is the Multinomial NB (U{http://tinyurl.com/lsdw6p}) which can
handle all kinds of discrete data. For example, by converting
documents into TF-IDF vectors, it can be used for document
classification. By making every vector a 0-1 vector, it can also be
used as Bernoulli NB (U{http://tinyurl.com/p7c96j6}).
@param data: RDD of NumPy vectors, one per element, where the first
coordinate is the label and the rest is the feature vector
(e.g. a count vector).
@param lambda_: The smoothing parameter
"""
sc = data.context
dataBytes = _get_unmangled_labeled_point_rdd(data)
ans = sc._jvm.PythonMLLibAPI().trainNaiveBayes(dataBytes._jrdd, lambda_)
return NaiveBayesModel(
_deserialize_double_vector(ans[0]),
_deserialize_double_vector(ans[1]),
_deserialize_double_matrix(ans[2]))
def _test():
import doctest
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
xinbian/2dns
|
tests/test_python_2d_ns.py
|
Python
|
mit
| 2,191 | 0.043359 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_python_2d_ns
----------------------------------
Tests for `python_2d_ns` module.
"""
import sys
import unittest
from python_2d_ns.python_2d_ns import *
class TestPython_2d_ns(unittest.TestCase):
    #test x, y coordinates generated by function IC_coor
#assume use 2 threads and rank==1
#y coordinate should be the same as serial code
def test_IC_coor_y_coor(self):
x, y, kx, ky, k2, k2_exp=IC_coor(64, 64, 32, 1, 1, 1, 2)
        self.assertTrue(y[3,0]==-32)
self.assertTrue(y[3,5]==-27)
#x coordinate for rank 2 should start from 0
def test_IC_coor_x_coor(self):
x, y, kx, ky, k2, k2_exp=IC_coor(64, 64, 32, 1, 1, 1, 2)
#this coordinate should be 0
self.assertTrue(x[0,2]==0)
#test initial condition, Taylor green forcing, test whether the value is given on specific wavenumber
def test_IC_con(self):
#generate kx, ky, assume 2 threads, rank==0
x, y, kx, ky, k2, k2_exp=IC_coor(32, 32, 16, 1, 1, 0, 2)
Vxhat, Vyhat=IC_condition(1, 2, kx, ky, 32, 16)
#this wavenumber should be zero
self.assertTrue(Vyhat[2,5]==0)
#this wavenumber should be non-zero
self.assertTrue(Vxhat[14,14]==0.5j)
#test dealiasing function, which will remove values in wavenumber >= Nx/3
def test_delias(self):
#generate kx, ky, assume 2 threads, rank==1
Vxhat=zeros((Nx, Np), dtype=complex);
Vyhat=zeros((Nx, Np), dtype=complex);
Vxhat[:]=1
Vxhat, Vyhat=delias(Vxhat, Vyhat, Nx, Np, k2)
#this should be zero
self.assertTrue(Vxhat[Nx-1,Np-1]==0)
self.assertTrue(Vyhat[Nx-1,Np-1]==0)
#test FFT and IFFT. Take FFT and IFFT on array, it will transform back (with some numerical errors)
def test_FFT(self):
testa=zeros((Np, Ny), dtype=float);
testahat=empty(( N, Np) , dtype = complex )
if rank==0:
testa[2,0]=1
testa=ifftn_mpi(fftn_mpi(testa, testahat), testa)
#after FFT and IFFT, this value should be the same
if rank==0:
self.assertTrue(testa[2,0]-1<0.0001)
if __name__ == '__main__':
sys.exit(unittest.main())
|
Drvanon/Game
|
venv/lib/python3.3/site-packages/tornado/iostream.py
|
Python
|
apache-2.0
| 41,823 | 0.000287 |
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility classes to write to and read from non-blocking files and sockets.
Contents:
* `BaseIOStream`: Generic interface for reading and writing.
* `IOStream`: Implementation of BaseIOStream using non-blocking sockets.
* `SSLIOStream`: SSL-aware version of IOStream.
* `PipeIOStream`: Pipe-based IOStream implementation.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import errno
import numbers
import os
import socket
import ssl
import sys
import re
from tornado import ioloop
from tornado.log import gen_log, app_log
from tornado.netutil import ssl_wrap_socket, ssl_match_hostname, SSLCertificateError
from tornado import stack_context
from tornado.util import bytes_type
try:
from tornado.platform.posix import _set_nonblocking
except ImportError:
_set_nonblocking = None
class StreamClosedError(IOError):
"""Exception raised by `IOStream` methods when the stream is closed.
Note that the close callback is scheduled to run *after* other
callbacks on the stream (to allow for buffered data to be processed),
so you may see this error before you see the close callback.
"""
pass
class BaseIOStream(object):
"""A utility class to write to and read from a non-blocking file or socket.
We support a non-blocking ``write()`` and a family of ``read_*()`` methods.
All of the methods take callbacks (since writing and reading are
non-blocking and asynchronous).
When a stream is closed due to an error, the IOStream's ``error``
attribute contains the exception object.
Subclasses must implement `fileno`, `close_fd`, `write_to_fd`,
`read_from_fd`, and optionally `get_fd_error`.
"""
def __init__(self, io_loop=None, max_buffer_size=None,
read_chunk_size=4096):
self.io_loop = io_loop or ioloop.IOLoop.current()
self.max_buffer_size = max_buffer_size or 104857600
self.read_chunk_size = read_chunk_size
self.error = None
self._read_buffer = collections.deque()
self._write_buffer = collections.deque()
self._read_buffer_size = 0
self._write_buffer_frozen = False
self._read_delimiter = None
self._read_regex = None
self._read_bytes = None
self._read_until_close = False
self._read_callback = None
self._streaming_callback = None
self._write_callback = None
self._close_callback = None
self._connect_callback = None
self._connecting = False
self._state = None
self._pending_callbacks = 0
self._closed = False
def fileno(self):
"""Returns the file descriptor for this stream."""
raise NotImplementedError()
def close_fd(self):
"""Closes the file underlying this stream.
``close_fd`` is called by `BaseIOStream` and should not be called
elsewhere; other users should call `close` instead.
"""
raise NotImplementedError()
def write_to_fd(self, data):
"""Attempts to write ``data`` to the underlying file.
Returns the number of bytes written.
"""
raise NotImplementedError()
def read_from_fd(self):
"""Attempts to read from the underlying file.
Returns ``None`` if there was nothing to read (the socket
returned `~errno.EWOULDBLOCK` or equivalent), otherwise
returns the data. When possible, should return no more than
``self.read_chunk_size`` bytes at a time.
"""
raise NotImplementedError()
def get_fd_error(self):
"""Returns information about any error on the underlying file.
This method is called after the `.IOLoop` has signaled an error on the
file descriptor, and should return an Exception (such as `socket.error`
with additional information, or None if no such information is
available.
"""
return None
def read_until_regex(self, regex, callback):
"""Run ``callback`` when we read the given regex pattern.
The callback will get the data read (including the data that
matched the regex and anything that came before it) as an argument.
"""
self._set_read_callback(callback)
self._read_regex = re.compile(regex)
self._try_inline_read()
def read_until(self, delimiter, callback):
"""Run ``callback`` when we read the given delimiter.
The callback will get the data read (including the delimiter)
as an argument.
"""
self._set_read_callback(callback)
self._read_delimiter = delimiter
self._try_inline_read()
def read_bytes(self, num_bytes, callback, streaming_callback=None):
"""Run callback when we read the given number of bytes.
If a ``streaming_callback`` is given, it will be called with chunks
of data as they become available, and the argument to the final
``callback`` will be empty. Otherwise, the ``callback`` gets
the data as an argument.
"""
self._set_read_callback(callback)
assert isinstance(num_bytes, numbers.Integral)
self._read_bytes = num_bytes
self._streaming_callback = stack_context.wrap(streaming_callback)
self._try_inline_read()
def read_until_close(self, callback, streaming_callback=None):
"""Reads all data from the socket until it is closed.
If a ``streaming_callback`` is given, it will be called with chunks
of data as they become available, and the argument to the final
``callback`` will be empty. Otherwise, the ``callback`` gets the
data as an argument.
Subject to ``max_buffer_size`` limit from `IOStream` constructor if
a ``streaming_callback`` is not used.
"""
self._set_read_callback(callback)
self._streaming_callback = stack_context.wrap(streaming_callback)
if self.closed():
if self._streaming_callback is not None:
self._run_callback(self._streaming_callback,
self._consume(self._read_buffer_size))
self._run_callback(self._read_callback,
self._consume(self._read_buffer_size))
self._streaming_callback = None
self._read_callback = None
return
self._read_until_close = True
self._streaming_callback = stack_context.wrap(streaming_callback)
self._try_inline_read()
def write(self, data, callback=None):
"""Write the given data to this stream.
If ``callback`` is given, we call it when all of the buffered write
data has been successfully written to the stream. If there was
previously buffered write data and an old write callback, that
callback is simply overwritten with this new callback.
"""
assert isinstance(data, bytes_type)
self._check_closed()
# We use bool(_write_buffer) as a proxy for write_buffer_size>0,
# so never put empty strings in the buffer.
if data:
# Break up large contiguous strings before inserting them in the
# write buffer, so we don't have to recopy the entire thing
# as we slice off pieces to send to the socket.
WRITE_BUFFER_CHUNK_SIZE = 128 * 1024
if len(data) > WRITE_BUFFER_CHUNK_SIZE:
for i in range(0, len(data), WRITE_BUFFER_CHUNK_SIZE):
self._write_buffer.append(data[i:i +
|
LLNL/spack
|
var/spack/repos/builtin/packages/py-ytopt/package.py
|
Python
|
lgpl-2.1
| 1,360 | 0.003676 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyYtopt(PythonPackage):
"""Ytopt package implements search using Random Forest (SuRF), an autotuning
search method developed within Y-Tune ECP project."""
maintainers = ['Kerilk']
homepage = "https://github.com/ytopt-team/ytopt"
url = "https://github.com/ytopt-team/ytopt/archive/refs/tags/v0.0.1.tar.gz"
version('0.0.2', sha256='5a624aa678b976ff6ef867610bafcb0dfd5c8af0d880138ca5d56d3f776e6d71')
version('0.0.1', sha256='3ca616922c8e76e73f695a5ddea5dd91b0103eada726185f008343cc5cbd7744')
depends_on('python@3.6:', type=('build', 'run'))
depends_on('py-scikit-learn@0.23.1', type=('build', 'run'))
depends_on('py-dh-scikit-optimize', type=('build', 'run'))
depends_on('py-configspace', type=('build', 'run'))
    depends_on('py-numpy', type=('build', 'run'))
depends_on('py-ytopt-autotune@1.1:', type=('build', 'run'))
depends_on('py-joblib', type=('build', 'run'))
    depends_on('py-deap', type=('build', 'run'))
depends_on('py-tqdm', type=('build', 'run'))
depends_on('py-ray', type=('build', 'run'))
depends_on('py-mpi4py@3.0.0:', type=('build', 'run'))
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/scatter3d/marker/colorbar/_tickvalssrc.py
|
Python
|
mit
| 461 | 0 |
import _plotly_utils.basevalidators
class TickvalssrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="tickvalssrc",
parent_name="scatter3d.marker.colorbar",
**kwargs
):
super(TickvalssrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("e
|
dit_type", "none"),
**kwarg
|
s
)
|
magnusax/ml-meta-wrapper
|
gazer/visualize.py
|
Python
|
mit
| 1,824 | 0.01261 |
import sys
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
class Visualizer():
def __init__(self, *args):
pass
def show_performance(self, list_of_tuples, fig_size=(9,9), font_scale=1.1, file=''):
"""
Parameters: list_of_tuples:
- list containing (clf_name, clf_performance) tuples for each
classifier we wish to visualize
fig_size:
- set figure size (default: (9,9))
                    font_scale:
- text scale in seaborn plots (default: 1.1)
file:
- string containing a valid filename (default: '')
Output: f: (matplotlib.pyplot.figure object)
"""
if not (isinstance(list_of_tuples, list) and isinstance(list_of_tuples[0], tuple)):
raise ValueError("Expecting a list of tuples")
sns.set(font_scale=font_scale)
sns.set_style("whitegrid")
data = list()
for name, value in list_of_tuples: data.append([name, value])
data = pd.DataFrame(data, columns=['classifier', 'performance'])
data.sort_values('performance', inplace=True, ascending=False)
"""
Close all figures (can close individual figure using plt.close(f)
where f is a matplotlib.pyplot.figure object)
"""
plt.close('all')
f = plt.figure(figsize=fig_size)
sns.barplot(x='performance', y='classifier', data=data)
plt.xlabel('performance')
if len(file)>1:
try:
plt.savefig(file)
except:
pass
return f
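# Illustrative usage sketch (added note, not part of the original module); the
# classifier names and scores below are made-up placeholders:
#
#     viz = Visualizer()
#     fig = viz.show_performance(
#         [('random_forest', 0.93), ('logistic_regression', 0.88), ('svm', 0.85)],
#         fig_size=(6, 4), file='performance.png')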
if __name__ == '__main__':
sys.exit(-1)
|
mrjmad/robotswars
|
maingame/urls.py
|
Python
|
mit
| 868 | 0.009217 |
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from .views import (AvailableMapListview, AvailableMapsDetailview,
index_view, MyArmiesListView, ArmyCreateView,
ArmyDetailView, RobotCreateView)
urlpatterns = patterns('',
url(r'maingame/maps$', AvailableMapListview.as_view(), name='list_available_maps'),
url(r'maingame/map/(?P<pk>\d+)$', AvailableMapsDetailview.as_view(), name="available_map_detail" ),
url(r'maingame/my_armies$', MyArmiesListView.as_view(), name='my_armies'),
    url(r'maingame/army/(?P<pk>\d+)$', ArmyDetailView.as_view(), name="army_detail" ),
url(r'maingame/create_armies$', ArmyCreateView.as_view(), name='add_army'),
url(r'maingame/create_robot$', RobotCreateView.as_view(), name='add_robot_to_army'),
url(r'^$', index_view, name="index"),
)
|
DIRACGrid/DIRAC
|
src/DIRAC/Core/scripts/dirac_version.py
|
Python
|
gpl-3.0
| 662 | 0 |
#!/usr/bin/env python
########################################################################
# File : dirac-version
# Author : Ricardo Graciani
########################################################################
"""
Print version of current DIRAC installation
Usage:
    dirac-version [option]
Example:
$ dirac-version
"""
import argparse
import DIRAC
from DIRAC.Core.Base.Script import Script
@Script()
def main():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.parse_known_args()
print(DIRAC.version)
if __name__ == "__main__":
main()
|
pepincho/Python101-and-Algo1-Courses
|
Algo-1/Application/1-Palindromes.py
|
Python
|
mit
| 937 | 0 |
def is_palindrome(obj):
obj = str(obj)
obj_list = list(obj)
obj_list_reversed = obj_list[::-1]
return obj_list == obj_list_reversed
def generate_rotations(word):
letters = list(word)
string_rotations = []
counter = len(letters)
temp = letters
while counter != 0:
current_letter = temp.pop(0)
temp.append(current_letter)
word = "".join(temp)
string_rotations.append(word)
counter -= 1
return string_rotations
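# Worked example (added note, not in the original file): for the input "abc"
# the loop above pops the first letter and re-appends it on every pass, so
# generate_rotations("abc") returns ["bca", "cab", "abc"].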
def get_rotated_palindromes(string_rotations):
is_empty = True
for word in string_rotations:
if is_palindrome(word) is True:
print(word)
is_empty = False
if is_empty is True:
print("NONE")
def main():
user_input = input("Enter a string: ")
string_rotations = generate_rotations(user_input)
get_rotated_palindromes(string_rotations)
if __name__ == '__main__':
main()
|
wangjun/flask-paginate
|
example/app.py
|
Python
|
bsd-3-clause
| 2,185 | 0.001373 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
from __future__ import unicode_literals
import sqlite3
from flask import Flask, render_template, g, current_app, request
from flask.ext.paginate import Pagination
app = Flask(__name__)
app.config.from_pyfile('app.cfg')
@app.before_request
def before_request():
g.conn = sqlite3.connect('test.db')
g.conn.row_factory = sqlite3.Row
g.cur = g.conn.cursor()
@app.teardown_request
def teardown(error):
if hasattr(g, 'conn'):
g.conn.close()
@app.route('/')
def index():
g.cur.execute('select count(*) from users')
total = g.cur.fetchone()[0]
page, per_page, offset = get_page_items()
sql = 'select name from users order by name limit {}, {}'\
.format(offset, per_page)
g.cur.execute(sql)
users = g.cur.fetchall()
pagination = get_pagination(page=page,
per_page=per_page,
total=total,
record_name='users',
)
return render_template('index.html', users=users,
page=page,
per_page=per_page,
pagination=pagination,
)
def get_css_framework():
return current_app.config.get('CSS_FRAMEWORK', 'bootstrap3')
def get_link_size():
return current_app.config.get('LINK_SIZE', 'sm')
def show_single_page_or_not():
return current_app.config.get('SHOW_SINGLE_PAGE', False)
def get_page_items():
page = int(request.args.get('page', 1))
per_page = request.args.get('per_page')
if not per_page:
per_page = current_app.config.get('PER_PAGE', 10)
else:
per_page = int(per_page)
    offset = (page - 1) * per_page
return page, per_page, offset
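# Added note (not in the original file): with the default PER_PAGE of 10,
# requesting ?page=3 gives offset = (3 - 1) * 10 = 20, so the LIMIT clause in
# index() selects rows 21-30 of the ordered result set.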
def get_pagination(**kwargs):
    kwargs.setdefault('record_name', 'records')
return Pagination(css_framework=get_css_framework(),
link_size=get_link_size(),
show_single_page=show_single_page_or_not(),
**kwargs
)
if __name__ == '__main__':
app.run(debug=True)
|
gunnery/gunnery
|
gunnery/core/views.py
|
Python
|
apache-2.0
| 7,964 | 0.001758 |
from django.contrib.contenttypes.models import ContentType
import json
from django.http import Http404, HttpResponse
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required, user_passes_test
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404, redirect, render
from guardian.decorators import permission_required
from guardian.shortcuts import get_objects_for_user
from account.models import DepartmentGroup
from backend.tasks import TestConnectionTask
from event.models import NotificationPreferences
from .models import Application, Department, Environment, Server, ServerRole
from task.models import Execution
@login_required
def index(request):
data = {}
executions = Execution.objects.filter(task__application__department_id=request.current_department_id)
if not executions.count():
return redirect(reverse('first_steps_page'))
return render(request, 'page/index.html', data)
@permission_required('core.view_application', (Application, 'id', 'application_id'))
def application_page(request, application_id):
data = {}
data['application'] = get_object_or_404(Application, pk=application_id)
return render(request, 'page/application.html', data)
@permission_required('core.view_environment', (Environment, 'id', 'environment_id'))
def environment_page(request, environment_id):
data = {}
data['environment'] = get_object_or_404(Environment, pk=environment_id)
data['servers'] = list(Server.objects.filter(environment_id=environment_id).prefetch_related('roles'))
return render(request, 'page/environment.html', data)
@permission_required('core.view_environment', (Environment, 'servers__id', 'server_id'))
def server_test(request, server_id):
data = {}
data['server'] = get_object_or_404(Server, pk=server_id)
data['task_id'] = TestConnectionTask().delay(server_id).id
return render(request, 'partial/server_test.html', data)
@login_required
def server_test_ajax(request, task_id):
data = {}
task = TestConnectionTask().AsyncResult(task_id)
if task.status == 'SUCCESS':
status, output = task.get()
data['status'] = status
data['output'] = output
elif task.status == 'FAILED':
data['status'] = False
else:
data['status'] = None
return HttpResponse(json.dumps(data), content_type="application/json")
@login_required
def first_steps_page(request):
data = {}
return render(request, 'page/first_steps.html', data)
@login_required
def settings_page(request, section='user', subsection='profile'):
data = {}
data['section'] = section
data['subsection'] = subsection
data['department'] = Department(pk=request.current_department_id)
data['on_settings'] = True
handler = '_settings_%s_%s' % (section, subsection)
if section == 'system' and request.user.is_superuser is not True:
return redirect('index')
if section == 'department' and not request.user.has_perm('core.change_department', obj=data['department']):
return redirect('index')
if handler in globals():
data = globals()[handler](request, data)
else:
raise Http404
return render(request, 'page/settings.html', data)
def _settings_account_profile(request, data):
data['subsection_template'] = 'partial/account_profile.html'
from account.forms import account_create_form
form = account_create_form('user_profile', request, request.user.id)
form.fields['email'].widget.attrs['readonly'] = True
data['form'] = form
if request.method == 'POST':
if form.is_valid():
form.save()
data['user'] = form.instance
messages.success(request, 'Saved')
return data
def _settings_account_password(request, data):
data['subsection_template'] = 'partial/account_password.html'
from account.forms import account_create_form
form = account_create_form('user_password', request, request.user.id)
data['form'] = form
if request.method == 'POST':
if form.is_valid():
user = form.save(commit=False)
user.set_password(user.password)
user.save()
data['user'] = form.instance
messages.success(request, 'Saved')
return data
def _settings_account_notifications(request, data):
data['subsection_template'] = 'partial/account_notifications.html'
data['applications'] = get_objects_for_user(request.user, 'core.view_application')
content_type = ContentType.objects.get_for_model(Application)
if request.method == 'POST':
for application in data['applications']:
key = 'notification[%s]' % application.id
notification, created = NotificationPreferences.objects.get_or_create(
user=request.user,
event_type='ExecutionFinish',
content_type=content_type,
object_id=application.id)
if notification.is_active != (key in request.POST):
notification.is_active = key in request.POST
notification.save()
messages.success(request, 'Saved')
data['notifications'] = NotificationPreferences.objects.filter(
user=request.user,
event_type='ExecutionFinish',
content_type=content_type.id).values_list('object_id', 'is_active')
data['notifications'] = dict(data['notifications'])
return data
def _settings_department_applications(request, data):
data['subsection_template'] = 'partial/application_list.html'
data['applications'] = Application.objects.filter(department_id=request.current_department_id)
data['empty'] = not bool(data['applications'].count())
return data
def _settings_department_users(request, data):
data['subsection_template'] = 'partial/user_list.html'
from guardian.shortcuts import get_users_with_perms
department = Department.objects.get(pk=request.current_department_id)
data['users'] = get_users_with_perms(department).prefetch_related('groups__departmentgroup').order_by('name')
data['department_user_list'] = True
data['form_name'] = 'user'
return data
def _settings_department_groups(request, data):
data['subsection_template'] = 'partial/group_list.html'
data['groups'] = DepartmentGroup.objects.filter(department_id=request.current_department_id)
return data
def _settings_department_serverroles(request, data):
data['subsection_template'] = 'partial/serverrole_list.html'
data['serverroles'] = ServerRole.objects.filter(department_id=request.current_department_id)
data['empty'] = not bool(data['serverroles'].count())
return data
@user_passes_test(lambda u: u.is_superuser)
def _settings_system_departments(request, data):
data['subsection_template'] = 'partial/department_list.html'
data['departments'] = Department.objects.all()
return data
@user_passes_test(lambda u: u.is_superuser)
def _settings_system_users(request, data):
data['subsection_template'] = 'partial/user_list.html'
data['users'] = get_user_model().objects.exclude(id=-1).prefetch_related('groups__departmentgroup__department').order_by('name')
data['form_name'] = 'usersystem'
return data
def department_switch(request, id):
department = get_object_or_404(Department, pk=id)
if request.user.has_perm('core.view_department', department):
request.session['current_department_id'] = int(id)
else:
messages.error(request, 'Access forbidden')
return redirect('index')
def handle_403(request):
print 'aaaaaaaa'
messages.error(request, 'Access forbidden')
return redirect('index')
|
mezz64/home-assistant
|
homeassistant/components/acmeda/sensor.py
|
Python
|
apache-2.0
| 1,666 | 0 |
"""Support for Acmeda Roller Blind Batteries."""
from __future__ import annotations
from homeassistant.components.sensor import SensorDeviceClass, SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import PERCENTAGE
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .base import AcmedaBase
from .const import ACMEDA_HUB_UPDATE, DOMAIN
from .helpers import async_add_acmeda_entities
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Acmeda Rollers from a config entry."""
hub = hass.data[DOMAIN][config_entry.entry_id]
current: set[int] = set()
@callback
def async_add_acmeda_sensors():
async_add_acmeda_entities(
hass, AcmedaBattery, config_entry, current, async_add_entities
)
hub.cleanup_callbacks.append(
async_dispatcher_connect(
hass,
ACMEDA_HUB_UPDATE.format(config_entry.entry_id),
async_add_acmeda_sensors,
)
)
class AcmedaBattery(AcmedaBase, SensorEntity):
"""Representation of a Acmeda cover device."""
device_class = SensorDeviceClass.BATTERY
_attr_native_unit_of_measurement = PERCENTAGE
@property
def name(self):
"""Return the name of roller."""
return f"{super().name} Battery"
@property
def native_value(self):
"""Return the state of the device."""
return self.roller.battery
|
ashcrow/etcdobj
|
setup.py
|
Python
|
bsd-3-clause
| 2,107 | 0.000475 |
#!/usr/bin/env python
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3)The name of the author may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Python setup script.
"""
from setuptools import setup, find_packages
def extract_requirements(filename):
with open(filename, 'r') as requirements_file:
return [x[:-1] for x in requirements_file.readlines()]
install_requires = extract_requirements('requirements.txt')
test_require = extract_requirements('test-requirements.txt')
setup(
name='etcdobj',
version='0.0.0',
description='Basic ORM for etcd',
author='Steve Milner',
url='https://github.com/ashcrow/etcdobj',
license="MBSD",
install_requires=install_requires,
tests_require=test_require,
package_dir={'': 'src'},
packages=find_packages('src'),
)
|
subramani95/neutron
|
neutron/plugins/plumgrid/plumgrid_plugin/plumgrid_plugin.py
|
Python
|
apache-2.0
| 24,717 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 PLUMgrid, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Edgar Magana, emagana@plumgrid.com, PLUMgrid, Inc.
"""
Neutron Plug-in for PLUMgrid Virtual Networking Infrastructure (VNI)
This plugin will forward authenticated REST API calls
to the PLUMgrid Network Management System called Director
"""
import netaddr
from oslo.config import cfg
from sqlalchemy.orm import exc as sa_exc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.db import db_base_plugin_v2
from neutron.db import external_net_db
from neutron.db import l3_db
from neutron.db import portbindings_db
from neutron.db import quota_db # noqa
from neutron.extensions import portbindings
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.plugins.plumgrid.common import exceptions as plum_excep
from neutron.plugins.plumgrid.plumgrid_plugin import plugin_ver
LOG = logging.getLogger(__name__)
director_server_opts = [
cfg.StrOpt('director_server', default='localhost',
help=_("PLUMgrid Director server to connect to")),
cfg.StrOpt('director_server_port', default='8080',
help=_("PLUMgrid Director server port to connect to")),
cfg.StrOpt('username', default='username',
help=_("PLUMgrid Director admin username")),
cfg.StrOpt('password', default='password', secret=True,
help=_("PLUMgrid Director admin password")),
cfg.IntOpt('servertimeout', default=5,
help=_("PLUMgrid Director server timeout")),
cfg.StrOpt('driver',
default="neutron.plugins.plumgrid.drivers.plumlib.Plumlib",
help=_("PLUMgrid Driver")), ]
cfg.CONF.register_opts(director_server_opts, "plumgriddirector")
class NeutronPluginPLUMgridV2(db_base_plugin_v2.NeutronDbPluginV2,
portbindings_db.PortBindingMixin,
external_net_db.External_net_db_mixin,
l3_db.L3_NAT_db_mixin):
supported_extension_aliases = ["external-net", "router", "binding",
"quotas", "provider"]
binding_view = "extension:port_binding:view"
binding_set = "extension:port_binding:set"
def __init__(self):
LOG.info(_('Neutron PLUMgrid Director: Starting Plugin'))
super(NeutronPluginPLUMgridV2, self).__init__()
self.plumgrid_init()
LOG.debug(_('Neutron PLUMgrid Director: Neutron server with '
'PLUMgrid Plugin has started'))
def plumgrid_init(self):
"""PLUMgrid initialization."""
director_plumgrid = cfg.CONF.plumgriddirector.director_server
director_port = cfg.CONF.plumgriddirector.director_server_port
director_admin = cfg.CONF.plumgriddirector.username
director_password = cfg.CONF.plumgriddirector.password
timeout = cfg.CONF.plumgriddirector.servertimeout
plum_driver = cfg.CONF.plumgriddirector.driver
# PLUMgrid Director info validation
LOG.info(_('Neutron PLUMgrid Director: %s'), director_plumgrid)
self._plumlib = importutils.import_object(plum_driver)
self._plumlib.director_conn(director_plumgrid, director_port, timeout,
director_admin, director_password)
def create_network(self, context, network):
"""Create Neutron network.
Creates a PLUMgrid-based bridge.
"""
LOG.debug(_('Neutron PLUMgrid Director: create_network() called'))
# Plugin DB - Network Create and validation
tenant_id = self._get_tenant_id_for_create(context,
network["network"])
self._network_admin_state(network)
with context.session.begin(subtransactions=True):
net_db = super(NeutronPluginPLUMgridV2,
self).create_network(context, network)
# Propagate all L3 data into DB
self._process_l3_create(context, net_db, network['network'])
try:
LOG.debug(_('PLUMgrid Library: create_network() called'))
self._plumlib.create_network(tenant_id, net_db, network)
except Exception as err_message:
raise plum_excep.PLUMgridException(err_msg=err_message)
# Return created network
return net_db
def update_network(self, context, net_id, network):
"""Update Neutron network.
Updates a PLUMgrid-based bridge.
"""
LOG.debug(_("Neutron PLUMgrid Director: update_network() called"))
self._network_admin_state(network)
tenant_id = self._get_tenant_id_for_create(context, network["network"])
with context.session.begin(subtransactions=True):
# Plugin DB - Network Update
net_db = super(
NeutronPluginPLUMgridV2, self).update_network(context,
net_id, network)
self._process_l3_update(context, net_db, network['network'])
try:
LOG.debug(_("PLUMgrid Library: update_network() called"))
self._plumlib.update_network(tenant_id, net_id)
except Exception as err_message:
raise plum_excep.PLUMgridException(err_msg=err_message)
# Return updated network
return net_db
def delete_network(self, context, net_id):
"""Delete Neutron network.
Deletes a PLUMgrid-based bridge.
"""
LOG.debug(_("Neutron PLUMgrid Director: delete_network() called"))
net_db = super(NeutronPluginPLUMgridV2,
self).get_network(context, net_id)
with context.session.begin(subtransactions=True):
self._process_l3_delete(context, net_id)
# Plugin DB - Network Delete
super(NeutronPluginPLUMgridV2, self).delete_network(context,
net_id)
try:
LOG.debug(_("PLUMgrid Library: update_network() called"))
self._plumlib.delete_network(net_db, net_id)
except Exception as err_message:
raise plum_excep.PLUMgridException(err_msg=err_message)
def create_port(self, context, port):
"""Create Neutron port.
Creates a PLUMgrid-based port on the specific Virtual Network
Function (VNF).
"""
LOG.debug(_("Neutron PLUMgrid Director: create_port() called"))
# Port operations on PLUMgrid Director is an automatic operation
# from the VIF driver operations in Nova.
        # It requires admin_state_up to be True
port["port"]["admin_state_up"] = True
with context.session.begin(subtransactions=True):
            # Plugin DB - Port Create and Return port
port_db = super(NeutronPluginPLUMgridV2, self).create_port(context,
port)
device_id = port_db["device_id"]
if port_db["device_owner"] == constants.DEVICE_OWNER_ROUTER_GW:
router_db = self._get_router(context, device_id)
else:
router_db = None
try:
LOG.debug(_("PLUMgrid Library: create_port() called"))
self._plumlib.create_port(port_db, router_db)
except Exception as err_message:
raise plum_excep.PLUMgridException(err_
|
saltstack/salt
|
tests/pytests/unit/test_minion.py
|
Python
|
apache-2.0
| 35,098 | 0.001197 |
import copy
import logging
import os
import pytest
import salt.ext.tornado
import salt.ext.tornado.gen
import salt.ext.tornado.testing
import salt.minion
import salt.syspaths
import salt.utils.crypt
import salt.utils.event as event
import salt.utils.platform
import salt.utils.process
from salt._compat import ipaddress
from salt.exceptions import SaltClientError, SaltMasterUnresolvableError, SaltSystemExit
from tests.support.mock import MagicMock, patch
log = logging.getLogger(__name__)
def test_minion_load_grains_false():
"""
Minion does not generate grains when load_grains is False
"""
opts = {"random_startup_delay": 0, "grains": {"foo": "bar"}}
with patch("salt.loader.grains") as grainsfunc:
minion = salt.minion.Minion(opts, load_grains=False)
assert minion.opts["grains"] == opts["grains"]
grainsfunc.assert_not_called()
def test_minion_load_grains_true():
"""
Minion generates grains when load_grains is True
"""
opts = {"random_startup_delay": 0, "grains": {}}
with patch("salt.loader.grains") as grainsfunc:
minion = salt.minion.Minion(opts, load_grains=True)
assert minion.opts["grains"] != {}
grainsfunc.assert_called()
def test_minion_load_grains_default():
"""
Minion load_grains defaults to True
"""
opts = {"random_startup_delay": 0, "grains": {}}
with patch("salt.loader.grains") as grainsfunc:
minion = salt.minion.Minion(opts)
assert minion.opts["grains"] != {}
grainsfunc.assert_called()
@pytest.mark.parametrize(
"event",
[
(
"fire_event",
lambda data, tag, cb=None, timeout=60: True,
),
(
"fire_event_async",
lambda data, tag, cb=None, timeout=60: salt.ext.tornado.gen.maybe_future(
True
),
),
],
)
def test_send_req_fires_completion_event(event):
event_enter = MagicMock()
event_enter.send.side_effect = event[1]
event = MagicMock()
event.__enter__.return_value = event_enter
with patch("salt.utils.event.get_event", return_value=event):
opts = salt.config.DEFAULT_MINION_OPTS.copy()
opts["random_startup_delay"] = 0
opts["return_retry_tries"] = 30
opts["grains"] = {}
with patch("salt.loader.grains"):
minion = salt.minion.Minion(opts)
load = {"load": "value"}
timeout = 60
if "async" in event[0]:
rtn = minion._send_req_async(load, timeout).result()
else:
rtn = minion._send_req_sync(load, timeout)
# get the
for idx, call in enumerate(event.mock_calls, 1):
if "fire_event" in call[0]:
condition_event_tag = (
len(call.args) > 1
and call.args[1] == "__master_req_channel_payload"
)
condition_event_tag_error = "{} != {}; Call(number={}): {}".format(
idx, call, call.args[1], "__master_req_channel_payload"
)
condition_timeout = (
len(call.kwargs) == 1 and call.kwargs["timeout"] == timeout
)
condition_timeout_error = "{} != {}; Call(number={}): {}".format(
idx, call, call.kwargs["timeout"], timeout
)
fire_event_called = True
assert condition_event_tag, condition_event_tag_error
assert condition_timeout, condition_timeout_error
assert fire_event_called
assert rtn
@patch("salt.channel.client.ReqChannel.factory")
def test_mine_send_tries(req_channel_factory):
channel_enter = MagicMock()
channel_enter.send.side_effect = lambda load, timeout, tries: tries
channel = MagicMock()
channel.__enter__.return_value = channel_enter
req_channel_factory.return_value = channel
opts = {
"random_startup_delay": 0,
"grains": {},
"return_retry_tries": 20,
"minion_sign_messages": False,
}
with patch("salt.loader.grains"):
minion = salt.minion.Minion(opts)
minion.tok = "token"
data = {}
tag = "tag"
rtn = minion._mine_send(tag, data)
assert rtn == 20
def test_invalid_master_address():
opts = salt.config.DEFAULT_MINION_OPTS.copy()
with patch.dict(
opts,
{
"ipv6": False,
"master": float("127.0"),
"master_port": "4555",
"retry_dns": False,
},
):
pytest.raises(SaltSystemExit, salt.minion.resolve_dns, opts)
def test_source_int_name_local():
"""
test when file_client local and
source_interface_name is set
"""
interfaces = {
"bond0.1234": {
"hwaddr": "01:01:01:d0:d0:d0",
"up": True,
"inet": [
{
"broadcast": "111.1.111.255",
"netmask": "111.1.0.0",
"label": "bond0",
"address": "111.1.0.1",
}
],
}
}
opts = salt.config.DEFAULT_MINION_OPTS.copy()
with patch.dict(
opts,
{
"ipv6": False,
"master": "127.0.0.1",
"master_port": "4555",
"file_client": "local",
"source_interface_name": "bond0.1234",
"source_ret_port": 49017,
"source_publish_port": 49018,
},
), patch("salt.utils.network.interfaces", MagicMock(return_value=interfaces)):
assert salt.minion.resolve_dns(opts) == {
"master_ip": "127.0.0.1",
"source_ip": "111.1.0.1",
"source_ret_port": 49017,
"source_publish_port": 49018,
"master_uri": "tcp://127.0.0.1:4555",
}
@pytest.mark.slow_test
def test_source_int_name_remote():
"""
test when file_client remote and
source_interface_name is set and
interface is down
"""
interfaces = {
"bond0.1234": {
"hwaddr": "01:01:01:d0:d0:d0",
"up": False,
"in
|
et": [
|
{
"broadcast": "111.1.111.255",
"netmask": "111.1.0.0",
"label": "bond0",
"address": "111.1.0.1",
}
],
}
}
opts = salt.config.DEFAULT_MINION_OPTS.copy()
with patch.dict(
opts,
{
"ipv6": False,
"master": "127.0.0.1",
"master_port": "4555",
"file_client": "remote",
"source_interface_name": "bond0.1234",
"source_ret_port": 49017,
"source_publish_port": 49018,
},
), patch("salt.utils.network.interfaces", MagicMock(return_value=interfaces)):
assert salt.minion.resolve_dns(opts) == {
"master_ip": "127.0.0.1",
"source_ret_port": 49017,
"source_publish_port": 49018,
"master_uri": "tcp://127.0.0.1:4555",
}
@pytest.mark.slow_test
def test_source_address():
"""
test when source_address is set
"""
interfaces = {
"bond0.1234": {
"hwaddr": "01:01:01:d0:d0:d0",
"up": False,
"inet": [
{
"broadcast": "111.1.111.255",
"netmask": "111.1.0.0",
"label": "bond0",
"address": "111.1.0.1",
}
],
}
}
opts = salt.config.DEFAULT_MINION_OPTS.copy()
with patch.dict(
opts,
{
"ipv6": False,
"master": "127.0.0.1",
"master_port": "4555",
"file_client": "local",
"source_interface_name": "",
"source_address": "111.1.0.1",
"source_ret_port": 49017,
"source_publish_port": 49018,
},
), patch("salt.utils.network.interfaces", MagicMock(return_value=interfaces)):
|
andela-kanyanwu/food-bot-review
|
rtmbot.py
|
Python
|
mit
| 7,049 | 0.001844 |
#!/usr/bin/env python
import sys
# sys.dont_write_bytecode = True
import glob
import os
import time
import logging
import os.path
from argparse import ArgumentParser
class RtmBot(object):
def __init__(self, token):
self.last_ping = 0
self.token = token
self.bot_plugins = []
self.slack_client = None
def connect(self):
"""Convenience method that creates Server instance"""
from slackclient import SlackClient
self.slack_client = SlackClient(self.token)
self.slack_client.rtm_connect()
def start(self):
self.connect()
self.load_plugins()
while True:
            for reply in self.slack_client.rtm_read():
self.input(reply)
self.crons()
self.output()
self.autoping()
time.sleep(.5)
def autoping(self):
# hardcode the interval to 3 seconds
now = int(time.time())
if now > self.last_ping + 3:
self.slack_client.server.ping()
self.last_ping = now
def input(self, data):
if "type" in data:
function_name = "process_" + data["type"]
logging.debug("got {}".format(function_name))
for plugin in self.bot_plugins:
plugin.register_jobs()
plugin.do(function_name, data)
def output(self):
for plugin in self.bot_plugins:
limiter = False
for output in plugin.do_output():
channel = self.slack_client.server.channels.find(output[0])
if channel != None and output[1] != None:
if limiter == True:
time.sleep(.1)
limiter = False
message = output[1].encode('ascii', 'ignore')
channel.send_message("{}".format(message))
limiter = True
def crons(self):
for plugin in self.bot_plugins:
plugin.do_jobs()
def load_plugins(self):
for plugin in glob.glob(directory + '/plugins/*'):
sys.path.insert(0, plugin)
sys.path.insert(0, directory + '/plugins/')
for plugin in glob.glob(directory + '/plugins/*.py') + glob.glob(
directory + '/plugins/*/*.py'):
logging.info(plugin)
name = plugin.split('/')[-1][:-3]
try:
self.bot_plugins.append(Plugin(name))
except:
import traceback
traceback_msg = traceback.format_exc()
logging.error("error loading plugin {name} {traceback_msg}".format(name=name, traceback_msg=traceback_msg))
class Plugin(object):
def __init__(self, name, plugin_config={}):
self.name = name
self.jobs = []
self.module = __import__(name)
self.register_jobs()
self.outputs = []
if name in config:
logging.info("config found for: " + name)
self.module.config = config[name]
if 'setup' in dir(self.module):
self.module.setup()
def register_jobs(self):
if 'crontable' in dir(self.module):
for interval, function in self.module.crontable:
self.jobs.append(Job(interval, eval("self.module." + function)))
logging.info(self.module.crontable)
self.module.crontable = []
else:
self.module.crontable = []
def do(self, function_name, data):
if function_name in dir(self.module):
# this makes the plugin fail with stack trace in debug mode
if not debug:
try:
eval("self.module." + function_name)(data)
except:
logging.debug("problem in module {} {}".format(function_name, data))
else:
eval("self.module." + function_name)(data)
if "catch_all" in dir(self.module):
try:
self.module.catch_all(data)
except:
logging.debug("problem in catch all")
def do_jobs(self):
for job in self.jobs:
job.check()
def do_output(self):
output = []
while True:
if 'outputs' in dir(self.module):
if len(self.module.outputs) > 0:
logging.info("output from {}".format(self.module))
output.append(self.module.outputs.pop(0))
else:
break
else:
self.module.outputs = []
return output
class Job(object):
def __init__(self, interval, function):
self.function = function
self.interval = interval
self.lastrun = 0
def __str__(self):
return "{} {} {}".format(self.function, self.interval, self.lastrun)
def __repr__(self):
return self.__str__()
def check(self):
if self.lastrun + self.interval < time.time():
if not debug:
try:
self.function()
except:
logging.debug("problem")
else:
self.function()
self.lastrun = time.time()
pass
class UnknownChannel(Exception):
pass
def main_loop():
if "LOGFILE" in config:
logging.basicConfig(filename=config["LOGFILE"], level=logging.INFO,
format='%(asctime)s %(message)s')
logging.info(directory)
try:
bot.start()
except KeyboardInterrupt:
sys.exit(0)
except:
logging.exception('OOPS')
def parse_args():
parser = ArgumentParser()
parser.add_argument(
'-c',
'--config',
help='Full path to config file.',
metavar='path'
)
return parser.parse_args()
if __name__ == "__main__":
try:
from config import Config
args = parse_args()
directory = os.path.dirname(sys.argv[0])
if not directory.startswith('/'):
directory = os.path.abspath("{}/{}".format(os.getcwd(),
directory
))
config = Config()
if os.path.exists('./rtmbot.conf'):
config.load_yaml(args.config or 'rtmbot.conf')
else:
config.load_os_environ_vars('FB__')
logging.basicConfig(stream=sys.stdout, filename='debug.log',
level=logging.DEBUG if config["DEBUG"] else logging.INFO)
logging.info('Bot is')
token = config["SLACK_TOKEN"]
debug = config["DEBUG"]
bot = RtmBot(token)
site_plugins = []
files_currently_downloading = []
job_hash = {}
if config["DAEMON"] in ['True', True]:
import daemon
with daemon.DaemonContext():
main_loop()
else:
main_loop()
except:
import traceback
print traceback.format_exc()
|
CERNDocumentServer/invenio
|
modules/miscutil/lib/mailutils.py
|
Python
|
gpl-2.0
| 22,698 | 0.001983 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Invenio mail sending utilities. send_email() is the main API function
people should be using; just check out its docstring.
"""
__revision__ = "$Id$"
import os
import re
import sys
from cStringIO import StringIO
from time import sleep
import smtplib
import socket
from email import Encoders
from email.Header import Header
from email.MIMEBase import MIMEBase
from email.MIMEImage import MIMEImage
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.Utils import formatdate
from formatter import DumbWriter, AbstractFormatter
from invenio.access_control_config import CFG_TEMP_EMAIL_ADDRESS
from invenio.config import \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_URL, \
CFG_SITE_LANG, \
CFG_SITE_NAME_INTL, \
CFG_SITE_NAME, \
CFG_SITE_ADMIN_EMAIL, \
CFG_MISCUTIL_SMTP_HOST, \
CFG_MISCUTIL_SMTP_PORT, \
CFG_VERSION, \
CFG_DEVEL_SITE
from invenio.errorlib import register_exception
from invenio.messages import wash_language, gettext_set_language
from invenio.miscutil_config import InvenioMiscUtilError
from invenio.textutils import guess_minimum_encoding
try:
from invenio.config import \
CFG_MISCUTIL_SMTP_USER,\
CFG_MISCUTIL_SMTP_PASS,\
CFG_MISCUTIL_SMTP_TLS
except ImportError:
CFG_MISCUTIL_SMTP_USER = ''
CFG_MISCUTIL_SMTP_PASS = ''
CFG_MISCUTIL_SMTP_TLS = False
def scheduled_send_email(fromaddr,
toaddr,
subject="",
content="",
header=None,
footer=None,
copy_to_admin=0,
attempt_times=1,
attempt_sleeptime=10,
user=None,
other_bibtasklet_arguments=None,
replytoaddr="",
bccaddr="",
):
"""
Like send_email, but send an email via the bibsched
infrastructure.
@param fromaddr: sender
@type fromaddr: string
@param toaddr: list of receivers
@type toaddr: string (comma separated) or list of strings
@param subject: the subject
@param content: the body of the message
@param header: optional header, otherwise default is used
@param footer: optional footer, otherwise default is used
@param copy_to_admin: set to 1 in order to send email the admins
@param attempt_times: try at least n times before giving up sending
@param attempt_sleeptime: number of seconds to sleep between two attempts
@param user: the user name to user when scheduling the bibtasklet. If
None, the sender will be used
@param other_bibtasklet_arguments: other arguments to append to the list
of arguments to the call of task_low_level_submission
@param replytoaddr: [string or list-of-strings] to be used for the
reply-to header of the email (if string, then
receivers are separated by ',')
@param bccaddr: [string or list-of-strings] to be used for BCC header of the email
(if string, then receivers are separated by ',')
@return: the scheduled bibtasklet
"""
from invenio.bibtask import task_low_level_submission
if not isinstance(toaddr, (unicode, str)):
toaddr = ','.join(toaddr)
if not isinstance(replytoaddr, (unicode, str)):
replytoaddr = ','.join(replytoaddr)
toaddr = remove_temporary_emails(toaddr)
if user is None:
user = fromaddr
if other_bibtasklet_arguments is None:
other_bibtasklet_arguments = []
else:
other_bibtasklet_arguments = list(other_bibtasklet_arguments)
if not header is None:
other_bibtasklet_arguments.extend(("-a", "header=%s" % header))
if not footer is None:
other_bibtasklet_arguments.extend(("-a", "footer=%s" % footer))
return task_low_level_submission(
"bibtasklet", user, "-T", "bst_send_email",
"-a", "fromaddr=%s" % fromaddr,
"-a", "toaddr=%s" % toaddr,
"-a", "replytoaddr=%s" % replytoaddr,
"-a", "subject=%s" % subject,
"-a", "content=%s" % content,
"-a", "copy_to_admin=%s" % copy_to_admin,
"-a", "attempt_times=%s" % attempt_times,
"-a", "attempt_sleeptime=%s" % attempt_sleeptime,
"-a", "bccaddr=%s" % bccaddr,
*other_bibtasklet_arguments)
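# Illustrative sketch (added note, not part of the original module): a minimal
# call that schedules a notification through bibsched; the addresses, subject
# and body below are placeholder values.
#
#     scheduled_send_email(
#         fromaddr=CFG_SITE_SUPPORT_EMAIL,
#         toaddr=['user1@example.org', 'user2@example.org'],
#         subject='Harvesting finished',
#         content='All records were processed.',
#         copy_to_admin=1)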
def send_email(fromaddr,
toaddr,
subject="",
content="",
html_content='',
html_images=None,
header=None,
footer=None,
html_header=None,
html_footer=None,
copy_to_admin=0,
attempt_times=1,
attempt_sleeptime=10,
debug_level=0,
ln=CFG_SITE_LANG,
charset=None,
replytoaddr="",
attachments=None,
bccaddr="",
forward_failures_to_admin=True,
):
"""Send a forged email to TOADDR from FROMADDR with message created from subjet, content and possibly
header and footer.
@param fromaddr: [string] sender
@param toaddr: [string or list-of-strings] list of receivers (if string, then
receivers are separated by ','). BEWARE: If more than once receiptiant is given,
the receivers are put in BCC and To will be "Undisclosed.Recipients:".
@param subject: [string] subject of the email
@param content: [string] content of the email
@param html_content: [string] html version of the email
@param html_images: [dict] dictionary of image id, image path
@param header: [string] header to add, None for the Default
@param footer: [string] footer to add, None for the Default
@param html_header: [string] header to add to the html part, None for the Default
@param html_footer: [string] footer to add to the html part, None for the Default
@param copy_to_admin: [int] if 1 add CFG_SITE_ADMIN_EMAIL in receivers
@param attempt_times: [int] number of tries
@param attempt_sleeptime: [int] seconds in between tries
@param debug_level: [int] debug level
@param ln: [string] invenio language
@param charset: [string] the content charset. By default is None which means
to try to encode the email as ascii, then latin1 then utf-8.
@param replytoaddr: [string or list-of-strings] to be used for the
reply-to header of the email (if string, then
receivers are separated by ',')
@param attachments: list of paths of files to be attached. Alternatively,
every element of the list could be a tuple: (filename, mimetype)
@param bccaddr: [string or list-of-strings] to be used for BCC header of the email
(if string, then receivers are separated by ',')
@param forward_failures_to_admin: [bool] prevents infinite recursion
in case of admin reporting,
when the problem is not in
the e-mail address format,
but rather in the network
If
|
Diyago/Machine-Learning-scripts
|
DEEP LEARNING/segmentation/Kaggle TGS Salt Identification Challenge/v2/modules/build.py
|
Python
|
apache-2.0
| 544 | 0 |
import os
from torch.utils.ffi import create_extension
sources = ["src/lib_cffi.cpp"]
headers = ["src/lib_cffi.h"]
extra_objects = ["src/bn.o"]
with_cuda = True
this_file = os.path.dirname(os.path.realpath(__file__))
extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]
ffi = create_extension(
"_ext",
    headers=headers,
sources=sources,
relative_to=__file__,
with_cuda=with_cuda,
extra_objects=extra_objects,
extra_compile_args=["-std=c++11"],
)
if __name__ == "__main__":
ffi.build()
|
Programmica/pygtk-tutorial
|
examples/socket.py
|
Python
|
cc0-1.0
| 572 | 0.005245 |
#!/usr/bin/env python
import gtk, sys, string
class Socket:
def __init__(self):
window = gtk.Window()
window.set_default_size(200, 200)
socket = gtk.Socket()
window.add(socket)
        print "Socket ID:", socket.get_id()
if len(sys.argv) == 2:
socket.add_id(long(sys.argv[1]))
window.connect("destroy", gtk.main_quit)
socket.connect("plug-added", self.plugged_event)
window.show_all()
def plugged_event(self, widget):
print "A plug has been inserted."
Socket()
gtk.main()
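# Companion sketch (added note, not in the original example): the socket ID
# printed above is meant to be passed to a second process that embeds its
# window with gtk.Plug, roughly as follows (socket_id would be the pasted
# value; API usage assumed from PyGTK 2.x):
#
#     plug = gtk.Plug(long(socket_id))
#     plug.add(gtk.Button("Embedded button"))
#     plug.show_all()
#     gtk.main()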
|
louyihua/edx-platform
|
common/test/acceptance/tests/video/test_video_events.py
|
Python
|
agpl-3.0
| 13,984 | 0.002932 |
"""Ensure videos emit proper events"""
import datetime
import json
from nose.plugins.attrib import attr
import ddt
from common.test.acceptance.tests.helpers import EventsTestMixin
from common.test.acceptance.tests.video.test_video_module import VideoBaseTest
from common.test.acceptance.pages.lms.video.video import _parse_time_str
from openedx.core.lib.tests.assertions.events import assert_event_matches, assert_events_equal
from opaque_keys.edx.keys import UsageKey, CourseKey
class VideoEventsTestMixin(EventsTestMixin, VideoBaseTest):
"""
Useful helper methods to test video player event emission.
"""
def assert_payload_contains_ids(self, video_event):
"""
Video events should all contain "id" and "code" attributes in their payload.
This function asserts that those fields are present and have correct values.
"""
video_descriptors = self.course_fixture.get_nested_xblocks(category='video')
video_desc = video_descriptors[0]
video_locator = UsageKey.from_string(video_desc.locator)
expected_event = {
'event': {
'id': video_locator.html_id(),
'code': '3_yD_cEKoCk'
}
}
self.assert_events_match([expected_event], [video_event])
def assert_valid_control_event_at_time(self, video_event, time_in_seconds):
"""
Video control events should contain valid ID fields and a valid "currentTime" field.
This function asserts that those fields are present and have correct values.
"""
current_time = json.loads(video_event['event'])['currentTime']
self.assertAlmostEqual(current_time, time_in_seconds, delta=1)
def assert_field_type(self, event_dict, field, field_type):
"""Assert that a particular `field` in the `event_dict` has a particular type"""
self.assertIn(field, event_dict, '{0} not found in the root of the event'.format(field))
self.assertTrue(
isinstance(event_dict[field], field_type),
'Expected "{key}" to be a "{field_type}", but it has the value "{value}" of type "{t}"'.format(
key=field,
value=event_dict[field],
t=type(event_dict[field]),
field_type=field_type,
)
)
class VideoEventsTest(VideoEventsTestMixin):
""" Test video player event emission """
def test_video_control_events(self):
"""
Scenario: Video component is rendered in the LMS in Youtube mode without HTML5 sources
Given the course has a Video component in "Youtube" mode
And I play the video
And I watch 5 seconds of it
And I pause the video
Then a "load_video" event is emitted
And a "play_video" event is emitted
And a "pause_video" event is emitted
"""
def is_video_event(event):
"""Filter out anything other than the video events of interest"""
            return event['event_type'] in ('load_video', 'play_video', 'pause_video')
captured_events = []
with self.capture_events(is_video_event, number_of_matches=3, captured_events=captured_events):
self.navigate_to_video()
self.video.click_player_button('play')
self.video.wait_for_position('0:05')
self.video.click_player_button('pause')
for idx, video_event in enumerate(captured_events):
self.assert_payload_contains_ids(video_event)
if idx == 0:
assert_event_matches({'event_type': 'load_video'}, video_event)
elif idx == 1:
assert_event_matches({'event_type': 'play_video'}, video_event)
self.assert_valid_control_event_at_time(video_event, 0)
elif idx == 2:
assert_event_matches({'event_type': 'pause_video'}, video_event)
self.assert_valid_control_event_at_time(video_event, self.video.seconds)
def test_strict_event_format(self):
"""
This test makes a very strong assertion about the fields present in events. The goal of it is to ensure that new
fields are not added to all events mistakenly. It should be the only existing test that is updated when new top
level fields are added to all events.
"""
captured_events = []
with self.capture_events(lambda e: e['event_type'] == 'load_video', captured_events=captured_events):
self.navigate_to_video()
load_video_event = captured_events[0]
# Validate the event payload
self.assert_payload_contains_ids(load_video_event)
# We cannot predict the value of these fields so we make weaker assertions about them
dynamic_string_fields = (
'accept_language',
'agent',
'host',
'ip',
'event',
'session'
)
for field in dynamic_string_fields:
self.assert_field_type(load_video_event, field, basestring)
self.assertIn(field, load_video_event, '{0} not found in the root of the event'.format(field))
del load_video_event[field]
# A weak assertion for the timestamp as well
self.assert_field_type(load_video_event, 'time', datetime.datetime)
del load_video_event['time']
# Note that all unpredictable fields have been deleted from the event at this point
course_key = CourseKey.from_string(self.course_id)
static_fields_pattern = {
'context': {
'course_id': unicode(course_key),
'org_id': course_key.org,
'path': '/event',
'user_id': self.user_info['user_id']
},
'event_source': 'browser',
'event_type': 'load_video',
'username': self.user_info['username'],
'page': self.browser.current_url,
'referer': self.browser.current_url,
'name': 'load_video',
}
assert_events_equal(static_fields_pattern, load_video_event)
@attr(shard=8)
@ddt.ddt
class VideoBumperEventsTest(VideoEventsTestMixin):
""" Test bumper video event emission """
# helper methods
def watch_video_and_skip(self):
"""
Wait 5 seconds and press "skip" button.
"""
self.video.wait_for_position('0:05')
self.video.click_player_button('skip_bumper')
def watch_video_and_dismiss(self):
"""
Wait 5 seconds and press "do not show again" button.
"""
self.video.wait_for_position('0:05')
self.video.click_player_button('do_not_show_again')
def wait_for_state(self, state='finished'):
"""
Wait until video will be in given state.
Finished state means that video is played to the end.
"""
self.video.wait_for_state(state)
def add_bumper(self):
"""
Add video bumper to the course.
"""
additional_data = {
u'video_bumper': {
u'value': {
"transcripts": {},
"video_id": "video_001"
}
}
}
self.course_fixture.add_advanced_settings(additional_data)
@ddt.data(
('edx.video.bumper.skipped', watch_video_and_skip),
('edx.video.bumper.dismissed', watch_video_and_dismiss),
('edx.video.bumper.stopped', wait_for_state)
)
@ddt.unpack
def test_video_control_events(self, event_type, action):
"""
Scenario: Video component with pre-roll emits events correctly
Given the course has a Video component in "Youtube" mode with pre-roll enabled
And I click on the video poster
And the pre-roll video start playing
And I watch (5 seconds/5 seconds/to the end of) it
And I click (skip/do not show again) video button
Then a "edx.video.bumper.loaded" event is emitted
And a "edx.video.bumper.played" event is emitted
And a "edx.video.bumper.skipped/dismissed/stopped" event is emitted
And a "load_video" event is emit
|
EmanueleCannizzaro/scons
|
test/QT/up-to-date.py
|
Python
|
mit
| 4,303 | 0.002789 |
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/QT/up-to-date.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Validate that a stripped-down real-world Qt configuation (thanks
to Leanid Nazdrynau) with a generated .h file is correctly
up-to-date after a build.
(This catches a bug that was introduced during a signature refactoring
ca. September 2005.)
"""
import os
import TestSCons
_obj = TestSCons._obj
test = TestSCons.TestSCons()
if not os.environ.get('QTDIR', None):
x ="External environment variable $QTDIR not set; skipping test(s).\n"
test.skip_test(x)
test.subdir('layer',
['layer', 'aclock'],
['layer', 'aclock', 'qt_bug'])
test.write('SConstruct', """\
import os
aa=os.getcwd()
env=Environment(tools=['default','expheaders','qt'],toolpath=[aa])
env["EXP_HEADER_ABS"]=os.path.join(os.getcwd(),'include')
if not os.access(env["EXP_HEADER_ABS"],os.F_OK):
os.mkdir (env["EXP_HEADER_ABS"])
Export('env')
env.SConscript('layer/aclock/qt_bug/SConscript')
""")
test.write('expheaders.py', """\
import SCons.Defaults
def ExpHeaderScanner(node, env, path):
return []
def generate(env):
HeaderAction=SCons.Action.Action([SCons.Defaults.Copy('$TARGET','$SOURCE'),SCons.Defaults.Chmod('$TARGET',0755)])
HeaderBuilder= SCons.Builder.Builder(action=HeaderAction)
env['BUILDERS']['ExportHeaders'] = HeaderBuilder
def exists(env):
return 0
""")
test.write(['layer', 'aclock', 'qt_bug', 'SConscript'], """\
import os
Import ("env")
env.ExportHeaders(os.path.join(env["EXP_HEADER_ABS"],'main.h'), 'main.h')
env.ExportHeaders(os.path.join(env["EXP_HEADER_ABS"],'migraform.h'), 'migraform.h')
env.Append(CPPPATH=env["EXP_HEADER_ABS"])
env.StaticLibrary('all',['main.ui','migraform.ui','my.cc'])
""")
test.write(['layer', 'aclock', 'qt_bug', 'main.ui'], """\
<!DOCTYPE UI><UI version="3.3" stdsetdef="1">
<class>Main</class>
<widget class="QWizard">
<property name="name">
<cstring>Main</cstring>
</property>
<property name="geometry">
<rect>
<x>0</x>
<y>0</y>
<width>600</width>
<height>385</height>
</rect>
</property>
</widget>
<includes>
<include location="local" impldecl="in implementation">migraform.h</include>
</includes>
</UI>
""")
test.write(['layer', 'aclock', 'qt_bug', 'migraform.ui'], """\
<!DOCTYPE UI><UI version="3.3" stdsetdef="1">
<class>MigrateForm</class>
<widget class="QWizard">
<property name="name">
<cstring>MigrateForm</cstring>
</property>
<property name="geometry">
<rect>
<x>0</x>
<y>0</y>
<width>600</width>
<height>385</height>
</rect>
</property>
</widget>
</UI>
""")
test.write(['layer', 'aclock', 'qt_bug', 'my.cc'], """\
#include <main.h>
""")
my_obj = 'layer/aclock/qt_bug/my'+_obj
test.run(arguments = my_obj, stderr=None)
expect = my_obj.replace( '/', os.sep )
test.up_to_date(options = '--debug=explain',
arguments = (expect),
stderr=None)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
bcui6611/healthchecker
|
cluster_stats.py
|
Python
|
apache-2.0
| 19,273 | 0.01349 |
import stats_buffer
import util_cli as util
class BucketSummary:
def run(self, accessor):
return stats_buffer.bucket_info
class DGMRatio:
def run(self, accessor):
result = []
hdd_total = 0
ram_total = 0
for node, nodeinfo in stats_buffer.nodes.iteritems():
if nodeinfo["StorageInfo"].has_key("hdd"):
hdd_total += nodeinfo['StorageInfo']['hdd']['usedByData']
if nodeinfo["StorageInfo"].has_key("ram"):
ram_total += nodeinfo['StorageInfo']['ram']['usedByData']
if ram_total > 0:
ratio = hdd_total / ram_total
else:
ratio = 0
return ratio
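# Illustrative reading of DGMRatio (not part of the original module): with
# 20 GB of bucket data on disk and 5 GB resident in RAM across all nodes,
# run() returns 4, i.e. the data set is roughly four times larger than what
# memory currently holds.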
class ARRatio:
def run(self, accessor):
result = {}
cluster = 0
for bucket, stats_info in stats_buffer.buckets.iteritems():
item_avg = {
"curr_items": [],
"vb_replica_curr_items": [],
}
num_error = []
for counter in accessor["counter"]:
values = stats_info[accessor["scale"]][counter]
nodeStats = values["nodeStats"]
samplesCount = values["samplesCount"]
for node, vals in nodeStats.iteritems():
avg = sum(vals) / samplesCount
item_avg[counter].append((node, avg))
res = []
active_total = replica_total = 0
for active, replica in zip(item_avg['curr_items'], item_avg['vb_replica_curr_items']):
if replica[1] == 0:
res.append((active[0], "No replica"))
else:
ratio = 1.0 * active[1] / replica[1]
res.append((active[0], util.pretty_float(ratio)))
if ratio < accessor["threshold"]:
num_error.append({"node":active[0], "value": ratio})
active_total += active[1]
replica_total += replica[1]
if replica_total == 0:
res.append(("total", "no replica"))
else:
ratio = active_total * 1.0 / replica_total
cluster += ratio
res.append(("total", util.pretty_float(ratio)))
if ratio != accessor["threshold"]:
num_error.append({"node":"total", "value": ratio})
if len(num_error) > 0:
res.append(("error", num_error))
result[bucket] = res
result["cluster"] = util.pretty_float(cluster / len(stats_buffer.buckets))
return result
class OpsRatio:
def run(self, accessor):
result = {}
for bucket, stats_info in stats_buffer.buckets.iteritems():
ops_avg = {
"cmd_get": [],
"cmd_set": [],
"delete_hits" : [],
}
for counter in accessor["counter"]:
values = stats_info[accessor["scale"]][counter]
nodeStats = values["nodeStats"]
samplesCount = values["samplesCount"]
for node, vals in nodeStats.iteritems():
avg = sum(vals) / samplesCount
ops_avg[counter].append((node, avg))
res = []
read_total = write_total = del_total = 0
for read, write, delete in zip(ops_avg['cmd_get'], ops_avg['cmd_set'], ops_avg['delete_hits']):
count = read[1] + write[1] + delete[1]
if count == 0:
res.append((read[0], "0:0:0"))
else:
read_ratio = read[1] *100 / count
read_total += read_ratio
write_ratio = write[1] * 100 / count
write_total += write_ratio
del_ratio = delete[1] * 100 / count
del_total += del_ratio
res.append((read[0], "{0}:{1}:{2}".format(int(read_ratio+.5), int(write_ratio+.5), int(del_ratio+.5))))
read_total /= len(ops_avg['cmd_get'])
write_total /= len(ops_avg['cmd_set'])
del_total /= len(ops_avg['delete_hits'])
res.append(("total", "{0}:{1}:{2}".format(int(read_total+.5), int(write_total+.5), int(del_total+.5))))
result[bucket] = res
return result
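# Illustrative output row from OpsRatio (hypothetical node): ("10.1.2.3:8091", "70:25:5"),
# i.e. roughly 70% reads, 25% writes and 5% deletes over the sampled window,
# followed by a ("total", ...) row averaged across nodes.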
class CacheMissRatio:
def run(self, accessor):
result = {}
cluster = 0
        for bucket, stats_info in stats_buffer.buckets.iteritems():
values = stats_info[accessor["scale"]][accessor["counter"]]
timestamps = values["timestamp"]
timestamps = [x - timestamps[0] for x in timestamps]
nodeStats = values["nodeStats"]
samplesCount = values["samplesCount"]
trend = []
total = 0
data = []
num_error = []
            for node, vals in nodeStats.iteritems():
#a, b = util.linreg(timestamps, vals)
value = sum(vals) / samplesCount
total += value
if value > accessor["threshold"]:
num_error.append({"node":node, "value":value})
trend.append((node, util.pretty_float(value)))
data.append(value)
total /= len(nodeStats)
trend.append(("total", util.pretty_float(total)))
trend.append(("variance", util.two_pass_variance(data)))
if len(num_error) > 0:
trend.append(("error", num_error))
cluster += total
result[bucket] = trend
if len(stats_buffer.buckets) > 0:
result["cluster"] = util.pretty_float(cluster / len(stats_buffer.buckets))
return result
class MemUsed:
def run(self, accessor):
result = {}
cluster = 0
for bucket, stats_info in stats_buffer.buckets.iteritems():
values = stats_info[accessor["scale"]][accessor["counter"]]
timestamps = values["timestamp"]
timestamps = [x - timestamps[0] for x in timestamps]
nodeStats = values["nodeStats"]
samplesCount = values["samplesCount"]
trend = []
total = 0
data = []
for node, vals in nodeStats.iteritems():
avg = sum(vals) / samplesCount
trend.append((node, util.size_label(avg)))
data.append(avg)
#print data
trend.append(("variance", util.two_pass_variance(data)))
result[bucket] = trend
return result
class ItemGrowth:
def run(self, accessor):
result = {}
start_cluster = 0
end_cluster = 0
for bucket, stats_info in stats_buffer.buckets.iteritems():
trend = []
values = stats_info[accessor["scale"]][accessor["counter"]]
timestamps = values["timestamp"]
timestamps = [x - timestamps[0] for x in timestamps]
nodeStats = values["nodeStats"]
samplesCount = values["samplesCount"]
for node, vals in nodeStats.iteritems():
a, b = util.linreg(timestamps, vals)
if b < 1:
trend.append((node, 0))
else:
start_val = b
start_cluster += b
end_val = a * timestamps[-1] + b
end_cluster += end_val
rate = (end_val * 1.0 / b - 1.0) * 100
trend.append((node, util.pretty_float(rate) + "%"))
result[bucket] = trend
if len(stats_buffer.buckets) > 0:
rate = (end_cluster * 1.0 / start_cluster - 1.0) * 100
result["cluster"] = util.pretty_float(rate) + "%"
return result
class NumVbuckt:
def run(self, accessor):
result = {}
for bucket, stats_info in stats_buffer.buckets.iteritems():
num_error = []
values = stats_info[accessor["scale"]][accessor["counter"]]
nodeStats = values["nodeStats"]
for node, vals in nodeStats.iteritems():
if vals[-1] < accessor["threshold"]:
|
i5on9i/echoserver
|
main.py
|
Python
|
apache-2.0
| 1,161 | 0 |
"""`main` is the top level module for your Flask application."""
# Import the Flask Framework
import os
import json
from flask import Flask, request, send_from_directory, render_template
app = Flask(__name__, static_url_path='')
# Note: We don't need to call run() since our application is embedded within
# the App Engine WSGI application server.
@app.route('/')
def hello():
"
|
""Return a friendly HTTP greeting."""
return 'Hello World!'
@app.errorhandler(404)
def page_not_found(e):
"""Return a custom 404 error."""
return 'Sorry, Nothing at this URL.', 404
@app.errorhandler(500)
def application_error(e):
"""Return a custom 500 error."""
return 'Sorry, unexpected error: {}'.format(e), 500
@app.route("/spk/json/<path:path>", methods=['POST', 'GET'])
def send_js(path):
file, ext = os.path.splitext(path)
if ext == "":
ext = ".json"
SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
json_url = os.path.join(SITE_ROOT, "static", "json", file + ext)
s = ''
with open(json_url) as f:
for line in f:
s += line
return s
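# Illustrative request (assumed directory layout): GET /spk/json/config returns
# the contents of static/json/config.json next to this module; when the requested
# path already carries an extension, that extension is used instead of ".json".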
if __name__ == '__main__':
app.run()
|
mezz64/home-assistant
|
homeassistant/components/unifiprotect/select.py
|
Python
|
apache-2.0
| 13,007 | 0.001538 |
"""This component provides select entities for UniFi Protect."""
from __future__ import annotations
from dataclasses import dataclass
from datetime import timedelta
from enum import Enum
import logging
from typing import Any, Final
from pyunifiprotect.data import (
Camera,
DoorbellMessageType,
IRLEDMode,
Light,
LightModeEnableType,
LightModeType,
Liveview,
RecordingMode,
Viewer,
)
from pyunifiprotect.data.devices import LCDMessage
from homeassistant.components.select import SelectEntity, SelectEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import entity_platform
from homeassistant.helpers.entity import EntityCategory
from homeassistant.util.dt import utcnow
from .const import (
DOMAIN,
SERVICE_SET_DOORBELL_MESSAGE,
SET_DOORBELL_LCD_MESSAGE_SCHEMA,
TYPE_EMPTY_VALUE,
)
from .data import ProtectData
from .entity import ProtectDeviceEntity, async_all_device_entities
from .models import ProtectRequiredKeysMixin
from .utils import get_nested_attr
_LOGGER = logging.getLogger(__name__)
_KEY_IR = "infrared"
_KEY_REC_MODE = "recording_mode"
_KEY_VIEWER = "viewer"
_KEY_LIGHT_MOTION = "light_motion"
_KEY_DOORBELL_TEXT = "doorbell_text"
_KEY_PAIRED_CAMERA = "paired_camera"
INFRARED_MODES = [
{"id": IRLEDMode.AUTO.value, "name": "Auto"},
{"id": IRLEDMode.ON.value, "name": "Always Enable"},
{"id": IRLEDMode.AUTO_NO_LED.value, "name": "Auto (Filter Only, no LED's)"},
{"id": IRLEDMode.OFF.value, "name": "Always Disable"},
]
LIGHT_MODE_MOTION = "On Motion - Always"
LIGHT_MODE_MOTION_DARK = "On Motion - When Dark"
LIGHT_MODE_DARK = "When Dark"
LIGHT_MODE_OFF = "Manual"
LIGHT_MODES = [LIGHT_MODE_MOTION, LIGHT_MODE_DARK, LIGHT_MODE_OFF]
LIGHT_MODE_TO_SETTINGS = {
LIGHT_MODE_MOTION: (LightModeType.MOTION.value, LightModeEnableType.ALWAYS.value),
LIGHT_MODE_MOTION_DARK: (
        LightModeType.MOTION.value,
LightModeEnableType.DARK.value,
),
LIGHT_MODE_DARK: (LightModeType.WHEN_DARK.value, LightModeEnableType.DARK.value),
LIGHT_MODE_OFF: (LightModeType.MANUAL.value, None),
}
MOTION_MODE_TO_LIGHT_MODE = [
{"id": LightModeType.MOTION.value, "name": LIGHT_MODE_MOTION},
{"id": f"{LightModeType.MOTION.value}Dark", "name": LIGHT_MODE_MOTION_DARK},
{"id": LightModeType.WHEN_DARK.value, "name": LIGHT_MODE_DARK},
{"id": LightModeType.MANUAL.value, "name": LIGHT_MODE_OFF},
]
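# Illustrative note (not from the upstream module): picking "On Motion - When Dark"
# in Home Assistant resolves through LIGHT_MODE_TO_SETTINGS above to the pair
# (LightModeType.MOTION.value, LightModeEnableType.DARK.value) that is written
# back to the Protect light.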
DEVICE_RECORDING_MODES = [
{"id": mode.value, "name": mode.value.title()} for mode in list(RecordingMode)
]
DEVICE_CLASS_LCD_MESSAGE: Final = "unifiprotect__lcd_message"
@dataclass
class ProtectSelectEntityDescription(ProtectRequiredKeysMixin, SelectEntityDescription):
"""Describes UniFi Protect Select entity."""
ufp_options: list[dict[str, Any]] | None = None
ufp_enum_type: type[Enum] | None = None
ufp_set_function: str | None = None
CAMERA_SELECTS: tuple[ProtectSelectEntityDescription, ...] = (
ProtectSelectEntityDescription(
key=_KEY_REC_MODE,
name="Recording Mode",
icon="mdi:video-outline",
entity_category=EntityCategory.CONFIG,
ufp_options=DEVICE_RECORDING_MODES,
ufp_enum_type=RecordingMode,
ufp_value="recording_settings.mode",
ufp_set_function="set_recording_mode",
),
ProtectSelectEntityDescription(
key=_KEY_IR,
name="Infrared Mode",
icon="mdi:circle-opacity",
entity_category=EntityCategory.CONFIG,
ufp_required_field="feature_flags.has_led_ir",
ufp_options=INFRARED_MODES,
ufp_enum_type=IRLEDMode,
ufp_value="isp_settings.ir_led_mode",
ufp_set_function="set_ir_led_model",
),
ProtectSelectEntityDescription(
key=_KEY_DOORBELL_TEXT,
name="Doorbell Text",
icon="mdi:card-text",
entity_category=EntityCategory.CONFIG,
device_class=DEVICE_CLASS_LCD_MESSAGE,
ufp_required_field="feature_flags.has_lcd_screen",
ufp_value="lcd_message",
),
)
LIGHT_SELECTS: tuple[ProtectSelectEntityDescription, ...] = (
ProtectSelectEntityDescription(
key=_KEY_LIGHT_MOTION,
name="Light Mode",
icon="mdi:spotlight",
entity_category=EntityCategory.CONFIG,
ufp_options=MOTION_MODE_TO_LIGHT_MODE,
ufp_value="light_mode_settings.mode",
),
ProtectSelectEntityDescription(
key=_KEY_PAIRED_CAMERA,
name="Paired Camera",
icon="mdi:cctv",
entity_category=EntityCategory.CONFIG,
ufp_value="camera_id",
),
)
VIEWER_SELECTS: tuple[ProtectSelectEntityDescription, ...] = (
ProtectSelectEntityDescription(
key=_KEY_VIEWER,
name="Liveview",
icon="mdi:view-dashboard",
entity_category=None,
ufp_value="liveview",
ufp_set_function="set_liveview",
),
)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: entity_platform.AddEntitiesCallback,
) -> None:
"""Set up number entities for UniFi Protect integration."""
data: ProtectData = hass.data[DOMAIN][entry.entry_id]
entities: list[ProtectDeviceEntity] = async_all_device_entities(
data,
ProtectSelects,
camera_descs=CAMERA_SELECTS,
light_descs=LIGHT_SELECTS,
viewer_descs=VIEWER_SELECTS,
)
async_add_entities(entities)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_SET_DOORBELL_MESSAGE,
SET_DOORBELL_LCD_MESSAGE_SCHEMA,
"async_set_doorbell_message",
)
class ProtectSelects(ProtectDeviceEntity, SelectEntity):
"""A UniFi Protect Select Entity."""
def __init__(
self,
data: ProtectData,
device: Camera | Light | Viewer,
description: ProtectSelectEntityDescription,
) -> None:
"""Initialize the unifi protect select entity."""
assert description.ufp_value is not None
self.device: Camera | Light | Viewer = device
self.entity_description: ProtectSelectEntityDescription = description
super().__init__(data)
self._attr_name = f"{self.device.name} {self.entity_description.name}"
options = description.ufp_options
if options is not None:
self._attr_options = [item["name"] for item in options]
self._hass_to_unifi_options: dict[str, Any] = {
item["name"]: item["id"] for item in options
}
self._unifi_to_hass_options: dict[Any, str] = {
item["id"]: item["name"] for item in options
}
self._async_set_dynamic_options()
@callback
def _async_update_device_from_protect(self) -> None:
super()._async_update_device_from_protect()
# entities with categories are not exposed for voice and safe to update dynamically
if self.entity_description.entity_category is not None:
_LOGGER.debug(
"Updating dynamic select options for %s", self.entity_description.name
)
self._async_set_dynamic_options()
@callback
def _async_set_dynamic_options(self) -> None:
"""Options that do not actually update dynamically.
        This is due to possible downstream platform dependencies on these options.
"""
if self.entity_description.ufp_options is not None:
return
if self.entity_description.key == _KEY_VIEWER:
options = [
{"id": item.id, "name": item.name}
for item in self.data.api.bootstrap.liveviews.values()
]
elif self.entity_description.key == _KEY_DOORBELL_TEXT:
default_message = (
self.data.api.bootstrap.nvr.doorbell_settings.default_message_text
)
messages = self.data.api.bootstrap.nvr.doorbell_settings.all_messages
built_messages = (
{"id": item.type.value, "name": item.text} for ite
|
ringly/django-postgres-dbdefaults
|
postgresql_dbdefaults/creation.py
|
Python
|
mit
| 61 | 0 |
from django.db.backends.postgresql.creation import * # NOQA
|
wouteroostervld/snaps
|
setup.py
|
Python
|
mit
| 465 | 0.004301 |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'My Project',
'author': 'Wouter Oosterveld',
'url': 'URL to get it at.',
'download_url': 'Where to download it.',
'author_email': 'wouter@fizzyflux.nl',
'version': '0.1',
    'install_requires': ['nose','what','boto'],
'packages': ['snaps'],
'scripts': ['scripts/snaps'],
'name': 'snaps'
}
setup(**config)
|
nop33/indico
|
indico/modules/events/agreements/base.py
|
Python
|
gpl-3.0
| 7,667 | 0.002739 |
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from hashlib import sha1
from flask import render_template
from indico.modules.events.agreements.models.agreements import Agreement
from indico.modules.events.settings import EventSettingsProxy
from indico.util.caching import make_hashable, memoize_request
from indico.util.decorators import cached_classproperty, classproperty
from indico.util.i18n import _
from indico.util.string import return_ascii
from indico.web.flask.templating import get_overridable_template_name, get_template_module
class AgreementPersonInfo(object):
def __init__(self, name=None, email=None, user=None, data=None):
if user:
if not name:
name = user.full_name
if not email:
email = user.email
if not name:
raise ValueError('name is missing')
self.name = name
# Note: If you have persons with no email, you *MUST* have data that uniquely identifies such persons
self.email = email or None
self.user = user
self.data = data
@return_ascii
def __repr__(self):
        return '<AgreementPersonInfo({}, {}, {})>'.format(self.name, self.email, self.identifier)
@property
def identifier(self):
data_string = None
if self.data:
data_string = '-'.join('{}={}'.format(k, make_hashable(v)) for k, v in sorted(self.data.viewitems()))
identifier = '{}:{}'.format(self.email, data_string or None)
        return sha1(identifier).hexdigest()
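# Illustrative identifier derivation (hypothetical person, not original code): with
# email="jane@example.com" and data={'contribution': 42}, the hash input is
# "jane@example.com:contribution=42", so persons that share an email stay
# distinguishable as long as their `data` differs.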
class AgreementDefinitionBase(object):
"""Base class for agreement definitions"""
#: unique name of the agreement definition
name = None
#: readable name of the agreement definition
title = None
#: optional and short description of the agreement definition
description = None
#: url to obtain the paper version of the agreement form
paper_form_url = None
#: template of the agreement form - agreement definition name by default
form_template_name = None
#: template of the email body - emails/agreement_default_body.html by default
email_body_template_name = None
#: plugin containing this agreement definition - assigned automatically
plugin = None
#: default settings for an event
default_event_settings = {'manager_notifications_enabled': True}
#: default message to display when the agreement definition type is disabled
disabled_reason = _('No signatures needed.')
@classproperty
@classmethod
def locator(cls):
return {'definition': cls.name}
@cached_classproperty
@classmethod
def event_settings(cls):
return EventSettingsProxy('agreement_{}'.format(cls.name), cls.default_event_settings)
@classmethod
def can_access_api(cls, user, event):
"""Checks if a user can list the agreements for an event"""
return event.can_manage(user)
@classmethod
def extend_api_data(cls, event, person, agreement, data): # pragma: no cover
"""Extends the data returned in the HTTP API
:param event: the event
:param person: the :class:`AgreementPersonInfo`
:param agreement: the :class:`Agreement` if available
:param data: a dict containing the default data for the agreement
"""
pass
@classmethod
def get_email_body_template(cls, event, **kwargs):
"""Returns the template of the email body for this agreement definition"""
template_name = cls.email_body_template_name or 'emails/agreement_default_body.html'
template_path = get_overridable_template_name(template_name, cls.plugin, 'events/agreements/')
return get_template_module(template_path, event=event)
@classmethod
@memoize_request
def get_people(cls, event):
"""Returns a dictionary of :class:`AgreementPersonInfo` required to sign agreements"""
people = cls.iter_people(event)
if people is None:
return {}
return {p.identifier: p for p in people}
@classmethod
def get_people_not_notified(cls, event):
"""Returns a dictionary of :class:`AgreementPersonInfo` yet to be notified"""
people = cls.get_people(event)
sent_agreements = {a.identifier for a in event.agreements.filter_by(type=cls.name)}
return {k: v for k, v in people.items() if v.identifier not in sent_agreements}
@classmethod
def get_stats_for_signed_agreements(cls, event):
"""Returns a digest of signed agreements on an event
:param event: the event
:return: (everybody_signed, num_accepted, num_rejected)
"""
people = cls.get_people(event)
identifiers = [p.identifier for p in people.itervalues()]
query = event.agreements.filter(Agreement.type == cls.name, Agreement.identifier.in_(identifiers))
num_accepted = query.filter(Agreement.accepted).count()
num_rejected = query.filter(Agreement.rejected).count()
everybody_signed = len(people) == (num_accepted + num_rejected)
return everybody_signed, num_accepted, num_rejected
@classmethod
def is_active(cls, event):
"""Checks if the agreement type is active for a given event"""
return bool(cls.get_people(event))
@classmethod
def is_agreement_orphan(cls, event, agreement):
"""Checks if the agreement no longer has a corresponding person info record"""
return agreement.identifier not in cls.get_people(event)
@classmethod
def render_form(cls, agreement, form, **kwargs):
template_name = cls.form_template_name or '{}.html'.format(cls.name.replace('-', '_'))
template_path = get_overridable_template_name(template_name, cls.plugin, 'events/agreements/')
return render_template(template_path, agreement=agreement, form=form, **kwargs)
@classmethod
def render_data(cls, event, data): # pragma: no cover
"""Returns extra data to display in the agreement list
If you want a column to be rendered as HTML, use a :class:`~markupsafe.Markup`
object instead of a plain string.
:param event: The event containing the agreements
:param data: The data from the :class:`AgreementPersonInfo`
:return: List of extra columns for a row
"""
return None
@classmethod
def handle_accepted(cls, agreement): # pragma: no cover
"""Handles logic on agreement accepted"""
pass
@classmethod
def handle_rejected(cls, agreement): # pragma: no cover
"""Handles logic on agreement rejected"""
pass
@classmethod
def handle_reset(cls, agreement): # pragma: no cover
"""Handles logic on agreement reset"""
pass
@classmethod
def iter_people(cls, event): # pragma: no cover
"""Yields :class:`AgreementPersonInfo` required to sign agreements"""
raise NotImplementedError
|
followyourheart/cloudly
|
vms/views.py
|
Python
|
mit
| 39,971 | 0.014586 |
# -*- coding: utf-8 -*-
import os
import time
import logging
import string
import requests
import unicodedata
import base64
try: import cPickle as pickle
except: import pickle
import datetime
from django.utils import timezone
import json
from pprint import pprint
from django.shortcuts import render_to_response
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.http import HttpResponseForbidden
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect, render
logger = logging.getLogger(__name__)
import boto.ec2
import boto.ec2.cloudwatch
from django.contrib.auth.models import User
from userprofile.models import Profile as userprofile
from userprofile.views import _log_user_activity
from amazon import s3_funcs
from amazon import s3_funcs_shortcuts
from django.contrib.auth.decorators import login_required
from django.template.defaultfilters import filesizeformat, upper
from django.contrib.humanize.templatetags.humanize import naturalday
from cloudly.templatetags.cloud_extras import clean_ps_command
from operator import itemgetter, attrgetter, methodcaller
from cloudly.templatetags.cloud_extras import clear_filename, get_file_extension
from vms.models import Cache
import decimal
from django.db.models.base import ModelState
import pymongo
from pymongo import MongoClient
from pymongo import ASCENDING, DESCENDING
client = MongoClient('mongo', 27017)
mongo = client.cloudly
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
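# date_handler is passed as json.dumps(..., default=date_handler) further down so
# datetime objects in the CloudWatch datapoints serialize as ISO-8601 strings
# (e.g. "2014-05-01T12:00:00"); everything else is returned unchanged.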
@login_required()
def update_session(request):
for value in request.POST:
if(value != 'secret'):
request.session[value] = request.POST[value]
request.session.modified = True
return render_to_response('ajax_null.html', locals())
@login_required()
def aws_vm_view(request,vm_name):
print '-- aws_vm_view'
print request.user
user = request.user
profile = userprofile.objects.get(user=request.user)
user.last_login = datetime.datetime.now()
user.save()
aws_access_key = profile.aws_access_key
aws_secret_key = profile.aws_secret_key
ip = request.META['REMOTE_ADDR']
_log_user_activity(profile,"click","/aws/"+vm_name,"aws_vm_view",ip=ip)
vms_cache = Cache.objects.get(user=user)
vm_cache = vms_cache.vms_response
vm_cache = base64.b64decode(vm_cache)
try:
vm_cache = pickle.loads(vm_cache)[vm_name]
except:
return HttpResponse("XXX " + vm_name)
ec2_region = vm_cache['instance']['region']['name']
if(vm_cache['user_id']!=request.user.id):
return HttpResponse("access denied")
if(vms_cache.vms_console_output_cache):
console_output = vms_cache.vms_console_output_cache
else:
aws_access_key = profile.aws_access_key
aws_secret_key = profile.aws_secret_key
aws_ec2_verified = profile.aws_ec2_verified
ec2conn = boto.ec2.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key)
reservations = ec2conn.get_all_instances(instance_ids=[vm_name,])
instance = reservations[0].instances[0]
console_output = instance.get_console_output()
console_output = console_output.output
if(not console_output):
console_output = ""
vms_cache.vms_console_output_cache = console_output
vms_cache.save()
end = datetime.datetime.utcnow()
start = end - datetime.timedelta(minutes=60)
ec2conn = boto.ec2.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key)
cloudwatch = boto.ec2.cloudwatch.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key)
metric = cloudwatch.list_metrics(dimensions={'InstanceId':vm_cache['id']}, metric_name="NetworkIn")[0]
networkin_datapoints = metric.query(start, end, 'Average', '')
metric = cloudwatch.list_metrics(dimensions={'InstanceId':vm_cache['id']}, metric_name="NetworkOut")[0]
networkout_datapoints = metric.query(start, end, 'Average', '')
metric = cloudwatch.list_metrics(dimensions={'InstanceId':vm_cache['id']}, metric_name="DiskReadOps")[0]
disk_readops_datapoints = metric.query(start, end, 'Average', '')
metric = cloudwatch.list_metrics(dimensions={'InstanceId':vm_cache['id']}, metric_name="DiskWriteOps")[0]
disk_writeops_datapoints = metric.query(start, end, 'Average', '')
metric = cloudwatch.list_metrics(dimensions={'InstanceId':vm_cache['id']}, metric_name="DiskReadBytes")[0]
disk_readbytes_datapoints = metric.query(start, end, 'Average', '')
metric = cloudwatch.list_metrics(dimensions={'InstanceId':vm_cache['id']}, metric_name="DiskWriteBytes")[0]
disk_writebytes_datapoints = metric.query(start, end, 'Average', '')
networkin_datapoints = json.dumps(networkin_datapoints,default=date_handler)
networkout_datapoints = json.dumps(networkout_datapoints,default=date_handler)
disk_readops_datapoints = json.dumps(disk_readops_datapoints,default=date_handler)
disk_writeops_datapoints = json.dumps(disk_writeops_datapoints,default=date_handler)
disk_readbytes_datapoints = json.dumps(disk_readbytes_datapoints,default=date_handler)
disk_writebytes_datapoints = json.dumps(disk_writebytes_datapoints,default=date_handler)
return render_to_response('aws_vm.html', {'vm_name':vm_name,'vm_cache':vm_cache,'console_output':console_output,'networkin_datapoints':networkin_datapoints,'networkout_datapoints':networkout_datapoints,'disk_readops_datapoints':disk_readops_datapoints,'disk_writeops_datapoints':disk_writeops_datapoints,'disk_readbytes_datapoints':disk_readbytes_datapoints,'disk_writebytes_datapoints':disk_writebytes_datapoints,}, context_instance=RequestContext(request))
@login_required()
def control_aws_vm(request, vm_name, action):
print request.user
user = request.user
profile = userprofile.objects.get(user=request.user)
user.last_login = datetime.datetime.now()
user.save()
ip = request.META['REMOTE_ADDR']
_log_user_activity(profile,"click","/aws/"+vm_name+"/"+action+"/","control_aws_vm",ip=ip)
vms_cache = Cache.objects.get(user=user)
vm_cache = vms_cache.vms_response
vm_cache = base64.b64decode(vm_cache)
vm_cache = pickle.loads(vm_cache)[vm_name]
if(vm_cache['user_id']!=request.user.id):
return HttpResponse("access denied")
aws_access_key = profile.aws_access_key
aws_secret_key = profile.aws_secret_key
aws_ec2_verified = profile.aws_ec2_verified
ec2_region = vm_cache['instance']['region']['name']
ec2conn = boto.ec2.connect_to_region(ec2_region,aws_access_key_id=aws_access_key,aws_secret_access_key=aws_secret_key)
if(action=="reboot
|
"):
ec2conn.reboot_instances([vm_name,])
if(action=="start"):
ec2conn.start_instances([vm_name,])
if(action=="stop"):
ec2conn.stop_instances([vm_name,])
if(action=="terminate"):
ec2conn.terminate_instances([vm_name,])
return HttpResponseRedirect("/")
@login_required()
def server_view(request, hwaddr):
print '-- server_view'
print request.user
user = request.user
    profile = userprofile.objects.get(user=request.user)
ip = request.META['REMOTE_ADDR']
_log_user_activity(profile,"click","/server/"+hwaddr,"server_view",ip=ip)
hwaddr_orig = hwaddr
hwaddr = hwaddr.replace('-',':')
server = mongo.servers.find_one({'secret':profile.secret,'uuid':hwaddr,})
server_status = "Running"
if((datetime.datetime.utcnow()-server['last_seen']).total_seconds()>20):
server_status = "Stopped"
if((datetime.datetime.utcnow()-server['last_seen']).total_seconds()>1800):
server_status = "Offline"
try:
uuid = server['uuid']
except:
return HttpResponse("access denied")
disks_usage_ = []
#disks_usage = mongo.disks_usage.find({'uuid':uuid,}).sort('_id',-1).limit(60)
#for i in disks_usage: disks_usage_.append(i)
disks_usage = disks_usage_
networking_ = []
#net
|
RevansChen/online-judge
|
Codewars/8kyu/century-from-year/Python/solution1.py
|
Python
|
mit
| 74 | 0.013514 |
# Python - 3.6.0
century = lambda year: year // 100 + ((year % 100) > 0)
|
ellipticaldoor/dfiid
|
project/content/views.py
|
Python
|
gpl-2.0
| 5,667 | 0.02541 |
from datetime import datetime
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic import View, ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView
from content.models import Sub, SubFollow, Post, Commit
from content.forms import SubForm, PostForm, CommitForm
from notify.models import Noty
from core.core import random_avatar_sub
class CreateSubView(CreateView):
template_name = 'content/sub_create.html'
form_class = SubForm
def form_valid(self, form):
obj = form.save(commit=False)
obj.save()
obj.image = 'sub/%s.png' % (obj.slug)
obj.save()
random_avatar_sub(obj.slug)
return HttpResponseRedirect('/sub')
class SubView(ListView):
template_name = 'content/sub.html'
model = Sub
class FrontView(ListView):
template_name = 'layouts/post_list.html'
paginate_by = 4
def get(self, request, *args, **kwargs):
if request.is_ajax(): self.template_name = 'ajax/post_list.html'
return super(FrontView, self).get(request, *args, **kwargs)
def get_queryset(self):
if self.kwargs['tab'] == 'top': return Post.objects.last_commited()
else: return Post.objects.created()
def get_context_data(self, **kwargs):
context = super(FrontView, self).get_context_data(**kwargs)
context['list'] = 'portada'
context['tab_show'] = self.kwargs['tab']
if self.kwargs['tab'] == 'top': context['list_url'] = '/'
else: context['list_url'] = '/new'
return context
class SubPostListView(ListView):
template_name = 'content/sub_post_list.html'
paginate_by = 4
def get(self, request, *args, **kwargs):
if request.is_ajax(): self.template_name = 'ajax/post_list.html'
return super(SubPostListView, self).get(request, *args, **kwargs)
def get_queryset(self):
if self.kwargs['tab'] == 'top': return Post.objects.sub_last_commited(self.kwargs['sub'])
else: return Post.objects.sub_created(self.kwargs['sub'])
def get_context_data(self, **kwargs):
context = super(SubPostListView, self).get_context_data(**kwargs)
sub = Sub.objects.get(pk=self.kwargs['sub'])
user = self.request.user
if self.kwargs['tab'] == 'followers': context['followers'] = True
context['tab_show'] = self.kwargs['tab']
context['list'] = sub
context['tab'] = self.kwargs['tab']
if self.kwargs['tab'] == 'top': context['list_url'] = '/sub/%s' % sub
else: context['list_url'] = '/sub/%s/new' % sub
context['action'] = 'follow'
if user.is_authenticated():
follow_state = SubFollow.objects.by_id(sub_followid='%s>%s' % (user.pk, sub.pk))
if follow_state: context['action'] = 'unfollow'
else: context['action'] = 'follow'
return context
class PostCommitView(CreateView):
template_name = 'layouts/post_detail.html'
form_class = CommitForm
def get_context_data(self, **kwargs):
context = super(PostCommitView, self).get_context_data(**kwargs)
pk, slug = self.kwargs['pk'], self.kwargs['slug']
context['object'] = Post.objects.by_post(pk, slug)
return context
def form_valid(self, form):
if self.request.user.is_authenticated():
user = self.request.user
post = Post.objects.get(postid=self.kwargs['pk'])
obj = form.save(commit=False)
obj.create_commit(user, post)
if not obj.post.user.pk == user.pk:
noty = Noty.objects.create(user_id=obj.post.user_id, category='C', commit=obj)
noty.create_noty()
return HttpResponseRedirect(obj.get_commit_url())
else:
commit_url = '/post/%s/%s/' % (self.kwargs['pk'], self.kwargs['slug'])
return HttpResponseRedirect('/login/?next=%s' % (commit_url))
class CreatePostView(CreateView):
template_name = 'layouts/post_create.html'
form_class = PostForm
def form_valid(self, form):
obj = form.save(commit=False)
obj.user = self.request.user
obj.save()
if obj.draft: return HttpResponseRedirect('/created')
else:
obj.user.last_commited = obj.created
obj.user.save()
obj.sub.last_commited = obj.created
obj.sub.save()
obj.last_commited = obj.created
obj.save()
return HttpResponseRedirect(obj.get_absolute_url())
class UpdatePostView(UpdateView):
template_name = 'layouts/post_create.html'
form_class = PostForm
def get_queryset(self):
return Post.objects.by_user(self.request.user)
def form_valid(self, form):
obj = form.save(commit=False)
if not obj.last_commited and not obj.draft:
now = datetime.now()
obj.last_commited = now
obj.user.last_commited = now
obj.user.save()
obj.sub.last_commited = now
obj.sub.save()
obj.save()
if obj.draft: return HttpResponseRedirect('/created')
else: return HttpResponseRedirect(obj.get_absolute_url())
class PostUserCreatedView(ListView):
template_name = 'content/post_user_created.html'
def get_queryset(self):
return Post.objects.by_user(self.request.user)
class SubFollowCreate(View):
def post(self, request, *args, **kwargs):
user = self.request.user
sub_followed = self.kwargs['followed']
sub_followed_obj = SubFollow.objects.create(follower=user, sub_id=sub_followed)
sub_followed_obj.save()
sub_followed_obj.follower.sub_following_number += 1
sub_followed_obj.follower.save()
sub_followed_obj.sub.follower_number += 1
        sub_followed_obj.sub.save()
return HttpResponse(status=200)
class SubFollowDelete(View):
def post(self, request, *args, **kwargs):
sub_unfollowed = self.kwargs['unfollowed']
sub_unfollowed_obj = SubFollow.objects.get(follower=self.request.user, sub_id=sub_unfollowed)
sub_unfollowed_obj.follower.sub_following_number -= 1
sub_unfollowed_obj.follower.save()
sub_unfollowed_obj.sub.follower_number -= 1
sub_unfollowed_obj.sub.save()
        sub_unfollowed_obj.delete()
return HttpResponse(status=200)
|
sciCloud/OLiMS
|
report/olims_sample_received_vs_reported.py
|
Python
|
agpl-3.0
| 4,408 | 0.009301 |
# -*- coding: utf-8 -*-
import time
from openerp import api, models
import datetime
class ReportSampleReceivedvsReported(models.AbstractModel):
_name = 'report.olims.report_sample_received_vs_reported'
def _get_samples(self, samples):
datalines = {}
footlines = {}
total_received_count = 0
total_published_count = 0
for sample in samples:
# For each sample, retrieve check is has results published
# and add it to datalines
published = False
analyses = self.env['olims.analysis_request'].search([('Sample_id', '=', sample.id)])
if analyses:
for analysis in analyses:
if not (analysis.DatePublished is False):
published = True
break
            datereceived = datetime.datetime.strptime(sample.DateReceived, \
                "%Y-%m-%d %H:%M:%S")
monthyear = datereceived.strftime("%B") + " " + datereceived.strftime(
"%Y")
received = 1
publishedcnt = published and 1 or 0
if (monthyear in datalines):
received = datalines[monthyear]['ReceivedCount'] + 1
publishedcnt = published and datalines[monthyear][
'PublishedCount'] + 1 or \
datalines[monthyear]['PublishedCount']
ratio = publishedcnt / received
dataline = {'MonthYear': monthyear,
'ReceivedCount': received,
'PublishedCount': publishedcnt,
'UnpublishedCount': received - publishedcnt,
'Ratio': ratio,
'RatioPercentage': '%02d' % (
100 * (float(publishedcnt) / float(received))) + '%'}
datalines[monthyear] = dataline
total_received_count += 1
total_published_count = published and total_published_count + 1 or total_published_count
# Footer total data
if total_received_count > 0:
ratio = total_published_count / total_received_count
else:
ratio = total_published_count / 1
try:
footline = {'ReceivedCount': total_received_count,
'PublishedCount': total_published_count,
'UnpublishedCount': total_received_count - total_published_count,
'Ratio': ratio,
'RatioPercentage': '%02d' % (100 * (
float(total_published_count) / float(
total_received_count))) + '%'
}
except:
footline = {'ReceivedCount': total_received_count,
'PublishedCount': total_published_count,
'UnpublishedCount': total_received_count - total_published_count,
'Ratio': ratio,
'RatioPercentage': '%02d' % (100 * (
float(total_published_count) / float(
1))) + '%'
}
footlines['Total'] = footline
return datalines, footlines
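    # Illustrative shape of the returned mapping (hypothetical month): an entry
    # such as datalines["March 2016"] carries ReceivedCount, PublishedCount,
    # UnpublishedCount and a RatioPercentage string like "70%"; footlines["Total"]
    # aggregates the same counters across all samples.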
@api.multi
def render_html(self, data):
startdate = datetime.datetime.strptime(data['form'].get('date_from'), \
"%Y-%m-%d %H:%M:%S").strftime("%Y/%m/%d %H:%M:%S")
enddate = datetime.datetime.strptime(data['form'].get('date_to'), \
"%Y-%m-%d %H:%M:%S").strftime("%Y/%m/%d %H:%M:%S")
self.model = self.env.context.get('active_model')
docs = self.env[self.model].browse(self.env.context.get('active_id'))
samples = self.env['olims.sample'].search([('SamplingDate', '>=', startdate), \
('SamplingDate', '<=', enddate), \
('state', 'in', ['sample_received','expired','disposed'])])
samples_res, footlines= self.with_context(data['form'].get('used_context'))._get_samples(samples)
docargs = {
'doc_ids': self.ids,
'doc_model': self.model,
'data': data['form'],
'docs': docs,
'time': time,
'Samples': samples_res,
'footlines' : footlines #sum(samples_res.values())
}
return self.env['report'].render('olims.report_sample_received_vs_reported', docargs)
|
frankhale/nyana
|
nyana/plugins/SnippetViewPlugin.py
|
Python
|
gpl-3.0
| 18,007 | 0.038929 |
# SnippetViewPlugin - Provides a templated/abbreviation expansion mechanism for
# the editor.
#
# Copyright (C) 2006-2010 Frank Hale <frankhale@gmail.com>
#
# ##sandbox - irc.freenode.net
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pygtk
pygtk.require('2.0')
import gio
import gtk
import gtk.gdk
import gtksourceview2
import pango
import re
import datetime
import os.path
import xml.etree.ElementTree as ET
SNIPPET_XML = "snippets.xml"
MIME_ALIAS = {
"text/x-python" : ["python"],
"application/x-ruby" : ["ruby", "rails"]
}
# Change Date/Time formats as needed
DATE_FORMAT = "%B %d %Y"
TIME_FORMAT = "%H:%M"
DATE = datetime.datetime.now().strftime(DATE_FORMAT)
TIME = datetime.datetime.now().strftime(TIME_FORMAT)
DATETIME = "%s @ %s" % (datetime.datetime.now().strftime(DATE_FORMAT), datetime.datetime.now().strftime(TIME_FORMAT))
class Snippet:
def __init__(self):
self.language=""
self.shortcut=""
self.snippet=""
def mimetype(self):
return MIME[self.language]
class SnippetLoader:
def load_all(self):
SNIPPETS = []
root = ET.parse(SNIPPET_XML)
for snippet in root.getiterator("snippet"):
if snippet.get("language") and snippet.get("shortcut"):
snip = Snippet()
snip.language = snippet.get("language")
snip.shortcut = snippet.get("shortcut")
snip.snippet = snippet.text.strip()
SNIPPETS.append(snip)
return SNIPPETS
def load(self, language):
all_snips = self.load_all()
return [s for s in all_snips if s.language==language]
def get_common(self):
return self.load("common")
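# Assumed snippets.xml layout (illustrative only; the real file ships with the
# plugin), matching the attributes read in load_all():
#   <snippets>
#     <snippet language="python" shortcut="def">
#       def ${name}(${args}):
#           ${pass}
#     </snippet>
#   </snippets>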
# Common snippets that are useful regardless of document, used for built in snippets
COMMON_SNIPPETS = {
"^d" : DATE, # expands to the current date supplied by the date format above
"^t" : TIME, # expands to the current time supplied by the time format above
"^dt" : DATETIME # expands to a combination of the date and time supplied by the formats above
}
BUILT_IN_SNIPPETS = []
# For each of the common snippets make a Snippet object, plug in the key,value and add it to the built in snippets
# list
for KEY,VALUE in COMMON_SNIPPETS.items():
s = Snippet()
s.shortcut = KEY
s.snippet = VALUE
s.language = "common"
BUILT_IN_SNIPPETS.append(s)
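# Illustrative expansion: with the formats above, typing the built-in shortcut
# "^dt" and pressing Tab would insert something like "July 31 2007 @ 14:02";
# the key handling itself lives in SnippetViewPlugin below.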
class SnippetViewPlugin(object):
metadata = {
"name" : "Snippet Source View Plugin",
"authors" : ["Frank Hale <frankhale@gmail.com>"],
"website" : "http://github.com/frankhale/nyana",
"version" : "0.6.0",
"development status" : "beta",
"date" : "31 JULY 2007",
"enabled" : True,
"short description" : "Provides abbreviation expansion via tab key",
"long description" : "Provides a snippet feature which allows one to create abbreviations that are expandable by hitting the tab key. Special variables can be inserted into the snippets to make them tabbable and provide a quick way to create code."
}
def __init__(self, editor):
self.editor = editor
self.editor.event_manager.register_listener("buffer_change", self.event_buffer_change)
self.editor.event_manager.register_listener("scroll_to_insert", self.scroll_to_insert)
self.editor.source_view.set_highlight_current_line(True)
self.editor.source_view.set_wrap_mode(gtk.WRAP_NONE)
# regular expression used to find our special variables.
#
# variables look like ${foo}
self.variable_re = re.compile('\${([^\${}]*)}')
self.SNIPPETS = []
self.SNIPPET_MARKS = []
self.SNIPPET_OFFSETS = []
self.SNIPPET_START_MARK = None
self.SNIPPET_END_MARK = None
self.IN_SNIPPET = False
self.HAS_NO_VARIABLES=False
self.TABBED = True
self.mime_type = None
self.editor.source_view.set_show_line_numbers(True)
self.editor.source_view.set_auto_indent(True)
        self.editor.source_view.set_resize_mode(gtk.RESIZE_PARENT)
### Comment this out if you don't want Monospace and want the default
### system font. Or change to suit your needs.
default_font = pango.FontDescription("Monospace 10")
if default_font:
self.editor.source_view.modify_font(default_font)
        ### -------------------------------------------------------- ###
self.editor.source_view.connect("key-press-event", self.key_event)
self.editor.buff.connect("mark-set", self.mark_set)
self.SL = SnippetLoader()
self.SNIPPETS.extend(self.SL.get_common())
self.SNIPPETS.extend(BUILT_IN_SNIPPETS)
# For testing purposes.
#self.syntax_highlight(os.path.abspath("/home/majyk/dev/python/test.py"))
def load_snippets(self):
types = []
try:
types = MIME_ALIAS[self.mime_type]
except KeyError:
print "This mime-type has no snippets defined"
types=None
if not types == None:
print types
if len(types)==1:
self.SNIPPETS.extend(self.SL.load(types[0]))
elif len(types)>1:
for t in types:
self.SNIPPETS.extend(self.SL.load(t))
#print "snippets available:"
#for s in self.SNIPPETS:
# print s.shortcut
def scroll_to_insert(self, parms=None):
self.editor.source_view.scroll_mark_onscreen( self.editor.buff.get_mark("insert"))
def event_buffer_change(self, parms):
if(parms.has_key("filename") and parms.has_key("text")):
self.set_text(parms["filename"], parms["text"])
def set_text(self, filename, text):
if(filename):
self.syntax_highlight(filename)
self.editor.buff.set_text(text)
self.editor.buff.place_cursor(self.editor.buff.get_start_iter())
def mark_set(self, textbuffer, _iter, textmark):
# if we are in a snippet and the user moves the cursor out of the snippet bounds without
# finishing the snippet then we need to clean up and turn the snippet mode off
if self.IN_SNIPPET and self.SNIPPET_START_MARK and self.SNIPPET_END_MARK:
SNIPPET_START_ITER = self.editor.buff.get_iter_at_mark(self.SNIPPET_START_MARK)
SNIPPET_END_ITER = self.editor.buff.get_iter_at_mark(self.SNIPPET_END_MARK)
curr_iter = self.get_cursor_iter()
if not curr_iter.in_range(SNIPPET_START_ITER, SNIPPET_END_ITER):
if self.SNIPPET_START_MARK and self.SNIPPET_END_MARK:
self.IN_SNIPPET = False
# Do mime-type magic and switch the language syntax highlight mode and snippets
def syntax_highlight(self, filename):
if not (os.path.exists(filename)):
print "(%s) does not exist" % (filename)
return
print "filename = (%s)" % (filename)
language = self.get_language(filename)
if language:
self.editor.buff.set_highlight_syntax(True)
self.editor.buff.set_language(language)
#print "Setting the snippets to the following language mime-type: " + mime_type
self.load_snippets()
else:
print "A syntax highlight mode for this mime-type does not exist."
self.editor.buff.set_highlight_syntax(False)
def complete_special_chars(self, widget, char):
curr_iter = self.editor.buff.get_iter_at_mark( self.editor.buff.get_insert() )
self.editor.buff.insert(curr_iter, char)
curr_iter = self.editor.buff.get_iter_at_mark( self.editor.buff.get_insert() )
curr_iter.backward_chars(1)
self.editor.buff.place_cursor(curr_iter)
def get_cursor_iter(self):
cursor_mark = self.editor.buff.get_insert()
cursor_iter = self.editor.buff.get_iter_at_mark(cursor_mark)
return cursor_iter
def get_line_number(self):
        cursor_iter = self.get_cursor_iter()
line_number = cursor_iter.get_line()
return line_number
# Adds marks into the buffer for the start and end offsets for each variable
def mark_variables(sel
|
angr/angr
|
angr/flirt/build_sig.py
|
Python
|
bsd-2-clause
| 10,375 | 0.00241 |
# pylint:disable=consider-using-with
from typing import List, Dict
import json
import subprocess
import argparse
import tempfile
import os
import itertools
from collections import defaultdict
import angr
UNIQUE_STRING_COUNT = 20
# strings longer than MAX_UNIQUE_STRING_LEN will be truncated
MAX_UNIQUE_STRING_LEN = 70
def get_basic_info(ar_path: str) -> Dict[str,str]:
"""
Get basic information of the archive file.
"""
with tempfile.TemporaryDirectory() as tempdirname:
cwd = os.getcwd()
os.chdir(tempdirname)
subprocess.call(["ar", "x", ar_path])
# Load arch and OS information from the first .o file
o_files = [ f for f in os.listdir(".") if f.endswith(".o") ]
if o_files:
proj = angr.Project(o_files[0], auto_load_libs=False)
arch_name = proj.arch.name.lower()
os_name = proj.simos.name.lower()
os.chdir(cwd)
return {
'arch': arch_name,
'platform': os_name,
}
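# Illustrative call (hypothetical archive): get_basic_info("libfoo.a") might
# return {'arch': 'amd64', 'platform': 'linux'}, taken from the first .o member
# of the archive via angr.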
def get_unique_strings(ar_path: str) -> List[str]:
"""
For Linux libraries, this method requires ar (from binutils), nm (from binutils), and strings.
"""
# get symbols
nm_output = subprocess.check_output(["nm", ar_path])
nm_lines = nm_output.decode("utf-8").split("\n")
symbols = set()
for nm_line in nm_lines:
symbol_types = "UuVvTtRrDdWwBbNn"
for symbol_type in symbol_types:
if f" {symbol_type} " in nm_line:
# parse it
symbol = nm_line[nm_line.find(f" {symbol_type}") + 3: ].strip(" ")
if "." in symbol:
symbols |= set(symbol.split("."))
else:
symbols.add(symbol)
break
# extract the archive file into a temporary directory
all_strings = set()
with tempfile.TemporaryDirectory() as tempdirname:
cwd = os.getcwd()
os.chdir(tempdirname)
subprocess.call(["ar", "x", ar_path])
for filename in os.listdir("."):
if filename.endswith(".o"):
strings = subprocess.check_output(["strings", "-n", "8", filename])
strings = strings.decode("utf-8").split("\n")
non_symbol_strings = set()
for s in strings:
if s in symbols:
continue
if "." in s and any(subs in symbols
|
for subs in s.split(".")):
continue
# C++ specific
if "::" in s:
continue
if "_" in s:
                        # make sure it's not a substring of any symbol
is_substring = False
for symbol in symbols:
if s in symbol:
is_substring = True
break
if is_substring:
continue
non_symbol_strings.add(s)
all_strings |= non_symbol_strings
os.chdir(cwd)
grouped_strings = defaultdict(set)
for s in all_strings:
grouped_strings[s[:5]].add(s)
sorted_strings = list(sorted(all_strings, key=len, reverse=True))
ctr = 0
picked = set()
unique_strings = [ ]
for s in sorted_strings:
if s[:5] in picked:
continue
unique_strings.append(s[:MAX_UNIQUE_STRING_LEN])
picked.add(s[:5])
ctr += 1
if ctr >= UNIQUE_STRING_COUNT:
break
return unique_strings
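# Illustrative call (assumes binutils' ar/nm/strings on PATH):
#   get_unique_strings("/path/to/libfoo.a")
# returns up to UNIQUE_STRING_COUNT long literals that are not symbol names,
# de-duplicated by their first five characters and truncated to
# MAX_UNIQUE_STRING_LEN characters.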
def run_pelf(pelf_path: str, ar_path: str, output_path: str):
subprocess.check_call([pelf_path, "-r43:0:0", ar_path, output_path])
def run_sigmake(sigmake_path: str, sig_name: str, pat_path: str, sig_path: str):
if " " not in sig_name:
sig_name_arg = f"-n{sig_name}"
else:
sig_name_arg = f"-n\"{sig_name}\""
proc = subprocess.Popen([sigmake_path, sig_name_arg, pat_path, sig_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
_, stderr = proc.communicate()
if b"COLLISIONS:" in stderr:
return False
return True
def process_exc_file(exc_path: str):
"""
We are doing the stupidest thing possible: For each batch of conflicts, we pick the most likely
    result based on a set of predefined rules.
TODO: Add caller-callee-based de-duplication.
"""
with open(exc_path, "r") as f:
data = f.read()
lines = data.split("\n")
# parse groups
ctr = itertools.count()
idx = 0
groups = defaultdict(dict)
for line in lines:
if line.startswith(";"):
continue
if not line:
idx = next(ctr)
else:
# parse the function name
func_name = line[:line.index("\t")].strip(" ")
groups[idx][func_name] = line
# for each group, decide the one to keep
for idx in list(groups.keys()):
g = groups[idx]
if len(g) == 1:
# don't pick anything. This is a weird case that I don't understand
continue
if all(func_name.endswith(".cold") for func_name in g):
# .cold functions. doesn't matter what we pick
continue
non_cold_names = [ ]
for func_name in g:
if func_name.endswith(".cold"):
continue
non_cold_names.append(func_name)
# sort it
non_cold_names = list(sorted(non_cold_names, key=len))
# pick the top one
the_chosen_one = non_cold_names[0]
line = g[the_chosen_one]
g[the_chosen_one] = "+" + line
# output
with open(exc_path, "w") as f:
for g in groups.values():
for line in g.values():
f.write(line + "\n")
f.write("\n")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("ar_path", help="Path of the .a file to build signatures for")
parser.add_argument("sig_name", help="Name of the signature (a string inside the signature file)")
parser.add_argument("sig_path", help="File name of the generated signature")
parser.add_argument("--compiler", help="Name of the compiler (e.g., gcc, clang). It will be stored in the meta "
"data file.")
parser.add_argument("--compiler_version", help="Version of the compiler (e.g., 6). It will be stored in the meta "
"data file.")
# parser.add_argument("--platform", help="Name of the platform (e.g., windows/linux/macos). It will be stored in
# the meta data file.")
parser.add_argument("--os", help="Name of the operating system (e.g., ubuntu/debian). It will be stored in the "
"meta data file.")
parser.add_argument("--os_version", help="Version of the operating system (e.g., 20.04). It will be stored in the "
"meta data file.")
parser.add_argument("--pelf_path", help="Path of pelf")
parser.add_argument("--sigmake_path", help="Path of sigmake")
args = parser.parse_args()
if args.pelf_path:
pelf_path = args.pelf_path
elif "pelf_path" in os.environ:
pelf_path = os.environ['pelf_path']
else:
raise ValueError("pelf_path must be specified.")
if args.sigmake_path:
sigmake_path = args.sigmake_path
elif "sigmake_path" in os.environ:
sigmake_path = os.environ['sigmake_path']
else:
raise ValueError("sigmake_path must be specified.")
compiler = args.compiler
if compiler:
compiler = compiler.lower()
compiler_version = args.compiler_version
if compiler_version:
compiler_version = compiler_version.lower()
os_name = args.os
if os_name:
os_name = os_name.lower()
os_version = args.os_version
if os_version:
os_version = os_version.lower()
    # Get basic information
basic_info = get_basic_info(args.ar_path)
# Get unique st
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.3.0/Lib/tkinter/ttk.py | Python | mit | 56,245 | 0.002471 |
"""Ttk wrapper.
This module provides classes to allow using Tk themed widget set.
Ttk is based on a revised and enhanced version of
TIP #48 (http://tip.tcl.tk/48) specified style engine.
Its basic idea is to separate, to the extent possible, the code
implementing a widget's behavior from the code implementing its
appearance. Widget class bindings are primarily responsible for
maintaining the widget state and invoking callbacks, all aspects
of the widgets appearance lies at Themes.
"""
__version__ = "0.3.1"
__author__ = "Guilherme Polo <ggpolo@gmail.com>"
__all__ = ["Button", "Checkbutton", "Combobox", "Entry", "Frame", "Label",
"Labelframe", "LabelFrame", "Menubutton", "Notebook", "Panedwindow",
"PanedWindow", "Progressbar", "Radiobutton", "Scale", "Scrollbar",
"Separator", "Sizegrip", "Style", "Treeview",
# Extensions
"LabeledScale", "OptionMenu",
# functions
"tclobjs_to_py", "setup_master"]
import tkinter
_flatten = tkinter._flatten
# Verify if Tk is new enough to not need the Tile package
_REQUIRE_TILE = True if tkinter.TkVersion < 8.5 else False
def _load_tile(master):
if _REQUIRE_TILE:
import os
tilelib = os.environ.get('TILE_LIBRARY')
if tilelib:
# append custom tile path to the list of directories that
# Tcl uses when attempting to resolve packages with the package
# command
master.tk.eval(
'global auto_path; '
'lappend auto_path {%s}' % tilelib)
master.tk.eval('package require tile') # TclError may be raised here
master._tile_loaded = True
def _format_optdict(optdict, script=False, ignore=None):
"""Formats optdict to a tuple to pass it to tk.call.
E.g. (script=False):
{'foreground': 'blue', 'padding': [1, 2, 3, 4]} returns:
('-foreground', 'blue', '-padding', '1 2 3 4')"""
format = "%s" if not script else "{%s}"
opts = []
for opt, value in optdict.items():
if ignore and opt in ignore:
continue
if isinstance(value, (list, tuple)):
v = []
for val in value:
if isinstance(val, str):
v.append(str(val) if val else '{}')
else:
v.append(str(val))
# format v according to the script option, but also check for
# space in any value in v in order to group them correctly
value = format % ' '.join(
                ('{%s}' if ' ' in val else '%s') % val for val in v)
if script and value == '':
value = '{}' # empty string in Python is equivalent to {} in Tcl
opts.append(("-%s" % opt, value))
# Remember: _flatten skips over None
return _flatten(opts)
def _format_mapdict(mapdict, script=False):
"""Formats mapdict to pass it to tk.call.
E.g. (script=False):
{'expand': [('active', 'selected', 'grey'), ('focus', [1, 2, 3, 4])]}
returns:
('-expand', '{active selected} grey focus {1, 2, 3, 4}')"""
# if caller passes a Tcl script to tk.call, all the values need to
# be grouped into words (arguments to a command in Tcl dialect)
format = "%s" if not script else "{%s}"
opts = []
for opt, value in mapdict.items():
opt_val = []
# each value in mapdict is expected to be a sequence, where each item
# is another sequence containing a state (or several) and a value
for statespec in value:
state, val = statespec[:-1], statespec[-1]
if len(state) > 1: # group multiple states
state = "{%s}" % ' '.join(state)
else: # single state
# if it is empty (something that evaluates to False), then
# format it to Tcl code to denote the "normal" state
state = state[0] or '{}'
if isinstance(val, (list, tuple)): # val needs to be grouped
val = "{%s}" % ' '.join(map(str, val))
opt_val.append("%s %s" % (state, val))
opts.append(("-%s" % opt, format % ' '.join(opt_val)))
return _flatten(opts)
def _format_elemcreate(etype, script=False, *args, **kw):
"""Formats args and kw according to the given element factory etype."""
spec = None
opts = ()
if etype in ("image", "vsapi"):
if etype == "image": # define an element based on an image
# first arg should be the default image name
iname = args[0]
# next args, if any, are statespec/value pairs which is almost
# a mapdict, but we just need the value
imagespec = _format_mapdict({None: args[1:]})[1]
spec = "%s %s" % (iname, imagespec)
else:
# define an element whose visual appearance is drawn using the
# Microsoft Visual Styles API which is responsible for the
# themed styles on Windows XP and Vista.
# Availability: Tk 8.6, Windows XP and Vista.
class_name, part_id = args[:2]
statemap = _format_mapdict({None: args[2:]})[1]
spec = "%s %s %s" % (class_name, part_id, statemap)
opts = _format_optdict(kw, script)
elif etype == "from": # clone an element
# it expects a themename and optionally an element to clone from,
# otherwise it will clone {} (empty element)
spec = args[0] # theme name
if len(args) > 1: # elementfrom specified
opts = (args[1], )
if script:
spec = '{%s}' % spec
opts = ' '.join(map(str, opts))
return spec, opts
def _format_layoutlist(layout, indent=0, indent_size=2):
"""Formats a layout list so we can pass the result to ttk::style
layout and ttk::style settings. Note that the layout doesn't has to
be a list necessarily.
E.g.:
[("Menubutton.background", None),
("Menubutton.button", {"children":
[("Menubutton.focus", {"children":
[("Menubutton.padding", {"children":
[("Menubutton.label", {"side": "left", "expand": 1})]
})]
})]
}),
("Menubutton.indicator", {"side": "right"})
]
returns:
Menubutton.background
Menubutton.button -children {
Menubutton.focus -children {
Menubutton.padding -children {
Menubutton.label -side left -expand 1
}
}
}
Menubutton.indicator -side right"""
script = []
for layout_elem in layout:
elem, opts = layout_elem
opts = opts or {}
fopts = ' '.join(map(str, _format_optdict(opts, True, "children")))
head = "%s%s%s" % (' ' * indent, elem, (" %s" % fopts) if fopts else '')
if "children" in opts:
script.append(head + " -children {")
indent += indent_size
newscript, indent = _format_layoutlist(opts['children'], indent,
indent_size)
script.append(newscript)
indent -= indent_size
script.append('%s}' % (' ' * indent))
else:
script.append(head)
return '\n'.join(script), indent
def _script_from_settings(settings):
"""Returns an appropriate script, based on settings, according to
theme_settings definition to be used by theme_settings and
theme_create."""
script = []
# a script will be generated according to settings passed, which
# will then be evaluated by Tcl
for name, opts in settings.items():
# will format specific keys according to Tcl code
if opts.get('configure'): # format 'configure'
s = ' '.join(map(str, _format_optdict(opts['configure'], True)))
script.append("ttk::style configure %s %s;" % (name, s))
if opts.get('map'): # format 'map'
s = ' '.join(map(str, _format_mapdict(opts['map'], True)))
script.append("ttk::style map %s %s;" % (name, s))
if 'layout' in opts: # format 'layout' which may be empty
if not opts['layout']:
s = 'null' # could be any other
machawk1/pywb | pywb/rewrite/test/test_rewrite_live.py | Python | gpl-3.0 | 9,395 | 0.007025 |
from pywb.rewrite.rewrite_live import LiveRewriter
from pywb.rewrite.url_rewriter import UrlRewriter
from pywb.rewrite.wburl import WbUrl
from pywb import get_test_dir
from io import BytesIO
# This module has some rewriting tests against the 'live web'
# As such, the content may change and the test may break
urlrewriter = UrlRewriter('20131226101010/http://example.com/some/path/index.html', '/pywb/')
bn_urlrewriter = UrlRewriter('20131226101010bn_/http://example.com/some/path/index.html', '/pywb/')
def head_insert_func(rule, cdx):
if rule.js_rewrite_location != 'urls':
return '<script src="/static/__pywb/wombat.js"> </script>'
else:
return ''
def test_csrf_token_headers():
rewriter = LiveRewriter()
env = {'HTTP_X_CSRFTOKEN': 'wrong', 'HTTP_COOKIE': 'csrftoken=foobar'}
req_headers = rewriter.translate_headers('http://example.com/', 'com,example)/', env)
assert req_headers == {'X-CSRFToken': 'foobar', 'Cookie': 'csrftoken=foobar'}
def test_forwarded_scheme():
rewriter = LiveRewriter()
env = {'HTTP_X_FORWARDED_PROTO': 'https', 'Other': 'Value'}
req_headers = rewriter.translate_headers('http://example.com/', 'com,example)/', env)
assert req_headers == {'X-Forwarded-Proto': 'http'}
def test_req_cookie_rewrite_1():
rewriter = LiveRewriter()
env = {'HTTP_COOKIE': 'A=B'}
urlkey = 'example,example,test)/'
url = 'test.example.example/'
req_headers = rewriter.translate_headers(url, urlkey, env)
assert req_headers == {'Cookie': 'A=B; FOO=&bar=1'}
def test_req_cookie_rewrite_2():
rewriter = LiveRewriter()
env = {'HTTP_COOKIE': 'FOO=goo'}
urlkey = 'example,example,test)/'
url = 'test.example.example/'
req_headers = rewriter.translate_headers(url, urlkey, env)
assert req_headers == {'Cookie': 'FOO=&bar=1'}
def test_req_cookie_rewrite_3():
rewriter = LiveRewriter()
env = {}
urlkey = 'example,example,test)/'
url = 'test.example.example/'
req_headers = rewriter.translate_headers(url, urlkey, env)
assert req_headers == {'Cookie': '; FOO=&bar=1'}
def test_local_1():
status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample.html',
urlrewriter,
head_insert_func,
'com,example,test)/')
# wombat insert added
assert '<head><script src="/static/__pywb/wombat.js"> </script>' in buff, buff
# JS location and JS link rewritten
assert 'window.WB_wombat_location = "/pywb/20131226101010/http:\/\/example.com/dynamic_page.html"' in buff
# link rewritten
assert '"/pywb/20131226101010/http://example.com/some/path/another.html"' in buff
def test_local_no_head():
status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample_no_head.html',
urlrewriter,
head_insert_func,
'com,example,test)/')
# wombat insert added
assert '<script src="/static/__pywb/wombat.js"> </script>' in buff
# location rewritten
assert 'window.WB_wombat_location = "/other.html"' in buff
# link rewritten
assert '"/pywb/20131226101010/http://example.com/some/path/another.html"' in buff
def test_local_no_head_banner_only():
status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample_no_head.html',
bn_urlrewriter,
head_insert_func,
'com,example,test)/')
# wombat insert added
assert '<script src="/static/__pywb/wombat.js"> </script>' in buff
# location NOT rewritten
assert 'window.location = "/other.html"' in buff
# link NOT rewritten
assert '"another.html"' in buff
def test_local_banner_only_no_rewrite():
status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample.html',
bn_urlrewriter,
head_insert_func,
'com,example,test)/')
# wombat insert added
assert '<head><script src="/static/__pywb/wombat.js"> </script>' in buff
# JS location NOT rewritten, JS link NOT rewritten
assert 'window.location = "http:\/\/example.com/dynamic_page.html"' in buff, buff
# link NOT rewritten
assert '"another.html"' in buff
def test_local_2_link_only_rewrite():
status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample.html',
urlrewriter,
head_insert_func,
'example,example,test)/nolocation_rewrite')
# no wombat insert
assert '<head><script src="/static/__pywb/wombat.js"> </script>' not in buff
# JS location NOT rewritten, JS link rewritten
assert 'window.location = "/pywb/20131226101010/http:\/\/example.com/dynamic_page.html"' in buff
# still link rewrite
assert '"/pywb/20131226101010/http://example.com/some/path/another.html"' in buff
def test_local_2_js_loc_only_rewrite():
    status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample.html',
urlrewriter,
head_insert_func,
'example,example,test,loconly)/')
# wombat insert added
assert '<script src="/static/__pywb/wombat.js"> </script>' in buff
# JS location rewritten, JS link NOT rewritten
assert 'window.WB_wombat_location = "http:\/\/example.com/dynamic_page.html"' in buff
# still link rewrite in HTML
assert '"/pywb/20131226101010/http://example.com/some/path/another.html"' in buff
def test_local_2_no_rewrite():
status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample.html',
urlrewriter,
head_insert_func,
'example,example,test,norewrite)/')
# wombat insert added
assert '<script src="/static/__pywb/wombat.js"> </script>' in buff
# JS location NOT rewritten, JS link NOT rewritten
assert 'window.location = "http:\/\/example.com/dynamic_page.html"' in buff
# still link rewrite in HTML
assert '"/pywb/20131226101010/http://example.com/some/path/another.html"' in buff
def test_local_unclosed_script():
status_headers, buff = get_rewritten(get_test_dir() + 'text_content/sample_unclosed_script.html',
urlrewriter,
head_insert_func,
'com,example,test)/')
# wombat insert added
assert '<head><script src="/static/__pywb/wombat.js"> </script>' in buff, buff
# JS location and JS link rewritten
assert 'window.WB_wombat_location = "/pywb/20131226101010/http:\/\/example.com/dynamic_page.html";\n}\n</script>' in buff, buff
def test_example_1():
status_headers, buff = get_rewritten('http://example.com/', urlrewriter, req_headers={'Connection': 'close'})
# verify header rewriting
assert (('X-Archive-Orig-connection', 'close') in status_headers.headers), status_headers
# verify utf-8 charset detection
assert status_headers.get_header('content-type') == 'text/html; charset=utf-8'
assert '/pywb/20131226101010/http://www.iana.org/domains/example' in buff, buff
def test_example_2_redirect():
status_headers, buff = get_rewritten('http://httpbin.org/redirect-to?url=http://example.com/', urlrewriter)
# redirect, no content
assert status_headers.get_statuscode() == '302'
assert len(buff) == 0
def test_example_3_rel():
status_headers, buff = get_rewritten('//example.com/', urlrewriter)
assert status_headers.get_statuscode() == '200'
def test_example_4_rewrite_err():
# may occur in case of rewrite mismatch, the /// gets stripped off
status_headers, buff = get_rewritten('http:
jromang/retina-old | distinclude/spyderlib/plugins/onlinehelp.py | Python | gpl-3.0 | 3,199 | 0.00813 |
# -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see spyderlib/__init__.py for details)
"""Online Help Plugin"""
from spyderlib.qt.QtCore import Signal
import os.path as osp
# Local imports
from spyderlib.baseconfig import get_conf_path, _
from spyderlib.widgets.pydocgui import PydocBrowser
from spyderlib.plugins import SpyderPluginMixin
class OnlineHelp(PydocBrowser, SpyderPluginMixin):
"""
Online Help Plugin
"""
sig_option_changed = Signal(str, object)
CONF_SECTION = 'onlinehelp'
LOG_PATH = get_conf_path('.onlinehelp')
def __init__(self, parent):
self.main = parent
PydocBrowser.__init__(self, parent)
SpyderPluginMixin.__init__(self, parent)
# Initialize plugin
self.initialize_plugin()
self.register_widget_shortcuts("Editor", self.find_widget)
self.webview.set_zoom_factor(self.get_option('zoom_factor'))
self.url_combo.setMaxCount(self.get_option('max_history_entries'))
self.url_combo.addItems( self.load_history() )
#------ Public API ---------------------------------------------------------
def load_history(self, obj=None):
"""Load history from a text file in user home directory"""
if osp.isfile(self.LOG_PATH):
history = [line.replace('\n','')
for line in file(self.LOG_PATH, 'r').readlines()]
else:
history = []
return history
def save_history(self):
"""Save history to a text file in user home directory"""
file(self.LOG_PATH, 'w').write("\n".join( \
[ unicode( self.url_combo.itemText(index) )
for index in range(self.url_combo.count()) ] ))
#------ SpyderPluginMixin API ---------------------------------------------
def visibility_changed(self, enable):
"""DockWidget visibility has changed"""
SpyderPluginMixin.visibility_changed(self, enable)
if enable and not self.is_server_running():
self.initialize()
#------ SpyderPluginWidget API ---------------------------------------------
def get_plugin_title(self):
"""Return widget title"""
return _('Online help')
def get_focus_widget(self):
"""
Return the widget to give focus to when
this plugin's dockwidget is raised on top-level
"""
self.url_combo.lineEdit().selectAll()
return self.url_combo
    def closing_plugin(self, cancelable=False):
        """Perform actions before parent main window is closed"""
self.save_history()
self.set_option('zoom_factor', self.webview.get_zoom_factor())
return True
def refresh_plugin(self):
"""Refresh widget"""
pass
def get_plugin_actions(self):
"""Return a list of actions related to plugin"""
return []
def register_plugin(self):
"""Register plugin in Spyder's main window"""
self.main.add_dockwidget(self)
partofthething/home-assistant | homeassistant/components/hive/switch.py | Python | apache-2.0 | 2,449 | 0.000817 |
"""Support for the Hive switches."""
from datetime import timedelta
from homeassistant.components.switch import SwitchEntity
from . import ATTR_AVAILABLE, ATTR_MODE, DATA_HIVE, DOMAIN, HiveEntity, refresh_system
PARALLEL_UPDATES = 0
SCAN_INTERVAL = timedelta(seconds=15)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the Hive Switch."""
if discovery_info is None:
return
hive = hass.data[DOMAIN].get(DATA_HIVE)
devices = hive.devices.get("switch")
entities = []
if devices:
for dev in devices:
entities.append(HiveDevicePlug(hive, dev))
async_add_entities(entities, True)
class HiveDevicePlug(HiveEntity, SwitchEntity):
"""Hive Active Plug."""
@property
def unique_id(self):
"""Return unique ID of entity."""
return self._unique_id
@property
def device_info(self):
"""Return device information."""
return {"identifiers": {(DOMAIN, self.unique_id)}, "name": self.name}
@property
def name(self):
"""Return the name of this Switch device if any."""
return self.device["haName"]
@property
def available(self):
"""Return if the device is available."""
return self.device["deviceData"].get("online")
@property
def device_state_attributes(self):
"""Show Device Attributes."""
return {
ATTR_AVAILABLE: self.attributes.get(ATTR_AVAILABLE),
ATTR_MODE: self.attributes.get(ATTR_MODE),
}
@property
def current_power_w(self):
"""Return the current power usage in W."""
return self.device["status"]["power_usage"]
@property
def is_on(self):
"""Return true if switch is on."""
return self.device["status"]["state"]
@refresh_system
async def async_turn_on(self, **kwargs):
"""Turn the switch on."""
if self.device["hiveType"] == "activeplug":
await self.hive.switch.turn_on(self.device)
@refresh_system
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
if self.device["hiveType"] == "activeplug":
await self.hive.switch.turn_off(self.device)
async def async_update(self):
"""Update all Node data from Hive."""
await self.hive.session.updateData(self.device)
self.device = await self.hive.switch.get_plug(self.device)
sassoftware/rmake3 | rmake/cmdline/monitor.py | Python | apache-2.0 | 26,810 | 0.001567 |
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Watch a running build job and output changes to the screen.
"""
import fcntl
import os
import select
import socket
import sys
import tempfile
import termios
import time
import traceback
from rmake import errors
from rmake.build import buildjob, buildtrove
from rmake.cmdline import query
def _getUri(client):
if not isinstance(client.uri, str) or client.uri.startswith('unix://'):
fd, tmpPath = tempfile.mkstemp()
os.close(fd)
uri = 'unix://' + tmpPath
else:
host = socket.gethostname()
uri = 'http://%s' % host
tmpPath = None
return uri, tmpPath
def monitorJob(client, jobId, showTroveDetails=False, showBuildLogs=False,
exitOnFinish=None, uri=None, serve=True, out=None,
displayClass=None):
if not uri:
uri, tmpPath = _getUri(client)
else:
tmpPath = None
if not displayClass:
displayClass = JobLogDisplay
try:
display = displayClass(client, showBuildLogs=showBuildLogs, out=out,
exitOnFinish=exitOnFinish)
client = client.listenToEvents(uri, jobId, display,
showTroveDetails=showTroveDetails,
serve=serve)
return client
finally:
if serve and tmpPath:
os.remove(tmpPath)
def waitForJob(client, jobId, uri=None, serve=True):
if not uri:
uri, tmpPath = _getUri(client)
else:
tmpPath = None
try:
display = SilentDisplay(client)
display._primeOutput(jobId)
return client.listenToEvents(uri, jobId, display, serve=serve)
finally:
if tmpPath:
os.remove(tmpPath)
class _AbstractDisplay(object):#xmlrpc.BasicXMLRPCStatusSubscriber):
def __init__(self, client, showBuildLogs=True, out=None,
exitOnFinish=True):
self.client = client
self.finished = False
self.exitOnFinish = True # override exitOnFinish setting
self.showBuildLogs = showBuildLogs
if not out:
out = sys.stdout
self.out = out
def close(self):
pass
def _serveLoopHook(self):
pass
def _msg(self, msg, *args):
self.out.write('[%s] %s\n' % (time.strftime('%X'), msg))
self.out.flush()
def _jobStateUpdated(self, jobId, state, status):
isFinished = (state in (buildjob.JOB_STATE_FAILED,
buildjob.JOB_STATE_BUILT))
if isFinished:
self._setFinished()
def _setFinished(self):
self.finished = True
def _isFinished(self):
return self.finished
def _shouldExit(self):
return self._isFinished() and self.exitOnFinish
def _primeOutput(self, jobId):
job = self.client.getJob(jobId, withTroves=False)
if job.isFinished():
self._setFinished()
class SilentDisplay(_AbstractDisplay):
pass
class JobLogDisplay(_AbstractDisplay):
def __init__(self, client, showBuildLogs=True, out=None,
exitOnFinish=None):
_AbstractDisplay.__init__(self, client, out=out,
showBuildLogs=showBuildLogs,
exitOnFinish=exitOnFinish)
self.buildingTroves = {}
def _tailBuildLog(self, jobId, troveTuple):
mark = self.buildingTroves.get((jobId, troveTuple), [0])[0]
self.buildingTroves[jobId, troveTuple] = [mark, True]
self.out.write('Tailing %s build log:\n\n' % troveTuple[0])
def _stopTailing(self, jobId, troveTuple):
mark = self.buildingTroves.get((jobId, troveTuple), [0])[0]
self.buildingTroves[jobId, troveTuple] = [ mark, False ]
def _serveLoopHook(self):
if not self.buildingTroves:
return
for (jobId, troveTuple), (mark, tail) in self.buildingTroves.items():
if not tail:
continue
try:
moreData, data, mark = self.client.getTroveBuildLog(jobId,
troveTuple,
mark)
except:
moreData = True
data = ''
self.out.write(data)
if not moreData:
del self.buildingTroves[jobId, troveTuple]
else:
self.buildingTroves[jobId, troveTuple][0] = mark
def _jobTrovesSet(self, jobId, troveData):
self._msg('[%d] - job troves set' % jobId)
def _jobStateUpdated(self, jobId, state, status):
_AbstractDisplay._jobStateUpdated(self, jobId, state, status)
state = buildjob.stateNames[state]
        if self._isFinished():
self._serveLoopHook()
self._msg('[%d] - State: %s' % (jobId, state))
if status:
self._msg('[%d] - %s' % (jobId, status))
def _jobLogUpdated(self, jobId, state, status):
self._msg('[%d] %s' % (jobId, status))
def _troveStateUpdated(self, (jobId, troveTuple), state, status):
        isBuilding = (state in (buildtrove.TroveState.BUILDING,
buildtrove.TroveState.RESOLVING))
state = buildtrove.stateNames[state]
self._msg('[%d] - %s - State: %s' % (jobId, troveTuple[0], state))
if status:
self._msg('[%d] - %s - %s' % (jobId, troveTuple[0], status))
if isBuilding and self.showBuildLogs:
self._tailBuildLog(jobId, troveTuple)
else:
self._stopTailing(jobId, troveTuple)
def _troveLogUpdated(self, (jobId, troveTuple), state, status):
state = buildtrove.stateNames[state]
self._msg('[%d] - %s - %s' % (jobId, troveTuple[0], status))
def _trovePreparingChroot(self, (jobId, troveTuple), host, path):
if host == '_local_':
msg = 'Chroot at %s' % path
else:
msg = 'Chroot at Node %s:%s' % (host, path)
self._msg('[%d] - %s - %s' % (jobId, troveTuple[0], msg))
def _primeOutput(self, jobId):
logMark = 0
while True:
newLogs = self.client.getJobLogs(jobId, logMark)
if not newLogs:
break
logMark += len(newLogs)
for (timeStamp, message, args) in newLogs:
print '[%s] [%s] - %s' % (timeStamp, jobId, message)
BUILDING = buildtrove.TroveState.BUILDING
troveTups = self.client.listTrovesByState(jobId, BUILDING).get(BUILDING, [])
for troveTuple in troveTups:
self._tailBuildLog(jobId, troveTuple)
_AbstractDisplay._primeOutput(self, jobId)
def set_raw_mode():
fd = sys.stdin.fileno()
oldTerm = termios.tcgetattr(fd)
newattr = termios.tcgetattr(fd)
newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
termios.tcsetattr(fd, termios.TCSANOW, newattr)
oldFlags = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, oldFlags | os.O_NONBLOCK)
return oldTerm, oldFlags
def restore_terminal(oldTerm, oldFlags):
fd = sys.stdin.fileno()
if oldTerm:
termios.tcsetattr(fd, termios.TCSAFLUSH, oldTerm)
if oldFlags:
fcntl.fcntl(fd, fcntl.F_SETFL, oldFlags)
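# These two helpers are meant to be paired; a minimal sketch of the intended
# pattern (the loop body here is hypothetical):
#
#     oldTerm, oldFlags = set_raw_mode()
#     try:
#         pass  # read single keystrokes from sys.stdin without echo
#     finally:
#         restore_terminal(oldTerm, oldFlags)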
class _AbstractDisplay(object):#xmlrpc.BasicXMLRPCStatusSubscriber):
def __init__(self, client, showBuildLogs=True, out=None):
self.client = client
self.finished = False
self.showBuildLogs = showBuildLogs
self.troveStates = {}
self.troveIndex = None
self.troveDislay = False
self.ou
feixiao5566/Py_Rabbic | IO/自定义迭代器.py | Python | bsd-2-clause | 402 | 0.00995 |
#!/usr/bin/env python
# encoding: utf-8
class MyRange(object):
def __init__(self, n):
        self.idx = 0
        self.n = n
def __iter__(self):
return self
def next(self):
if self.idx < self.n:
val = self.idx
self.idx += 1
            return val
else:
raise StopIteration()
myRange = MyRange(3)
for i in myRange:
print i
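# The class above targets Python 2 (next() and the print statement). A sketch of the
# same custom iterator under Python 3, assuming nothing beyond the standard iterator
# protocol:
#
#     class MyRange3:
#         def __init__(self, n):
#             self.idx = 0
#             self.n = n
#         def __iter__(self):
#             return self
#         def __next__(self):
#             if self.idx < self.n:
#                 val = self.idx
#                 self.idx += 1
#                 return val
#             raise StopIteration()
#
#     for i in MyRange3(3):
#         print(i)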
hozn/keepassdb | setup.py | Python | gpl-3.0 | 2,399 | 0.013344 |
# -*- coding: utf-8 -*-
import os.path
import re
import warnings
try:
from setuptools import setup, find_packages
except ImportError:
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
version = '0.2.1'
news = os.path.join(os.path.dirname(__file__), 'docs', 'news.rst')
news = open(news).read()
parts = re.split(r'([0-9\.]+)\s*\n\r?-+\n\r?', news)
found_news = ''
for i in range(len(parts)-1):
if parts[i] == version:
        found_news = parts[i+1]
break
if not found_news:
warnings.warn('No news for this version found.')
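# The regex split above assumes docs/news.rst lists releases as a version number over
# a dashed underline, e.g. (illustrative content only):
#
#     0.2.1
#     -----
#     - Fixed lock-file handling.
#
# so `parts` alternates between prose blocks and the captured version strings.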
long_description = """
keepassdb is a Python library that provides functionality for reading and writing
KeePass 1.x (and KeePassX) password databases.
This library brings together work by multiple authors, including:
- Karsten-Kai König <kkoenig@posteo.de>
- Brett Viren <brett.viren@gmail.com>
- Wakayama Shirou <shirou.faw@gmail.com>
"""
if found_news:
title = 'Changes in %s' % version
long_description += "\n%s\n%s\n" % (title, '-'*len(title))
long_description += found_news
setup(
name = "keepassdb",
version = version,
author = "Hans Le
|
llelid",
author_email = "hans@xmpl.org",
url = "http://github.com/hozn/keepassdb",
    license = "GPLv3",
description = "Python library for reading and writing KeePass 1.x databases.",
long_description = long_description,
packages = find_packages(),
include_package_data=True,
package_data={'keepassdb': ['tests/resources/*']},
install_requires=['pycrypto>=2.6,<3.0dev'],
tests_require = ['nose>=1.0.3'],
test_suite = 'keepassdb.tests',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Topic :: Security :: Cryptography',
'Topic :: Software Development :: Libraries :: Python Modules'
],
use_2to3=True,
zip_safe=False # Technically it should be fine, but there are issues w/ 2to3
)
harveybia/face-hack | venv/face/bin/player.py | Python | mit | 2,210 | 0 |
#!/Users/harvey/Projects/face-hack/venv/face/bin/python
#
# The Python Imaging Library
# $Id$
#
from __future__ import print_function
try:
from tkinter import *
except ImportError:
from Tkinter import *
from PIL import Image, ImageTk
import sys
# --------------------------------------------------------------------
# an image animation player
class UI(Label):
def __init__(self, master, im):
if isinstance(im, list):
# list of images
            self.im = im[1:]
im = self.im[0]
else:
# sequence
self.im = im
if im.mode == "1":
self.image = ImageTk.BitmapImage(im, foreground="white")
else:
self.image = ImageTk.PhotoImage(im)
Label.__init__(self, master, image=self.image, bg="black", bd=0)
self.update()
try:
duration = im.info["duration"]
except KeyError:
duration = 100
self.after(duration, self.next)
def next(self):
if isinstance(self.im, list):
try:
im = self.im[0]
del self.im[0]
self.image.paste(im)
except IndexError:
return # end of list
else:
try:
im = self.im
im.seek(im.tell() + 1)
self.image.paste(im)
except EOFError:
return # end of file
try:
duration = im.info["duration"]
except KeyError:
duration = 100
self.after(duration, self.next)
self.update_idletasks()
# --------------------------------------------------------------------
# script interface
if __name__ == "__main__":
if not sys.argv[1:]:
print("Syntax: python player.py imagefile(s)")
sys.exit(1)
filename = sys.argv[1]
root = Tk()
root.title(filename)
if len(sys.argv) > 2:
# list of images
print("loading...")
im = []
for filename in sys.argv[1:]:
im.append(Image.open(filename))
else:
# sequence
im = Image.open(filename)
UI(root, im).pack()
root.mainloop()
kevin-intel/scikit-learn | sklearn/impute/_knn.py | Python | bsd-3-clause | 11,682 | 0 |
# Authors: Ashim Bhattarai <ashimb9@gmail.com>
# Thomas J Fan <thomasjpfan@gmail.com>
# License: BSD 3 clause
import numpy as np
from ._base import _BaseImputer
from ..utils.validation import FLOAT_DTYPES
from ..metrics import pairwise_distances_chunked
from ..metrics.pairwise import _NAN_METRICS
from ..neighbors._base import _get_weights
from ..neighbors._base import _check_weights
from ..utils import is_scalar_nan
from ..utils._mask import _get_mask
from ..utils.validation import check_is_fitted
class KNNImputer(_BaseImputer):
"""Imputation for completing missing values using k-Nearest Neighbors.
Each sample's missing values are imputed using the mean value from
`n_neighbors` nearest neighbors found in the training set. Two samples are
close if the features that neither is missing are close.
Read more in the :ref:`User Guide <knnimpute>`.
.. versionadded:: 0.22
Parameters
----------
missing_values : int, float, str, np.nan or None, default=np.nan
The placeholder for the missing values. All occurrences of
`missing_values` will be imputed. For pandas' dataframes with
nullable integer dtypes with missing values, `missing_values`
should be set to np.nan, since `pd.NA` will be converted to np.nan.
n_neighbors : int, default=5
Number of neighboring samples to use for imputation.
weights : {'uniform', 'distance'} or callable, default='uniform'
Weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood are
weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- callable : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
metric : {'nan_euclidean'} or callable, default='nan_euclidean'
Distance metric for searching neighbors. Possible values:
- 'nan_euclidean'
- callable : a user-defined function which conforms to the definition
of ``_pairwise_callable(X, Y, metric, **kwds)``. The function
accepts two arrays, X and Y, and a `missing_values` keyword in
`kwds` and returns a scalar distance value.
copy : bool, default=True
If True, a copy of X will be created. If False, imputation will
be done in-place whenever possible.
add_indicator : bool, default=False
If True, a :class:`MissingIndicator` transform will stack onto the
output of the imputer's transform. This allows a predictive estimator
to account for missingness despite imputation. If a feature has no
missing values at fit/train time, the feature won't appear on the
missing indicator even if there are missing values at transform/test
time.
Attributes
----------
indicator_ : :class:`~sklearn.impute.MissingIndicator`
Indicator used to add binary indicators for missing values.
``None`` if add_indicator is False.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
References
----------
* Olga Troyanskaya, Michael Cantor, Gavin Sherlock, Pat Brown, Trevor
Hastie, Robert Tibshirani, David Botstein and Russ B. Altman, Missing
value estimation methods for DNA microarrays, BIOINFORMATICS Vol. 17
no. 6, 2001 Pages 520-525.
Examples
--------
>>> import numpy as np
>>> from sklearn.impute import KNNImputer
>>> X = [[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]]
>>> imputer = KNNImputer(n_neighbors=2)
>>> imputer.fit_transform(X)
array([[1. , 2. , 4. ],
[3. , 4. , 3. ],
[5.5, 6. , 5. ],
[8. , 8. , 7. ]])
"""
def __init__(self, *, missing_values=np.nan, n_neighbors=5,
weights="uniform", metric="nan_euclidean", copy=True,
add_indicator=False):
super().__init__(
missing_values=missing_values,
add_indicator=add_indicator
)
self.n_neighbors = n_neighbors
self.weights = weights
self.metric = metric
self.copy = copy
def _calc_impute(self, dist_pot_donors, n_neighbors,
fit_X_col, mask_fit_X_col):
"""Helper function to impute a single column.
Parameters
----------
dist_pot_donors : ndarray of shape (n_receivers, n_potential_donors)
Distance matrix between the receivers and potential donors from
training set. There must be at least one non-nan distance between
a receiver and a potential donor.
n_neighbors : int
Number of neighbors to consider.
fit_X_col : ndarray of shape (n_potential_donors,)
Column of potential donors from training set.
mask_fit_X_col : ndarray of shape (n_potential_donors,)
Missing mask for fit_X_col.
Returns
-------
imputed_values: ndarray of shape (n_receivers,)
Imputed values for receiver.
"""
# Get donors
donors_idx = np.argpartition(dist_pot_donors, n_neighbors - 1,
axis=1)[:, :n_neighbors]
# Get weight matrix from from distance matrix
donors_dist = dist_pot_donors[
np.arange(donors_idx.shape[0])[:, None], donors_idx]
weight_matrix = _get_weights(donors_dist, self.weights)
# fill nans with zeros
if weight_matrix is not None:
weight_matrix[np.isnan(weight_matrix)] = 0.0
# Retrieve donor values and calculate kNN average
        donors = fit_X_col.take(donors_idx)
donors_mask = mask_fit_X_col.take(donors_idx)
donors = np.ma.array(donors, mask=donors_mask)
return np.ma.average(donors, axis=1, weights=weight_matrix).data
def fit(self, X, y=None):
"""Fit the imputer on X.
Parameters
----------
X : array-like shape of (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
self : object
"""
# Check data integrity and calling arguments
if not is_scalar_nan(self.missing_values):
force_all_finite = True
else:
force_all_finite = "allow-nan"
if self.metric not in _NAN_METRICS and not callable(self.metric):
raise ValueError(
"The selected metric does not support NaN values")
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got {}".format(self.n_neighbors))
X = self._validate_data(X, accept_sparse=False, dtype=FLOAT_DTYPES,
force_all_finite=force_all_finite,
copy=self.copy)
_check_weights(self.weights)
self._fit_X = X
self._mask_fit_X = _get_mask(self._fit_X, self.missing_values)
super()._fit_indicator(self._mask_fit_X)
return self
def transform(self, X):
"""Impute all missing values in X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input data to complete.
Returns
-------
X : array-like of shape (n_samples, n_output_features)
The imputed dataset. `n_output_features` is the number of features
that is not always missing during `fit`.
"""
check_is_fitted(self)
if not is_scalar_nan(self.missing_values):
force_all_finite = True
else:
force_all_finite = "allow-nan"
X = self._validate_data(X, accept_sparse=False, dtype=FLOAT_DTYPES,
force_all_finite=force_all_finite,
copy=s
johnkerl/scripts-math | pythonlib/bin/jac.py | Python | bsd-2-clause | 5,698 | 0.044577 |
#!/usr/bin/python -Wall
# ================================================================
# Copyright (c) John Kerl 2007
# kerl.john.r@gmail.com
# ================================================================
from __future__ import division # 1/2 = 0.5, not 0.
from math import *
from sackmat_m import *
import copy
# ----------------------------------------------------------------
# Let
# F: R^m -> R^n
# i.e.
# [ F_1(x_1, ..., x_m) ]
# F(x) = [ : : : ]
# [ F_n(x_1, ..., x_m) ].
# Then Dij = dFi/dxj, i=1..n, j=1..m (an n x m matrix).
# This is numerically approximated (forward-difference approximation) by
# (F(x1,...,xj+h,...,xn) - F(x1,...,xj,...,xn)) / h
# or (centered-difference approximation)
# (F(x1,...,xj+h/2,...,xn) - F(x1,...,xj-h/2,...,xn)) / h.
def jac(F, q, h=1e-6):
m = len(q)
n = len(F(q))
DFq = make_zero_matrix(n, m)
# Centered-difference approximation
h2 = 0.5 * h
for j in range(0, m):
qb = copy.copy(q)
qf = copy.copy(q)
qb[j] -= h2
qf[j] += h2
Fqb = F(qb)
Fqf = F(qf)
for i in range(0, n):
DFq[i][j] = (Fqf[i] - Fqb[i]) / h
return DFq
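# Quick sanity check (an added illustration with made-up inputs): for
# F(x, y) = [x*y, x + y] the analytic Jacobian is [[y, x], [1, 1]], so
#     jac(lambda q: [q[0] * q[1], q[0] + q[1]], [2.0, 3.0])
# should return a 2x2 matrix close to [[3, 2], [1, 1]].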
# ----------------------------------------------------------------
def F1(q):
[x, y, z] = q
#f1 = x**2
#f2 = y**2
#f3 = z**2
#f1 = x**2 * y**2
#f2 = y**2 * z**2
#f3 = z**2 * x**2
f1 = x * y
f2 = y * z
f3 = z * x
#f1 = 1.0 * y * y
#f2 = 2.0 * x
#f3 = 3.0 * z
return [f1, f2, f3]
# ----------------------------------------------------------------
def F2(q):
[x, y, z] = q
return [x**2 + y**2 + z**2]
# ----------------------------------------------------------------
def do_point(F,q):
print "q =", q
DFq = jac(F, q)
print "DFq="
print DFq
#print "det(DFq) =", DFq.det()
# ----------------------------------------------------------------
def do_point_with_det(F,q):
print "-" * 40
print "q =", q
DFq = jac(F, q)
print "DFq="
print DFq
print "det(DFq) =", DFq.det()
# ----------------------------------------------------------------
def frufru():
F = F1
do_point_with_det(F, [0,0,0])
print
do_point_with_det(F, [0,0,1])
do_point_with_det(F, [0,1,0])
do_point_with_det(F, [1,0,0])
print
do_point_with_det(F, [1,1,0])
do_point_with_det(F, [1,0,1])
do_point_with_det(F, [0,1,1])
print
do_point_with_det(F, [1,1,1])
do_point_with_det(F, [1,2,3])
do_point_with_det(F, [sqrt(0.5),sqrt(0.5),0])
a=0.1
do_point_with_det(F, [cos(a),sin(a),0])
a = 0.2
b = 0.3
c = sqrt(1 - a**2 - b**2)
do_point_with_det(F, [a,b,c])
a = 0.8
b = 0.2
c = sqrt(1 - a**2 - b**2)
do_point_with_det(F, [a,b,c])
print
# ----------------------------------------------------------------
def F(q):
[x, y, z] = q
#f1 = x**2
#f2 = y**2
#f3 = z**2
#f1 = x**2 * y**2
#f2 = y**2 * z**2
#f3 = z**2 * x**2
f1 = x * y
f2 = y * z
f3 = z * x
#f1 = 1.0 * y * y
#f2 = 2.0 * x
#f3 = 3.0 * z
return [f1, f2, f3]
# ----------------------------------------------------------------
def G(q):
[x, y, z] = q
return [x**2 + y**2 + z**2]
# ----------------------------------------------------------------
def gt_something():
thetalo = 0
thetahi = 2*math.pi
philo = 0
phihi = math.pi
nphi = 12
ntheta = 12
if (len(sys.argv) == 3):
nphi = int(sys.argv[1])
ntheta = int(sys.argv[2])
dtheta = (thetahi-thetalo)/ntheta
dphi = (phihi-philo)/nphi
phi = 0
for ii in range(0, nphi):
theta = 0
for jj in range(0, ntheta):
x = sin(phi) * cos(theta)
y = sin(phi) * sin(theta)
z = cos(phi)
q = [x,y,z]
DF = jac(F, q)
d = DF.det()
# Let G(x,y,z) = x^2 + y^2 + z^2. The unit sphere is the level set
# for G(x,y,z) = 1.
# Tangent plane at (u,v,w):
# dG/dx(x-u) + dG/dy(y-v) + dG/dz(z-w)
# where (u,v,w) are the coordinates of the point q and (x,y,z) are variable.
DG = jac(G, q)
# For DF restricted to this tangent plane:
# * DG (i.e. grad G) is the normal vector
# * This gives a point-normal form for the tangent plane
# * Project the standard basis for R3 onto the tangent plane
# * Row-reduce
DF = jac(F, q)
# * Form an orthonormal basis
# * Compute DF of the basis
# * Row-reduce that to get the rank of DF on TM|q
#print "q = ", q,
#print "det(DF) = ", d
#print "%7.4f %7.4f %7.4f %7.4f %7.4f,%7.4f %7.4f,%7.4f %7.4f,%7.4f" % (
# x,y,z, d, DG[0][0], -DG[0][0]*x, DG[0][1], -DG[0][1]*y, DG[0][2], -DG[0][2]*z)
nx = DG[0][0]
ny = DG[0][1]
nz = DG[0][2]
nml = [nx, ny, nz]
e0 = [1,0,0]
e1 = [0,1,0]
e2 = [0,0,1]
# Project the standard basis for R3 down to the tangent plane TM|q.
proj_e0 = projperp(e0, nml)
proj_e1 = projperp(e1, nml)
proj_e2 = projperp(e2, nml)
proj_e = sackmat([proj_e0, proj_e1, proj_e2])
# Row-reduce, compute rank, and trim
proj_e.row_echelon_form()
rank = proj_e.rank_rr()
proj_e.elements = proj_e.elements[0:rank]
# Orthonormalize
proj_e = gram_schmidt(proj_e)
#print "q=[%7.4f,%7.4f,%7.4f]" % (x, y, z),
#print "nml=[%7.4f,%7.4f,%7.4f]" % (nx, ny, nz),
#print "p0=[%7.4f,%7.4f,%7.4f] p1=[%7.4f,%7.4f,%7.4f]" % (
#proj_e[0][0], proj_e[0][1], proj_e[0][2], proj_e[1][0], proj_e[1][1], proj_e[1][2]),
# Take DF of the orthonormal basis.
proj_e = proj_e.transpose()
proj_e = DF * proj_e
            proj_e = proj_e.transpose()
            rank = proj_e.rank()
#print "p0=[%7.4f,%7.4f,%7.4f] p1=[%7.4f,%7.4f,%7.4f]" % (
#proj_e[0][0], proj_e[0][1], proj_e[0][2], proj_e[1][0], proj_e[1][1], proj_e[1][2]),
#print "rank=", proj_e.rank_rr(),
#print "d=%11.3e" % (d),
# xxx hack
if (rank == 1):
d = 0.7
#print "%11.3e" % (d),
print "%8.4f" % (d),
#print
theta += dtheta
print
phi += dphi
gt_something()
Azure/azure-sdk-for-python | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_06_01/aio/_web_site_management_client.py | Python | mit | 8,647 | 0.004857 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, Optional, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
from .. import models
from ._configuration import WebSiteManagementClientConfiguration
from .operations import AppServiceCertificateOrdersOperations, AppServiceEnvironmentsOperations, AppServicePlansOperations, CertificateRegistrationProviderOperations, CertificatesOperations, DeletedWebAppsOperations, DiagnosticsOperations, DomainRegistrationProviderOperations, DomainsOperations, ProviderOperations, RecommendationsOperations, ResourceHealthMetadataOperations, StaticSitesOperations, TopLevelDomainsOperations, WebAppsOperations, WebSiteManagementClientOperationsMixin
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class WebSiteManagementClient(WebSiteManagementClientOperationsMixin):
"""WebSite Management Client.
:ivar app_service_certificate_orders: AppServiceCertificateOrdersOperations operations
:vartype app_service_certificate_orders:
azure.mgmt.web.v2020_06_01.aio.operations.AppServiceCertificateOrdersOperations
:ivar certificate_registration_provider: CertificateRegistrationProviderOperations operations
:vartype certificate_registration_provider:
azure.mgmt.web.v2020_06_01.aio.operations.CertificateRegistrationProviderOperations
:ivar domains: DomainsOperations operations
:vartype domains: azure.mgmt.web.v2020_06_01.aio.operations.DomainsOperations
:ivar top_level_domains: TopLevelDomainsOperations operations
:vartype top_level_domains: azure.mgmt.web.v2020_06_01.aio.operations.TopLevelDomainsOperations
:ivar domain_registration_provider: DomainRegistrationProviderOperations operations
:vartype domain_registration_provider:
azure.mgmt.web.v2020_06_01.aio.operations.DomainRegistrationProviderOperations
:ivar certificates: CertificatesOperations operations
:vartype certificates: azure.mgmt.web.v2020_06_01.aio.operations.CertificatesOperations
:ivar deleted_web_apps: DeletedWebAppsOperations operations
:vartype deleted_web_apps: azure.mgmt.web.v2020_06_01.aio.operations.DeletedWebAppsOperations
:ivar diagnostics: DiagnosticsOperations operations
:vartype diagnostics: azure.mgmt.web.v2020_06_01.aio.operations.DiagnosticsOperations
:ivar provider: ProviderOperations operations
:vartype provider: azure.mgmt.web.v2020_06_01.aio.operations.ProviderOperations
:ivar recommendations: RecommendationsOperations operations
:vartype recommendations: azure.mgmt.web.v2020_06_01.aio.operations.RecommendationsOperations
:ivar web_apps: WebAppsOperations operations
:vartype web_apps: azure.mgmt.web.v2020_06_01.aio.operations.WebAppsOperations
:ivar static_sites: StaticSitesOperations operations
:vartype static_sites: azure.mgmt.web.v2020_06_01.aio.operations.StaticSitesOperations
:ivar app_service_environments: AppServiceEnvironmentsOperations operations
:vartype app_service_environments:
azure.mgmt.web.v2020_06_01.aio.operations.AppServiceEnvironmentsOperations
:ivar app_service_plans: AppServicePlansOperations operations
:vartype app_service_plans: azure.mgmt.web.v2020_06_01.aio.operations.AppServicePlansOperations
:ivar resource_health_metadata: ResourceHealthMetadataOperations operations
:vartype resource_health_metadata:
azure.mgmt.web.v2020_06_01.aio.operations.ResourceHealthMetadataOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Your Azure subscription ID. This is a GUID-formatted string (e.g.
00000000-0000-0000-0000-000000000000).
:type subscription_id: str
:param base_url: Service URL. Default value is 'https://management.azure.com'.
:type base_url: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = WebSiteManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.app_service_certificate_orders = AppServiceCertificateOrdersOperations(self._client, self._config, self._serialize, self._deserialize)
self.certificate_registration_provider = CertificateRegistrationProviderOperations(self._client, self._config, self._serialize, self._deserialize)
self.domains = DomainsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.top_level_domains = TopLevelDomainsOperations(self._client, self._config, self._serialize, self._deserialize)
self.domain_registration_provider = DomainRegistrationProviderOperations(self._client, self._config, self._serialize, self._deserialize)
        self.certificates = CertificatesOperations(self._client, self._config, self._serialize, self._deserialize)
self.deleted_web_apps = DeletedWebAppsOperations(self._client, self._config, self._serialize, self._deserialize)
self.diagnostics = DiagnosticsOperations(self._client, self._config, self._serialize, self._deserialize)
self.provider = ProviderOperations(self._client, self._config, self._serialize, self._deserialize)
self.recommendations = RecommendationsOperations(self._client, self._config, self._serialize, self._deserialize)
self.web_apps = WebAppsOperations(self._client, self._config, self._serialize, self._deserialize)
self.static_sites = StaticSitesOperations(self._client, self._config, self._serialize, self._deserialize)
self.app_service_environments = AppServiceEnvironmentsOperations(self._client, self._config, self._serialize, self._deserialize)
self.app_service_plans = AppServicePlansOperations(self._client, self._config, self._serialize, self._deserialize)
self.resource_health_metadata = ResourceHealthMetadataOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request: HttpRequest,
**kwargs: Any
) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
1m0r74l17y/FortyTwo | FortyTwo/__init__.py | Python | mit | 80 | 0.0125 |
from FortyTwo.fortytwo import *
def Start():
"""No Clue what to add here"""
sloria/sepal | sepal/datasets/tasks.py | Python | bsd-3-clause | 8,545 | 0.007373 |
import os
from django.conf import settings
import yaafelib as yf
import wave
import contextlib
from celery import task
from sepal.datasets.models import *
from sepal.datasets.utils import filter_by_key, find_dict_by_item
@task()
def handle_uploaded_file(f):
'''Saves an uploaded data source to MEDIA_ROOT/data_sources
'''
with open(os.path.join(settings.MEDIA_ROOT, 'data_sources', f.name), 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
return destination
@task()
def extract_features(dataset_id, instance_id, audiofile_path):
    dataset = Dataset.objects.get(pk=dataset_id)
inst = Instance.objects.get(pk=instance_id)
n_frames, sample_rate, duration = 0, 0, 0
# Calculate the sample rate and duration
with contextlib.closing(wave.open(audiofile_path, 'r')) as audiofile:
n_frames = audiofile.getnframes()
sample_rate = audiofile.getframerate()
duration = n_frames / float(sample_rate)
# Format - {'Display name': 'name: Definition'}
FEATURES = [
{'display_name': 'Spectral Shape Characteristics',
'yaafe_name': 'sss',
'yaafe_definition': 'SpectralShapeStatistics',
'subfeatures': ['Spectral centroid', 'Spectral spread', 'Spectral kurtosis', 'Spectral skewness']
},
{'display_name': 'Temporal Shape Characteristics',
'yaafe_name': 'tss',
'yaafe_definition': 'TemporalShapeStatistics',
'subfeatures': ['Temporal centroid', 'Temporal spread', 'Temporal kurtosis', 'Temporal skewness']
},
{'display_name': 'ZCR',
'yaafe_name': 'zcr',
'yaafe_definition': 'ZCR',
'unit': 'Hz'
},
{'display_name': 'Energy',
'yaafe_name': 'energy',
'yaafe_definition': 'Energy',
},
{'display_name': 'Loudness',
'yaafe_name': 'loudness',
'yaafe_definition': 'Loudness',
},
{'display_name': 'Spectral rolloff',
'yaafe_name': 'spectral_rolloff',
'yaafe_definition': 'SpectralRolloff',
},
{'display_name': 'Perceptual sharpness',
'yaafe_name': 'perceptual_sharpness',
'yaafe_definition': 'PerceptualSharpness',
},
{'display_name': 'Perceptual spread',
'yaafe_name': 'perceptual_spread',
'yaafe_definition': 'PerceptualSpread',
},
{'display_name': 'Duration',
'unit': 's',
},
{'display_name': 'Sample rate',
'unit': 'Hz',
},
{'display_name': 'Spectral decrease',
'yaafe_name': 'spectral_decrease',
'yaafe_definition': 'SpectralDecrease',
},
{'display_name': "Spectral flatness",
'yaafe_name': 'spectral_flatness',
'yaafe_definition': 'SpectralFlatness',
},
# {'display_name': "Spectral flux",
# 'yaafe_name': 'spectral_flux',
# 'yaafe_definition': 'SpectralFlux',
# },
{'display_name': "Spectral slope",
'yaafe_name': 'spectral_slope',
'yaafe_definition': 'SpectralSlope',
},
# {'display_name': "Spectral variation",
# 'yaafe_name': 'spectral_variation',
# 'yaafe_definition': 'SpectralVariation',
# }
]
# Add features to extract
feature_plan = yf.FeaturePlan(sample_rate=sample_rate, resample=False)
for feature in FEATURES:
if 'yaafe_definition' in feature:
# YAAFE feature plans take definitions of the form: 'zcr: ZCR'
full_definition = feature['yaafe_name'] + ': ' + feature['yaafe_definition']
# Add the feature to the feature plan to be extracted
feature_plan.addFeature(full_definition)
# Configure an Engine
engine = yf.Engine()
engine.load(feature_plan.getDataFlow())
# Extract features
afp = yf.AudioFileProcessor()
afp.processFile(engine, audiofile_path)
# outputs dict format - {'Spectral centroid': [[2.33], [4.34],...[2.55]]}
outputs = {}
# Read and store output arrays to outputs dict
for feature in FEATURES:
if 'yaafe_definition' in feature: # Exclude duration and sample rate
output_name = feature['yaafe_name']
# If the feature has subfeatures, e.g. Spec shape stats
if 'subfeatures' in feature:
full_output = engine.readOutput(output_name)
for i, subfeature_display_name in enumerate(feature['subfeatures']):
outputs[subfeature_display_name] = full_output[:, i]
# If the feature has only 1 dimension(1 X T array)
else:
display_name = feature['display_name']
a = engine.readOutput(output_name) # 2D array
# Transpose data to make it a 1D array
outputs[display_name] = a.transpose()[0]
# Create YAAFE feature objects
feature_obj_list = []
for display_name in outputs.keys():
feature = find_dict_by_item(('display_name', display_name), FEATURES)
f, created = Feature.objects.get_or_create(
name=display_name.lower(),
display_name=display_name
)
if feature and ('unit' in feature):
f.unit = feature['unit']
f.save()
feature_obj_list.append(f)
# Create Sample rate and Duration objects
rate_obj, created = Feature.objects.get_or_create(name='sample rate')
if not rate_obj.unit:
rate_obj.unit = 'Hz'
rate_obj.save()
feature_obj_list.append(rate_obj)
duration_obj, created = Feature.objects.get_or_create(name='duration')
if not duration_obj.unit:
duration_obj.unit = 's'
duration_obj.save()
feature_obj_list.append(duration_obj)
# Associate features with instance
# for feature in feature_obj_list:
# inst.features.add(feature)
# If dataset has labels
if dataset.labels():
# NOTE: This assumes there's only one label name per dataset.
# Just indexes the first label name
label_name = dataset.labels()[0]
else:
# attach a placeholder LabelName called 'variable'
filtered = LabelName.objects.filter(name='variable')
# make sure that 'get' doesn't return an error if there are more than 1
# LabelName called 'variable'
if len(filtered) <= 1:
label_name, c = LabelName.objects.get_or_create(name='variable')
else:
label_name = filtered[0]
# Add a placeholder label value called "none" to instance
# This is necessary in order for plotting to work
filtered = LabelValue.objects.filter(value="none", label_name=label_name)
if len(filtered) <= 1:
no_label, c = LabelValue.objects.get_or_create(value="none",
label_name=label_name)
else:
no_label = filtered[0]
inst.label_values.add(no_label)
inst.save()
# Save output data and associate it with inst
for display_name, output in outputs.iteritems():
if output.size > 0: # Avoid empty data
for i in range(output[0].size):
output_mean = output[i].mean()
                FeatureValue.objects.create(value=output_mean,
                                            feature=Feature.objects.get(name=display_name.lower()))
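# Hedged sketch, not from the original source: a minimal, standalone YAAFE
# extraction assuming the same yaafelib API used above ('name: Definition'
# feature-plan strings; Engine.readOutput returning a 2-D numpy array).
# The file path and chosen features are illustrative only.
import yaafelib as yf

def extract_mean_features(audio_path, sample_rate=44100):
    plan = yf.FeaturePlan(sample_rate=sample_rate, resample=False)
    plan.addFeature('zcr: ZCR')          # zero-crossing rate
    plan.addFeature('energy: Energy')    # frame energy
    engine = yf.Engine()
    engine.load(plan.getDataFlow())
    afp = yf.AudioFileProcessor()
    afp.processFile(engine, audio_path)
    # Collapse each (T x 1) output array to a single mean value per feature
    return {name: engine.readOutput(name).mean() for name in ('zcr', 'energy')}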
|
blocktrail/blocktrail-sdk-python
|
tests/cross_platform_test.py
|
Python
|
mit
| 1,454 | 0.003439 |
import unittest
import hashlib
import httpsig.sign as sign
from httpsig.utils import parse_authorization_header
from requests.models import RequestEncodingMixin
class CrossPlatformTestCase(unittest.TestCase):
def test_content_md5(self):
data = {'signature': "HPMOHRgPSMKdXrU6AqQs/i9S7alOakkHsJiqLGmInt05Cxj6b/WhS7kJxbIQxKmDW08YKzoFnbVZIoTI2qofEzk="}
        assert RequestEncodingMixin._encode_params(data) == "signature=HPMOHRgPSMKdXrU6AqQs%2Fi9S7alOakkHsJiqLGmInt05Cxj6b%2FWhS7kJxbIQxKmDW08YKzoFnbVZIoTI2qofEzk%3D"
assert hashlib.md5(RequestEncodingMixin._encode_params(data).encode("utf-8")).hexdigest() == "fdfc1a717d2c97649f3b8b2142507129"
def test_hmac(self):
hs = sign.HeaderSigner(key_id='pda', algorithm='hmac-sha256', secret='secret', headers=['(request-target)', 'Date'])
unsigned = {
'Date': 'today',
'accept': 'llamas'
}
        signed = hs.sign(unsigned, method='GET', path='/path?query=123')
auth = parse_authorization_header(signed['authorization'])
params = auth[1]
self.assertIn('keyId', params)
self.assertIn('algorithm', params)
self.assertIn('signature', params)
self.assertEqual(params['keyId'], 'pda')
self.assertEqual(params['algorithm'], 'hmac-sha256')
self.assertEqual(params['signature'], 'SFlytCGpsqb/9qYaKCQklGDvwgmrwfIERFnwt+yqPJw=')
if __name__ == "__main__":
unittest.main()
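# Hedged sketch, not part of the original test module: a small helper showing
# how the Content-MD5 value asserted in test_content_md5 is derived. It relies
# only on libraries the tests above already use (hashlib and requests).
import hashlib
from requests.models import RequestEncodingMixin

def content_md5(params):
    """Form-encode params the way requests does, then hex-digest the UTF-8 bytes."""
    encoded = RequestEncodingMixin._encode_params(params)
    return hashlib.md5(encoded.encode("utf-8")).hexdigest()

# Example: content_md5({'signature': '...'}) reproduces the literal digest asserted above.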
|
istommao/wechatkit
|
wechatkit/resource.py
|
Python
|
mit
| 1,185 | 0 |
"""Resource manage module."""
import os
from .utils import RequestUtil
class ResourceAPI(object):
"""Resource wechat api."""
ADD_TEMP_URI = ('https://api.weixin.qq.com/cgi-bin/media/'
'upload?access_token={}&type={}')
@classmethod
def upload(cls, path, token, rtype, upload_type='temp'):
"""Upload resource.
        :path str: Resource local path
:token str: Wechat access token
:rtype str: Resource type such as image, voice ...
:upload_type: Upload type, Now support temp and forever
"""
if not os.path.exists(path):
return False
method = getattr(cls, '_upload_{}'.format(upload_type), None)
if method:
return method(path, token, rtype)
return False
@classmethod
def _upload_temp(cls, path, token, rtype):
"""Upload temp media to wechat server.
:path str: Upload entity local path
:token str: Wechat access token
:rtype str: Upload entity type
:Return dict:
"""
uri = cls.ADD_TEMP_URI.format(token, rtype)
resp = RequestUtil.upload(uri, {}, path)
return resp
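# Hedged usage sketch, not part of the original module: uploading a temporary
# image through ResourceAPI above. The token and path values are placeholders;
# a real access token comes from WeChat's token endpoint.
def upload_temp_image_example(access_token, image_path='/tmp/banner.jpg'):
    result = ResourceAPI.upload(image_path, access_token, 'image',
                                upload_type='temp')
    # `result` is the parsed upload response, or False when the file is missing
    # or when upload_type has no matching _upload_* handler.
    return result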
|
singingwolfboy/webhookdb
|
webhookdb/load/pull_request_file.py
|
Python
|
agpl-3.0
| 1,058 | 0.002836 |
# coding=utf-8
from __future__ import unicode_literals, print_function
from flask import request, jsonify, url_for
from flask_login import current_user
import bugsnag
from . import load
from webhookdb.tasks.pull_request_file import spawn_page_tasks_for_pull_request_files
@load.route('/repos/<owner>/<repo>/pulls/<int:number>/files', methods=["POST"])
def pull_request_files(owner, repo, number):
"""
Queue tasks to load the pull request files (diffs) for a single pull request
into WebhookDB.
:statuscode 202: task successfully queued
"""
bugsnag_ctx = {"owner": owner, "repo": repo, "number": number}
bugsnag.configure_request(meta_data=bugsnag_ctx)
children = bool(request.args.get("children", False))
    result = spawn_page_tasks_for_pull_request_files.delay(
owner, repo, number, children=children,
requestor_id=current_user.get_id(),
)
resp = jsonify({"message": "queued"})
resp.status_code = 202
resp.headers["Location"] = url_for("tasks.status", task_id=result.id)
return resp
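# Hedged client-side sketch, not part of webhookdb: calling the endpoint above
# with `requests`. The base URL and repo coordinates are illustrative; the
# blueprint's URL prefix is assumed to match the route registered above.
import requests

def queue_pull_request_files(base_url, owner, repo, number, children=False):
    url = "{}/repos/{}/{}/pulls/{}/files".format(base_url, owner, repo, number)
    resp = requests.post(url, params={"children": "true"} if children else {})
    # A 202 response means the task was queued; the Location header points at
    # the task-status endpoint produced by url_for("tasks.status") above.
    return resp.status_code, resp.headers.get("Location")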
|
brooksandrew/postman_problems
|
postman_problems/tests/test_example_sleeping_giant.py
|
Python
|
mit
| 6,009 | 0.002663 |
import math
import pkg_resources
import itertools
import pandas as pd
import networkx as nx
from postman_problems.viz import add_node_attributes
from postman_problems.graph import (
read_edgelist, create_networkx_graph_from_edgelist, get_odd_nodes, get_shortest_paths_distances
)
from postman_problems.solver import rpp, cpp
# ###################
# PARAMETERS / DATA #
# ###################
EDGELIST = pkg_resources.resource_filename('postman_problems', 'examples/sleeping_giant/edgelist_sleeping_giant.csv')
NODELIST = pkg_resources.resource_filename('postman_problems', 'examples/sleeping_giant/nodelist_sleeping_giant.csv')
START_NODE = 'b_end_east'
#########
# TESTS #
#########
def test_read_sleeping_giant_edgelist():
df = read_edgelist(EDGELIST, keep_optional=True)
# check that our Sleeping Giant example dataset contains the correct fields and values
assert ['node1', 'node2', 'trail', 'color', 'distance', 'estimate', 'required'] in df.columns.values
assert math.isclose(df[df['required'] == 1]['distance'].sum(), 26.01)
assert math.isclose(df['distance'].sum(), 30.48)
df_req = read_edgelist(EDGELIST, keep_optional=False)
assert math.isclose(df_req['distance'].sum(), 26.01)
assert 'req' not in df_req.columns
def test_create_networkx_graph_from_edgelist():
df = read_edgelist(EDGELIST, keep_optional=True)
graph = create_networkx_graph_from_edgelist(df, edge_id='id')
# check that our starting graph is created correctly
assert isinstance(graph, nx.MultiGraph)
assert len(graph.edges()) == 133
assert len(graph.nodes()) == 78
assert graph['b_end_east']['b_y'][0]['color'] == 'blue'
assert graph['b_end_east']['b_y'][0]['trail'] == 'b'
assert graph['b_end_east']['b_y'][0]['distance'] == 1.32
# check that starting graph with required trails only is correct
df_req = read_edgelist(EDGELIST, keep_optional=False)
graph_req = create_networkx_graph_from_edgelist(df_req, edge_id='id')
assert isinstance(graph_req, nx.MultiGraph)
assert len(graph_req.edges()) == 121
assert len(graph_req.nodes()) == 74
def test_add_node_attributes():
# create objects for testing
df = read_edgelist(EDGELIST)
graph = create_networkx_graph_from_edgelist(df, edge_id='id')
nodelist_df = pd.read_csv(NODELIST)
graph_node_attrs = add_node_attributes(graph, nodelist_df)
assert len(graph_node_attrs.nodes()) == 74
# check that each node attribute has an X and Y coordinate
for k, v in graph_node_attrs.nodes(data=True):
assert 'X' in v
assert 'Y' in v
# spot check node attributes for first node
node_data_from_graph = list(graph_node_attrs.nodes(data=True))
node_names = [n[0] for n in node_data_from_graph]
assert 'rs_end_north' in node_names
key = node_names.index('rs_end_north')
assert node_data_from_graph[key][1]['X'] == 1772
assert node_data_from_graph[key][1]['Y'] == 172
def test_get_shortest_paths_distances():
df = read_edgelist(EDGELIST)
graph = create_networkx_graph_from_edgelist(df, edge_id='id')
odd_nodes = get_odd_nodes(graph)
odd_node_pairs = list(itertools.combinations(odd_nodes, 2))
# coarsely checking structure of `get_shortest_paths_distances` return value
odd_node_pairs_shortest_paths = get_shortest_paths_distances(graph, odd_node_pairs, 'distance')
assert len(odd_node_pairs_shortest_paths) == 630
assert type(odd_node_pairs_shortest_paths) == dict
# check that each node name appears the same number of times in `get_shortest_paths_distances` return value
node_names = list(itertools.chain(*[i[0] for i in odd_node_pairs_shortest_paths.items()]))
assert set(pd.value_counts(node_names)) == set([35])
def test_nodelist_edgelist_overlap():
"""
Test that the nodelist and the edgelist contain the same node names. If using X,Y coordinates for plotting and
not all nodes have attributes, this could get messy.
"""
eldf = read_edgelist(EDGELIST, keep_optional=True)
nldf = pd.read_csv(NODELIST)
edgelist_nodes = set(eldf['node1'].append(eldf['node2']))
nodelist_nodes = set(nldf['id'])
nodes_in_el_but_not_nl = edgelist_nodes - nodelist_nodes
assert nodes_in_el_but_not_nl == set(), \
"Warning: The following nodes are in the edgelist, but not the nodelist: {}".format(nodes_in_el_but_not_nl)
nodes_in_nl_but_not_el = nodelist_nodes - edgelist_nodes
assert nodes_in_nl_but_not_el == set(), \
"Warning: The following nodes are in the nodelist, but not the edgelist: {}".format(nodes_in_nl_but_not_el)
def test_sleeping_giant_cpp_solution():
cpp_solution, graph = cpp(edgelist_filename=EDGELIST, start_node=START_NODE)
    # make sure the number of edges in the solution is correct
assert len(cpp_solution) == 155
# make sure our total mileage is correct
cpp_solution_distance = sum([edge[3]['distance'] for edge in cpp_solution])
assert math.isclose(cpp_solution_distance, 33.25)
# make sure our circuit begins and ends at the same place
assert cpp_solution[0][0] == cpp_solution[-1][1] == START_NODE
# make sure original graph is properly returned
assert len(graph.edges()) == 121
[e[2].get('augmented') for e in graph.edges(data=True)].count(True) == 35
def test_sleeping_giant_rpp_solution():
rpp_solution, graph = rpp(edgelist_filename=EDGELIST, start_node=START_NODE)
    # make sure the number of edges in the solution is correct
assert len(rpp_solution) == 151
# make sure our total mileage is correct
    rpp_solution_distance = sum([edge[3]['distance'] for edge in rpp_solution])
assert math.isclose(rpp_solution_distance, 32.12)
# make sure our circuit begins and ends at the same place
assert rpp_solution[0][0] == rpp_solution[-1][1] == START_NODE
# make sure original graph is properly returned
assert len(graph.edges()) == 133
    [e[3].get('augmented') for e in graph.edges(data=True, keys=True)].count(True) == 30
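# Hedged sketch, not one of the original tests: solving and summarising the
# Sleeping Giant CPP route directly, using only names imported above. The
# (node_from, node_to, key, attrs) edge shape mirrors the tuples indexed with
# edge[3]['distance'] in the tests.
if __name__ == '__main__':
    circuit, graph = cpp(edgelist_filename=EDGELIST, start_node=START_NODE)
    total_distance = sum(edge[3]['distance'] for edge in circuit)
    print('edges walked: {}, total distance: {:.2f}'.format(len(circuit), total_distance))
    for node_from, node_to, key, attrs in circuit[:5]:
        print('{} -> {} ({})'.format(node_from, node_to, attrs.get('trail')))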
|