commit | subject | repos | old_file | new_file | new_contents | old_contents | license | lang
---|---|---|---|---|---|---|---|---|
52f4d72387810994a7106e4fa55c3bfcda798a1c
|
Create __init__.py
|
timo-stoettner/ENN
|
ENN/__init__.py
|
ENN/__init__.py
|
|
|
mit
|
Python
|
f3f073379b71a13fea4255622c7df19bec02fdd7
|
bump version
|
lbolla/EMpy,DavidRimel/EMpy,DanHickstein/EMpy,demisjohn/EMpy
|
EMpy/version.py
|
EMpy/version.py
|
__author__ = 'Lorenzo Bolla'
version = '0.1.4'
|
__author__ = 'Lorenzo Bolla'
version = '0.1.3'
|
mit
|
Python
|
94610546a63a05f81942c43b12c109185a8a4aff
|
add benchmark.py for cifar_distributed_cnn
|
apache/incubator-singa,apache/incubator-singa,apache/incubator-singa,apache/incubator-singa,apache/incubator-singa
|
examples/cifar_distributed_cnn/benchmark.py
|
examples/cifar_distributed_cnn/benchmark.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# the code is modified from
# https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
from singa import opt
# import opt
from singa import device
from singa import tensor
import argparse
import time
import numpy as np
from tqdm import trange
def train_resnet(DIST=True, graph=True, sequential=False, verbosity=0):
    # Define the hyperparameters for train_resnet
niters = 100
batch_size = 32
sgd = opt.SGD(lr=0.1, momentum=0.9, weight_decay=1e-5)
IMG_SIZE = 224
# For distributed training, sequential has better throughput in the current version
if DIST == True:
sgd = opt.DistOpt(sgd)
world_size = sgd.world_size
local_rank = sgd.local_rank
global_rank = sgd.global_rank
sequential = True
else:
local_rank = 0
world_size = 1
global_rank = 0
sequential = False
dev = device.create_cuda_gpu_on(local_rank)
tx = tensor.Tensor((batch_size, 3, IMG_SIZE, IMG_SIZE), dev)
ty = tensor.Tensor((batch_size,), dev, tensor.int32)
x = np.random.randn(batch_size, 3, IMG_SIZE, IMG_SIZE).astype(np.float32)
y = np.random.randint(0, 1000, batch_size, dtype=np.int32)
tx.copy_from_numpy(x)
ty.copy_from_numpy(y)
dev.SetVerbosity(verbosity)
dev.SetSkipIteration(5)
# Construct the model
from model import resnet
model = resnet.resnet50(num_channels=3, num_classes=1000)
model.train()
model.set_optimizer(sgd)
model.compile([tx], is_train=True, use_graph=graph, sequential=sequential)
# Train model
dev.Sync()
start = time.time()
with trange(niters) as t:
for _ in t:
model(tx, ty, dist_option='fp32', spars=None)
dev.Sync()
end = time.time()
titer = (end - start) / float(niters)
throughput = float(niters * batch_size * world_size) / (end - start)
if global_rank == 0:
print("\nThroughput = {} per second".format(throughput), flush=True)
print("TotalTime={}".format(end - start), flush=True)
print("Total={}".format(titer), flush=True)
dev.PrintTimeProfiling()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Throughput test using Resnet 50')
    parser.add_argument('--dist',
                        '--enable-dist',
                        default=False,
                        action='store_true',
                        help='enable distributed training',
                        dest='DIST')
    parser.add_argument('--no-graph',
                        '--disable-graph',
                        default=True,
                        action='store_false',
                        help='disable graph',
                        dest='graph')
parser.add_argument('--verbosity',
'--log-verbosity',
default=0,
type=int,
help='logging verbosity',
dest='verbosity')
args = parser.parse_args()
train_resnet(DIST=args.DIST,
graph=args.graph,
sequential=False,
verbosity=args.verbosity)
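    # Usage sketch (assumed launcher, not part of the original commit): singa's
    # DistOpt is typically started under MPI, e.g. `mpiexec -n 2 python benchmark.py --dist`,
    # while a plain `python benchmark.py` exercises the single-GPU graph path.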
|
|
apache-2.0
|
Python
|
05e37a58825a6b75ade5ffdd25e887f9c9a7409c
|
Add net/ip.py containing python function wrapping /sbin/ip
|
xenserver/python-libs,xenserver/python-libs
|
net/ip.py
|
net/ip.py
|
#!/usr/bin/env python
# Copyright (c) 2012 Citrix Systems, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only. with the special
# exception on linking described in file LICENSE.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
"""
Python function using /sbin/ip for convenience
"""
__version__ = "1.0.0"
__author__ = "Andrew Cooper"
from subprocess import Popen, PIPE
from xcp.logger import LOG
def ip_link_set_name(src_name, dst_name):
"""
Rename network interface src_name to dst_name using
"ip link set $src_name name $dst_name"
"""
LOG.debug("Attempting rename %s -> %s" % (src_name, dst_name))
# Is the interface currently up?
link_show = Popen(["/sbin/ip", "link", "show", src_name], stdout = PIPE)
stdout, _ = link_show.communicate()
if link_show.returncode != 0:
LOG.error("performing \"ip link show %s\" returned %d - skipping"
% (src_name, link_show.returncode))
return
# Does the string "UP" appear?
isup = 'UP' in (stdout.split("<", 1)[1].split(">", 1)[0].split(','))
# If it is up, bring it down for the rename
if isup:
link_down = Popen(["/sbin/ip", "link", "set", src_name, "down"])
link_down.wait()
if link_down.returncode != 0:
LOG.error("Unable to bring link %s down. (Exit %d)"
% (src_name, link_down.returncode))
return
# Perform the rename
link_rename = Popen(["/sbin/ip", "link", "set", src_name, "name", dst_name])
link_rename.wait()
if link_rename.returncode != 0:
LOG.error("Unable to rename link %s to %s. (Exit %d)"
% (src_name, dst_name, link_rename.returncode))
return
# if the device was up before, bring it back up
if isup:
        # Performance note: if we are doing an intermediate rename to
        # move a device sideways, we shouldn't bring it back up until it has
        # its final name. However, I can't think of a non-hacky way of doing
# this with the current implementation
link_up = Popen(["/sbin/ip", "link", "set", dst_name, "up"])
link_up.wait()
if link_up.returncode != 0:
LOG.error("Unable to bring link %s back up. (Exit %d)"
                      % (src_name, link_up.returncode))
return
LOG.info("Succesfully renamed link %s to %s" % (src_name, dst_name))
|
|
bsd-2-clause
|
Python
|
2b57a443807de26c9e71c97fd029e3d8416db597
|
Add feature usage shuffler
|
DynamoDS/Coulomb,DynamoDS/Coulomb,DynamoDS/Coulomb
|
SessionTools/feature_usage_shuffler.py
|
SessionTools/feature_usage_shuffler.py
|
# Condenses all the feature files into a single location,
# Split by the names of the features
import sys
from os.path import isfile
import os
import json
path = sys.argv[1]
out_path = sys.argv[2]
paths = []
i = 0
skipped = 0
pretty_print_json_output = True
feature_versions_map = {}
def flush():
# Create one file per feature version
for k in feature_versions_map.keys():
out_full_path = out_path + "." + k + '.json'
data_to_dump = {
"feature_version" : k,
"sessions" : feature_versions_map[k]
}
with open(out_full_path, 'w') as f:
if pretty_print_json_output:
f.write(json.dumps(data_to_dump, sort_keys=True, indent=2))
else:
f.write(json.dumps(data_to_dump))
# Main function
print ('Enumerating feature files')
for root, subdirs, files in os.walk(path):
for ff in files:
i = i + 1
if i % 1000 == 0:
print (i, skipped)
flush()
path = os.path.join(root,ff)
if (path.find('.sorted.gz.features.') == -1 ):
continue
path_split = path.split ('.sorted.gz.features.')
feature_version = path_split[-1]
if not feature_versions_map.has_key(feature_version):
feature_versions_map[feature_version] = {}
session_id = path_split[0].split('/')[-1]
if feature_versions_map[feature_version].has_key(session_id):
# We've already added this session
# This can be used in a version that loads a partially complete file
print ("Session: ") + session_id + " skipped, features already added for: " + feature_version
skipped += 1
continue
feature_versions_map[feature_version][session_id] = []
paths.append(path)
print (feature_version, session_id, path)
with open(path, 'r') as f:
lines = f.readlines()
for ln in lines:
feature_versions_map[feature_version][session_id].append(json.loads(ln))
flush()
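# Usage sketch, per the sys.argv handling above:
#   python feature_usage_shuffler.py <features_directory> <output_prefix>
# which writes one <output_prefix>.<feature_version>.json file per feature version.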
|
|
mit
|
Python
|
66bb19a5937091812b80b9c0d98c6f52b9d47165
|
add new package : kafka (#14315)
|
iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack
|
var/spack/repos/builtin/packages/kafka/package.py
|
var/spack/repos/builtin/packages/kafka/package.py
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Kafka(Package):
"""
Kafka is used for building real-time data pipelines and streaming apps.
It is horizontally scalable, fault-tolerant, wicked fast, and runs in
production in thousands of companies.
"""
homepage = "https://www-eu.apache.org/dist/kafka"
url = "https://www-eu.apache.org/dist/kafka/2.3.1/kafka_2.12-2.3.1.tgz"
list_url = "https://www-eu.apache.org/dist/kafka/"
list_depth = 1
version('2.13-2.4.0', sha256='c1c5246c7075459687b3160b713a001f5cd1cc563b9a3db189868d2f22aa9110')
version('2.12-2.4.0', sha256='b9582bab0c3e8d131953b1afa72d6885ca1caae0061c2623071e7f396f2ccfee')
version('2.12-2.3.1', sha256='5a3ddd4148371284693370d56f6f66c7a86d86dd96c533447d2a94d176768d2e')
version('2.12-2.3.0', sha256='d86f5121a9f0c44477ae6b6f235daecc3f04ecb7bf98596fd91f402336eee3e7')
version('2.12-2.2.2', sha256='7a1713d2ee929e54b1c889a449d77006513e59afb3032366368b2ebccd9e9ec0')
depends_on('java@8:', type='run')
def url_for_version(self, version):
url = "https://www-eu.apache.org/dist/kafka/{0}/kafka_{1}.tgz"
parent_dir = str(version).split('-')[1]
return url.format(parent_dir, version)
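        # Worked example of url_for_version: version '2.12-2.4.0' yields
        # parent_dir '2.4.0', i.e.
        # https://www-eu.apache.org/dist/kafka/2.4.0/kafka_2.12-2.4.0.tgz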
def install(self, spec, prefix):
install_tree('.', prefix)
|
|
lgpl-2.1
|
Python
|
16e6f88e094d4eac8ba154eed5681187f14ab652
|
Create __init__.py
|
overdev/SpaceGame
|
spacegame/__init__.py
|
spacegame/__init__.py
|
"""SpaceGame
A simple 2d space shooter made with python and pygame.
"""
# Make sure you have Python 3.4 and pygame 1.9.1+ installed before running this code.
import pygame
import pygame.locals as c
# this module itself does nothing important, but it's good to have pygame
# initialized as soon as possible.
pygame.init()
|
|
mit
|
Python
|
00cdcceb131814b24546c36810682ed78ba866c6
|
Create database column class (DBCol)
|
rlinguri/pyfwk
|
pyfwk/struc/dbcol.py
|
pyfwk/struc/dbcol.py
|
#!/usr/bin/env python
"""
dbcol.py: DBCol is a struct describing an sqlite database table column
"""
# ----------------------------DATABASE-COLUMN-----------------------------#
class DBCol:
name = None
datatype = None
def __init__(self, name, datatype):
self.name = name
self.datatype = datatype
# ---------------------------------EXPORT---------------------------------#
__all__ = ['DBCol']
# ----------------------------------MAIN----------------------------------#
def main():
pass
if __name__ == '__main__':
main()
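# Usage sketch (hypothetical column values):
#   col = DBCol('id', 'INTEGER')
#   col.name, col.datatype  # -> ('id', 'INTEGER')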
|
|
mit
|
Python
|
891eef2354e4cf0a552e5c8023c2778bf45a3582
|
add py lib and the first py fiel
|
xylan2004/AciSnippets,xylan2004/AciSnippets,xylan2004/AciSnippets,xylan2004/AciSnippets,xylan2004/AciSnippets
|
pylib/EncodingLib.py
|
pylib/EncodingLib.py
|
# coding=utf-8
import sys
# sys.stdout = codecs.lookup('iso8859-1')[-1](sys.stdout)
print 'System Encoding is', sys.getdefaultencoding()
# In Python 2, a str object is really an "8-bit string" (a byte string), essentially Java's byte[].
s_chinese = '中文'
# A Python 2 unicode object, by contrast, is the true equivalent of Java's String, essentially Java's char[].
s_unicode_chinese = u'中文'
print 's_chinese is str', isinstance(s_chinese, str)
print 's_unicode_chinese is basestring', isinstance(s_unicode_chinese, basestring)
# encoding list: https://docs.python.org/2.4/lib/standard-encodings.html
print u'"中文"的unicode-escape原生字符串', repr(s_chinese.decode('unicode-escape'))
print u'"中文"的gb18030原生字符串', repr(s_chinese.decode('gb18030'))
print u'"中文"的utf-8原生字符串', repr(s_chinese.decode('utf-8'))
print u'"中文"的utf-8原生字符串', repr(s_unicode_chinese) #
print s_unicode_chinese.encode('utf-8')
print s_chinese == s_unicode_chinese
# print u'A good idea\u00AE'.encode('latin-1')
# print s.encode('ascii', 'xmlcharrefreplace')
# accept input and parse to int
# input = int(raw_input('come>'))
# print input + 2
|
|
bsd-2-clause
|
Python
|
2644625e137963ef2982d7ff0a3241bfcbde1ac6
|
Prepend the logs with '...' if they aren't complete
|
sysadmind/cron-sentry,mediacore/raven-cron,ciiol/cron-sentry,incuna/cron-sentry
|
raven_cron/runner.py
|
raven_cron/runner.py
|
from os import getenv, SEEK_END
from raven import Client
from subprocess import call
from tempfile import TemporaryFile
from argparse import ArgumentParser
from sys import argv, exit
from time import time
from .version import VERSION
MAX_MESSAGE_SIZE = 1000
parser = ArgumentParser(description='Wraps commands and reports failing ones to sentry')
# FIXME: Should we also use a configuration file ?
parser.add_argument(
'--dsn',
metavar='SENTRY_DSN',
default=getenv('SENTRY_DSN'),
help='Sentry server address',
)
parser.add_argument(
'--version',
action='version',
version=VERSION,
)
parser.add_argument(
'cmd',
nargs='*',
help='The command to run',
)
def run(args=argv[1:]):
opts = parser.parse_args(args)
runner = CommandReporter(**vars(opts))
runner.run()
class CommandReporter(object):
def __init__(self, cmd, dsn):
if len(cmd) <= 1:
cmd = cmd[0]
self.dsn = dsn
self.command = cmd
self.client = None
def run(self):
buf = TemporaryFile()
start = time()
exit_status = call(self.command, stdout=buf, stderr=buf, shell=True)
if exit_status > 0:
elapsed = time() - start
self.report_fail(exit_status, buf, elapsed)
buf.close()
def report_fail(self, exit_status, buf, elapsed):
if self.dsn is None:
return
# Hack to get the file size since the tempfile doesn't exist anymore
buf.seek(0, SEEK_END)
file_size = buf.tell()
if file_size < MAX_MESSAGE_SIZE:
buf.seek(0)
last_lines = buf.read()
else:
buf.seek(-(MAX_MESSAGE_SIZE-3), SEEK_END)
last_lines = '...' + buf.read()
message="Command %s exited with exit status %d" % (self.command, exit_status)
if self.client is None:
self.client = Client(dsn=self.dsn)
# FIXME: extras are not displayed
x = self.client.captureMessage(
message,
extra={
'command': self.command,
'exit_status': exit_status,
'last_lines': last_lines,
},
time_spent=elapsed
)
|
from os import getenv, SEEK_END
from raven import Client
from subprocess import call
from tempfile import TemporaryFile
from argparse import ArgumentParser
from sys import argv, exit
from time import time
from .version import VERSION
MAX_MESSAGE_SIZE = 1000
parser = ArgumentParser(description='Wraps commands and reports failing ones to sentry')
# FIXME: Should we also use a configuration file ?
parser.add_argument(
'--dsn',
metavar='SENTRY_DSN',
default=getenv('SENTRY_DSN'),
help='Sentry server address',
)
parser.add_argument(
'--version',
action='version',
version=VERSION,
)
parser.add_argument(
'cmd',
nargs='*',
help='The command to run',
)
def run(args=argv[1:]):
opts = parser.parse_args(args)
runner = CommandReporter(**vars(opts))
runner.run()
class CommandReporter(object):
def __init__(self, cmd, dsn):
if len(cmd) <= 1:
cmd = cmd[0]
self.dsn = dsn
self.command = cmd
self.client = None
def run(self):
buf = TemporaryFile()
start = time()
exit_status = call(self.command, stdout=buf, stderr=buf, shell=True)
if exit_status > 0:
elapsed = time() - start
self.report_fail(exit_status, buf, elapsed)
buf.close()
def report_fail(self, exit_status, buf, elapsed):
if self.dsn is None:
return
# Hack to get the file size since the tempfile doesn't exist anymore
buf.seek(0, SEEK_END)
if buf.tell() < MAX_MESSAGE_SIZE:
buf.seek(0)
else:
buf.seek(-MAX_MESSAGE_SIZE, SEEK_END)
last_lines = buf.read()
message="Command %s exited with exit status %d" % (self.command, exit_status)
#print message
if self.client is None:
self.client = Client(dsn=self.dsn)
# FIXME: extras are not displayed
self.client.captureMessage(
message,
extra={
'command': self.command,
'exit_status': exit_status,
'last_lines': last_lines,
},
time_spent=elapsed
)
|
mit
|
Python
|
e66a690271f23fc2a4904e446bbdf0bf6b491a60
|
Add manager migration
|
felliott/osf.io,Johnetordoff/osf.io,leb2dg/osf.io,binoculars/osf.io,chrisseto/osf.io,laurenrevere/osf.io,binoculars/osf.io,adlius/osf.io,HalcyonChimera/osf.io,TomBaxter/osf.io,chennan47/osf.io,CenterForOpenScience/osf.io,mattclark/osf.io,adlius/osf.io,leb2dg/osf.io,mattclark/osf.io,mattclark/osf.io,brianjgeiger/osf.io,crcresearch/osf.io,cslzchen/osf.io,crcresearch/osf.io,Johnetordoff/osf.io,chrisseto/osf.io,hmoco/osf.io,saradbowman/osf.io,brianjgeiger/osf.io,cwisecarver/osf.io,mfraezz/osf.io,icereval/osf.io,aaxelb/osf.io,sloria/osf.io,cslzchen/osf.io,hmoco/osf.io,cwisecarver/osf.io,hmoco/osf.io,brianjgeiger/osf.io,baylee-d/osf.io,aaxelb/osf.io,chennan47/osf.io,pattisdr/osf.io,Johnetordoff/osf.io,mfraezz/osf.io,chrisseto/osf.io,pattisdr/osf.io,HalcyonChimera/osf.io,crcresearch/osf.io,baylee-d/osf.io,laurenrevere/osf.io,caseyrollins/osf.io,binoculars/osf.io,chrisseto/osf.io,caneruguz/osf.io,adlius/osf.io,sloria/osf.io,TomBaxter/osf.io,caneruguz/osf.io,Nesiehr/osf.io,pattisdr/osf.io,TomBaxter/osf.io,caneruguz/osf.io,laurenrevere/osf.io,hmoco/osf.io,erinspace/osf.io,baylee-d/osf.io,felliott/osf.io,felliott/osf.io,erinspace/osf.io,adlius/osf.io,Nesiehr/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,sloria/osf.io,HalcyonChimera/osf.io,Johnetordoff/osf.io,aaxelb/osf.io,cslzchen/osf.io,icereval/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,chennan47/osf.io,caseyrollins/osf.io,icereval/osf.io,cwisecarver/osf.io,felliott/osf.io,leb2dg/osf.io,mfraezz/osf.io,Nesiehr/osf.io,saradbowman/osf.io,cslzchen/osf.io,cwisecarver/osf.io,Nesiehr/osf.io,CenterForOpenScience/osf.io,CenterForOpenScience/osf.io,mfraezz/osf.io,caneruguz/osf.io,erinspace/osf.io,caseyrollins/osf.io,leb2dg/osf.io
|
osf/migrations/0028_auto_20170504_1548.py
|
osf/migrations/0028_auto_20170504_1548.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-04 20:48
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('osf', '0027_auto_20170428_1435'),
]
operations = [
migrations.AlterModelOptions(
name='subject',
options={'base_manager_name': 'objects'},
),
]
|
|
apache-2.0
|
Python
|
84e3475158797a60312068c284aa8d61d9466c6e
|
add model
|
ianzhengnan/blog-python3,ianzhengnan/blog-python3,ianzhengnan/blog-python3
|
www/models.py
|
www/models.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Models for user, blog, comment
'''
__author__ = 'Ian Zheng'
import time, uuid
from www.orm import Model, StringField, BooleanField, IntegerField, FloatField
def next_id():
return '%015d%s000' % (int(time.time() * 1000), uuid.uuid4().hex)
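# next_id() above yields a 50-character key: a 15-digit zero-padded millisecond
# timestamp, the 32-hex-digit uuid4, then '000', fitting the varchar(50) ddl below.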
class User(Model):
__table__ = 'users'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
    email = StringField(ddl='varchar(50)')
passwd = StringField(ddl='varchar(50)')
admin = BooleanField()
name = StringField(ddl='varchar(50)')
image = StringField(ddl='varchar(500)')
created_at = FloatField(default=time.time)
class Blog(Model):
__table__ = 'blogs'
|
|
apache-2.0
|
Python
|
3e9d5f9cf1c28619422cb012e532e776c4cc8b99
|
fix bug 1369498: remove adi-related tables and stored procedures
|
lonnen/socorro,mozilla/socorro,mozilla/socorro,mozilla/socorro,mozilla/socorro,lonnen/socorro,lonnen/socorro,lonnen/socorro,mozilla/socorro,mozilla/socorro
|
alembic/versions/eb8269f6bb85_bug_1369498_remove_adi.py
|
alembic/versions/eb8269f6bb85_bug_1369498_remove_adi.py
|
"""bug 1369498 remove adi
Remove ADI-related tables and stored procedures.
Revision ID: eb8269f6bb85
Revises: 0db05da17ae8
Create Date: 2018-07-19 20:00:52.933551
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'eb8269f6bb85'
down_revision = '0db05da17ae8'
def upgrade():
# Remove tables
for table in ('raw_adi_logs',
'raw_adi',
'build_adu',
'product_adu'):
op.execute('DROP TABLE IF EXISTS %s' % table)
# Remove stored procedures
for proc in ('backfill_adu(date)',
'backfill_build_adu(date)',
'backfill_matviews(date, date, boolean, interval)',
'update_adu(date, boolean)',
'update_build_adu(date, boolean)'):
op.execute('DROP FUNCTION IF EXISTS %s' % proc)
def downgrade():
# No going back
pass
|
|
mpl-2.0
|
Python
|
c413098151bc1cf7d9e37902afe4b110f97b9d57
|
Add koth example (WIP)
|
BHSPitMonkey/vmflib
|
examples/koth_vmflib_example.py
|
examples/koth_vmflib_example.py
|
#!/usr/bin/python3
"""Example map generator: King of the Hill Example
This script demonstrates vmflib by generating a basic "king of the hill" style
map. "King of the hill" is a game mode in Team Fortress 2 where each team tries
to maintain control of a central "control point" for some total defined amount
of time (before the other team does).
After this script executes, the map will be written to: koth_vmflib_example.vmf
This example highlights the use of TF2 game mechanics (in this case the use of
a control point and a goal timer). A simple implementation of team
spawn/resupply areas is also included.
https://developer.valvesoftware.com/wiki/Creating_a_Capture_Point
https://developer.valvesoftware.com/wiki/TF2/King_of_the_Hill
"""
from vmf import *
from vmf.types import Vertex
from vmf.tools import Block
m = vmf.ValveMap()
# Environment and lighting (these values come from Sky List on Valve dev wiki)
# Sun angle S Pitch Brightness Ambience
# 0 300 0 -20 238 218 181 250 224 188 122 250
m.world.skyname = 'sky_harvest_01'
light = vmf.Entity('light_environment')
light.origin = "0 0 0"
light.properties['pitch'] = -20
light.properties['angles'] = "0 300 0"
light.properties['_lightscaleHDR'] = "238 218 181 450"
light.properties['_lightHDR'] = "238 218 181 450"
light.properties['_light'] = "238 218 181 450"
light.properties['_AmbientScaleHDR'] = "1"
light.properties['_ambientHDR'] = "224 188 122 250"
light.properties['_ambient'] = "224 188 122 250"
m.children.append(light)
# Ground
ground = Block(Vertex(0, 0, -32), (2048, 2048, 64), 'nature/dirtground004')
m.world.children.append(ground)
# Skybox
skybox = [
Block(Vertex(0, 0, 2048), (2048, 2048, 64)), # Ceiling
Block(Vertex(-1024, 0, 1024), (64, 2048, 2048)), # Left wall
Block(Vertex(1024, 0, 1024), (64, 2048, 2048)), # Right wall
Block(Vertex(0, 1024, 1024), (2048, 64, 2048)), # Forward wall
Block(Vertex(0, -1024, 1024), (2048, 64, 2048)) # Rear wall
]
for wall in skybox:
wall.set_material('tools/toolsskybox2d')
m.world.children.extend(skybox)
# Control point prop
cp_prop = vmf.Entity('prop_dynamic')
cp_prop.origin = "0 0 0"
cp_prop.properties['targetname'] = "prop_cap_1"
cp_prop.properties['model'] = "models/props_gameplay/cap_point_base.mdl"
m.children.append(cp_prop)
# TODO
# Player spawn areas
# Define RED spawn
spawn_red = vmf.Entity('info_player_teamspawn')
spawn_red.origin = "900 900 10"
spawn_red.properties['TeamNum'] = "2" # RED
spawn_red.properties['angles'] = "0 -135 0"
m.children.append(spawn_red)
# Define BLU spawn
spawn_blu = vmf.Entity('info_player_teamspawn')
spawn_blu.origin = "-900 -900 10"
spawn_blu.properties['TeamNum'] = "3" # BLU
spawn_blu.properties['angles'] = "0 45 0"
m.children.append(spawn_blu)
# Write the map to a file
m.write_vmf('koth_vmflib_example.vmf')
|
|
bsd-2-clause
|
Python
|
4f5ce4af85971ea3c15c90b8a482b611b8bf6c4c
|
move logging code to evaluation directory
|
StackResys/Stack-Resys,StackResys/Stack-Resys,StackResys/Stack-Resys
|
src/evaluation/log.py
|
src/evaluation/log.py
|
""" This module provides a globally accessible
logger created from the config file """
import logging
import os
def _create_logger_from_config():
""" Create the logger from the config file """
conf = {
"name": "StackLogger",
"log_file": "logs/experiment.log",
"format": "%(asctime)s %(levelname)s \n >>> %(message)s",
"level": logging.DEBUG
}
logging.basicConfig(format=conf["format"])
logger = logging.getLogger(conf["name"])
# if need to write to the log file
log_file = conf['log_file']
if log_file is not None:
handler = logging.FileHandler(conf['log_file'])
logger.addHandler(handler)
logger.setLevel(conf["level"])
logger.debug("Logger initialized")
return logger
LOGGER = _create_logger_from_config()
|
|
bsd-3-clause
|
Python
|
6d93ad1df3eb4a50038b7429fe9ed98a8d44af6f
|
add solution for Divide Two Integers
|
zhyu/leetcode,zhyu/leetcode
|
src/divideTwoIntegers.py
|
src/divideTwoIntegers.py
|
class Solution:
# @return an integer
def divide(self, dividend, divisor):
if divisor == 0:
return 2147483647
positive = (dividend < 0) is (divisor < 0)
dividend, divisor = abs(dividend), abs(divisor)
res = 0
while dividend >= divisor:
tmp, i = divisor, 1
while dividend >= tmp:
dividend -= tmp
res += i
tmp <<= 1
i <<= 1
if not positive:
res = -res
return min(max(res, -2147483648), 2147483647)
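        # Worked example: divide(10, 3) doubles tmp 3 -> 6 while subtracting, so
        # res accumulates 1 + 2 = 3 with dividend left at 1; the result is then
        # clamped to the signed 32-bit range.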
|
|
mit
|
Python
|
51466e360320267afab41704caecebac0dff1dc2
|
Add a handler for performing client load testing.
|
XiaonuoGantan/pywebsocket,XiaonuoGantan/pywebsocket
|
src/example/bench_wsh.py
|
src/example/bench_wsh.py
|
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A simple load tester for WebSocket clients.
A client program sends a message formatted as "<time> <count> <message>" to
this handler. This handler starts sending total <count> WebSocket messages
containing <message> every <time> seconds. <time> can be a floating point
value. <count> must be an integer value.
"""
import time
def web_socket_do_extra_handshake(request):
pass # Always accept.
def web_socket_transfer_data(request):
line = request.ws_stream.receive_message()
parts = line.split(' ')
if len(parts) != 3:
raise ValueError('Bad parameter format')
wait = float(parts[0])
count = int(parts[1])
message = parts[2]
for i in xrange(count):
request.ws_stream.send_message(message)
time.sleep(wait)
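# Example client message: '0.5 10 hello' makes this handler send 'hello'
# 10 times, sleeping 0.5 seconds after each send.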
# vi:sts=4 sw=4 et
|
|
bsd-3-clause
|
Python
|
bf7d56c748eb42350c4b37a858ee5d6bb4844efa
|
Add test coverage of existing simple tenant usage policies
|
mahak/nova,openstack/nova,klmitch/nova,klmitch/nova,openstack/nova,klmitch/nova,openstack/nova,mahak/nova,klmitch/nova,mahak/nova
|
nova/tests/unit/policies/test_simple_tenant_usage.py
|
nova/tests/unit/policies/test_simple_tenant_usage.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute import simple_tenant_usage
from nova.policies import simple_tenant_usage as policies
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.policies import base
class SimpleTenantUsagePolicyTest(base.BasePolicyTest):
"""Test Simple Tenant Usage APIs policies with all possible context.
This class defines the set of context with different roles
which are allowed and not allowed to pass the policy checks.
With those set of context, it will call the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(SimpleTenantUsagePolicyTest, self).setUp()
self.controller = simple_tenant_usage.SimpleTenantUsageController()
self.req = fakes.HTTPRequest.blank('')
        # Check that admin or owner is able to get
# the tenant usage statistics for a specific tenant.
self.admin_or_owner_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context, self.project_member_context,
self.project_reader_context, self.project_foo_context]
# Check that non-admin/owner is not able to get
# the tenant usage statistics for a specific tenant.
self.admin_or_owner_unauthorized_contexts = [
self.system_member_context, self.system_reader_context,
self.system_foo_context, self.other_project_member_context,
self.other_project_reader_context,
]
# Check that admin is able to get the tenant usage statistics.
self.admin_authorized_contexts = [
self.legacy_admin_context, self.system_admin_context,
self.project_admin_context]
# Check that non-admin is not able to get the tenant usage statistics.
self.admin_unauthorized_contexts = [
self.system_member_context, self.system_reader_context,
self.system_foo_context, self.project_member_context,
self.other_project_member_context,
self.project_foo_context, self.project_reader_context,
self.other_project_reader_context,
]
def test_index_simple_tenant_usage_policy(self):
rule_name = policies.POLICY_ROOT % 'list'
self.common_policy_check(self.admin_authorized_contexts,
self.admin_unauthorized_contexts,
rule_name,
self.controller.index,
self.req)
def test_show_simple_tenant_usage_policy(self):
rule_name = policies.POLICY_ROOT % 'show'
self.common_policy_check(self.admin_or_owner_authorized_contexts,
self.admin_or_owner_unauthorized_contexts,
rule_name,
self.controller.show,
self.req, self.project_id)
class SimpleTenantUsageScopeTypePolicyTest(SimpleTenantUsagePolicyTest):
"""Test Simple Tenant Usage APIs policies with system scope enabled.
This class set the nova.conf [oslo_policy] enforce_scope to True
so that we can switch on the scope checking on oslo policy side.
It defines the set of context with scoped token
which are allowed and not allowed to pass the policy checks.
With those set of context, it will run the API operation and
verify the expected behaviour.
"""
def setUp(self):
super(SimpleTenantUsageScopeTypePolicyTest, self).setUp()
self.flags(enforce_scope=True, group="oslo_policy")
|
|
apache-2.0
|
Python
|
d45bdf62d54c0a5efc77be639f4259807a286d6e
|
Create pour-water.py
|
kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015
|
Python/pour-water.py
|
Python/pour-water.py
|
# Time: O(v * n)
# Space: O(1)
# We are given an elevation map, heights[i] representing the height of the terrain at that index.
# The width at each index is 1. After V units of water fall at index K, how much water is at each index?
#
# Water first drops at index K and rests on top of the highest terrain or water at that index.
# Then, it flows according to the following rules:
#
# If the droplet would eventually fall by moving left, then move left.
# Otherwise, if the droplet would eventually fall by moving right, then move right.
# Otherwise, rise at its current position.
# Here, "eventually fall" means that the droplet will eventually be at a lower level
# if it moves in that direction.
# Also, "level" means the height of the terrain plus any water in that column.
# We can assume there's infinitely high terrain on the two sides out of bounds of the array.
# Also, there could not be partial water being spread out evenly on more than 1 grid block -
# each unit of water has to be in exactly one block.
#
# Example 1:
# Input: heights = [2,1,1,2,1,2,2], V = 4, K = 3
# Output: [2,2,2,3,2,2,2]
# Explanation:
# # #
# # #
# ## # ###
# #########
# 0123456 <- index
#
# The first drop of water lands at index K = 3:
#
# # #
# # w #
# ## # ###
# #########
# 0123456
#
# When moving left or right, the water can only move to the same level or a lower level.
# (By level, we mean the total height of the terrain plus any water in that column.)
# Since moving left will eventually make it fall, it moves left.
# (A droplet "made to fall" means go to a lower height than it was at previously.)
#
# # #
# # #
# ## w# ###
# #########
# 0123456
#
# Since moving left will not make it fall, it stays in place. The next droplet falls:
#
# # #
# # w #
# ## w# ###
# #########
# 0123456
#
# Since the new droplet moving left will eventually make it fall, it moves left.
# Notice that the droplet still preferred to move left,
# even though it could move right (and moving right makes it fall quicker.)
#
# # #
# # w #
# ## w# ###
# #########
# 0123456
#
# # #
# # #
# ##ww# ###
# #########
# 0123456
#
# After those steps, the third droplet falls.
# Since moving left would not eventually make it fall, it tries to move right.
# Since moving right would eventually make it fall, it moves right.
#
# # #
# # w #
# ##ww# ###
# #########
# 0123456
#
# # #
# # #
# ##ww#w###
# #########
# 0123456
#
# Finally, the fourth droplet falls.
# Since moving left would not eventually make it fall, it tries to move right.
# Since moving right would not eventually make it fall, it stays in place:
#
# # #
# # w #
# ##ww#w###
# #########
# 0123456
#
# The final answer is [2,2,2,3,2,2,2]:
#
# #
# #######
# #######
# 0123456
#
# Example 2:
# Input: heights = [1,2,3,4], V = 2, K = 2
# Output: [2,3,3,4]
# Explanation:
# The last droplet settles at index 1,
# since moving further left would not cause it to eventually fall to a lower height.
#
# Example 3:
# Input: heights = [3,1,3], V = 5, K = 1
# Output: [4,4,4]
#
# Note:
# - heights will have length in [1, 100] and contain integers in [0, 99].
# - V will be in range [0, 2000].
# - K will be in range [0, heights.length - 1].
class Solution(object):
def pourWater(self, heights, V, K):
"""
:type heights: List[int]
:type V: int
:type K: int
:rtype: List[int]
"""
for _ in xrange(V):
for d in (-1, 1):
i = best = K
while 0 <= i+d < len(heights) and \
heights[i+d] <= heights[i]:
if heights[i+d] < heights[i]: best = i+d
i += d
if best != K:
break
heights[best] += 1
return heights
|
|
mit
|
Python
|
8dbc2dd48d1d0e25972ad359464694d352d58705
|
add transpilation of the arangodb social graph
|
tariqdaouda/pyArango,tariqdaouda/pyArango
|
examples/createSocialGraph.py
|
examples/createSocialGraph.py
|
#!/usr/bin/python
import sys
from pyArango.connection import *
from pyArango.graph import *
from pyArango.collection import *
class Social(object):
class male(Collection) :
_fields = {
"name" : Field()
}
class female(Collection) :
_fields = {
"name" : Field()
}
class relation(Edges) :
_fields = {
"number" : Field()
}
class social(Graph) :
_edgeDefinitions = (EdgeDefinition ('relation',
fromCollections = ["female", "male"],
toCollections = ["female", "male"]),)
_orphanedCollections = []
def __init__(self):
self.conn = Connection(username="USERNAME", password="SECRET")
self.db = self.conn["_system"]
if self.db.hasGraph('social'):
raise Exception("The social graph was already provisioned! remove it first")
self.female = self.db.createCollection("female")
self.male = self.db.createCollection("male")
self.relation = self.db.createCollection("relation")
g = self.db.createGraph("social")
a = g.createVertex('female', {"name": 'Alice', "_key": 'alice'});
b = g.createVertex('male', {"name": 'Bob', "_key": 'bob'});
c = g.createVertex('male', {"name": 'Charly', "_key": 'charly'});
d = g.createVertex('female', {"name": 'Diana', "_key": 'diana'});
a.save()
b.save()
c.save()
d.save()
g.link('relation', a, b, {"type": 'married', "_key": 'aliceAndBob'})
g.link('relation', a, c, {"type": 'friend', "_key": 'aliceAndCharly'})
g.link('relation', c, d, {"type": 'married', "_key": 'charlyAndDiana'})
g.link('relation', b, d, {"type": 'friend', "_key": 'bobAndDiana'})
Social()
|
|
apache-2.0
|
Python
|
06d2d7dd155f5ac888a8c0d2d9c45c61b95de714
|
update tests for thresholding for ecm
|
danlurie/C-PAC,danlurie/C-PAC,roijo/C-PAC_complexitytools,danlurie/C-PAC,sgiavasis/C-PAC,roijo/C-PAC_complexitytools,sgiavasis/C-PAC,roijo/C-PAC_complexitytools,sgiavasis/C-PAC,roijo/C-PAC_complexitytools,sgiavasis/C-PAC,danlurie/C-PAC
|
CPAC/network_centrality/tests/test_thresh_and_sum.py
|
CPAC/network_centrality/tests/test_thresh_and_sum.py
|
"""
This tests the functions in network_centrality/thresh_and_sum.pyx
"""
import os, sys
import numpy as np
from numpy.testing import *
from nose.tools import ok_, eq_, raises, with_setup
from nose.plugins.attrib import attr # http://nose.readthedocs.org/en/latest/plugins/attrib.html
import sys
sys.path.insert(0, '/home2/data/Projects/CPAC_Regression_Test/zarrar/centrality_tests/lib/nipype')
sys.path.insert(1, '/home2/data/Projects/CPAC_Regression_Test/zarrar/centrality_tests/lib/C-PAC')
# For eigen centrality
from CPAC.network_centrality.thresh_and_sum import \
thresh_binarize_float, thresh_binarize_double, \
thresh_weighted_float, thresh_weighted_double, \
thresh_transform_weighted_float, thresh_transform_weighted_double
# For degree centrality
from CPAC.network_centrality.thresh_and_sum import \
centrality_binarize_float, centrality_binarize_double, \
centrality_weighted_float, centrality_weighted_double, \
centrality_both_float, centrality_both_double # these aren't currently used
###
# TEST thresholding of matrices for eigenvector centrality
###
def test_thresh_binarize():
print "testing threshold binarize"
nvoxs = 1000
r_value = 0.2
corr_matrix = np.random.random((nvoxs, nvoxs)).astype('float32')
ref = 1*(corr_matrix>r_value)
comp = corr_matrix.copy()
thresh_binarize_float(comp, r_value)
assert_equal(ref, comp)
def test_thresh_weighted():
print "testing threshold weighted"
nvoxs = 1000
r_value = 0.2
corr_matrix = np.random.random((nvoxs, nvoxs)).astype('float32')
ref = corr_matrix*(corr_matrix>r_value)
comp = corr_matrix.copy()
thresh_weighted_float(comp, r_value)
assert_equal(ref, comp)
def test_thresh_transform_weighted():
print "testing threshold weighted"
nvoxs = 1000
r_value = 0.2
corr_matrix = np.random.random((nvoxs, nvoxs)).astype('float32')
ref = ((1.0+corr_matrix)/2.0)*(corr_matrix>r_value)
comp = corr_matrix.copy()
thresh_transform_weighted_float(comp, r_value)
assert_equal(ref, comp)
###
# TEST centrality functions
###
|
|
bsd-3-clause
|
Python
|
8f3767384b1173c2a9921fce055fdec0e2f1bacc
|
add backupscript
|
rprader/SimpleBackupScript
|
backupscript.py
|
backupscript.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import datetime, os, shutil
PATH_CONFIG = {
'local_backup_path': '/Users/raphaelprader/Desktop/fake_bu_path'
}
DB_CREDENTIALS = {
'username': 'root',
'password': '',
'host': 'localhost',
'db_names': [
'db_www_sprachtandem_ch',
]
}
GDRIVE_CREDENTIALS = {
'remote_name': 'GoogleDrive',
'remote_path': 'NetcupBackup'
}
class Backup(object):
def __init__(self, db_credentials, path_config, gdrive_credentials):
self.db_credentials = db_credentials
self.path_config = path_config
self.gdrive_credentials = gdrive_credentials
self.log_file = os.path.join(self.path_config['local_backup_path'], '_completed_backups.log')
self.do_backup()
def get_readable_datetime(self):
return datetime.datetime.now().strftime('%a, %d %b %Y')
def get_today_folder_name(self):
return datetime.datetime.now().strftime('%m-%d-%Y')
def do_backup(self):
print '--------- START BACKUP: %s ---------' % self.get_readable_datetime()
todays_folder = os.path.join(self.path_config['local_backup_path'], self.get_today_folder_name())
"""
Remove existing and recreate temporary backup dirs.
"""
if os.path.exists(todays_folder):
shutil.rmtree(todays_folder)
os.mkdir(todays_folder)
db_backups_path = os.path.join(todays_folder, '_mysql_backup')
os.mkdir(db_backups_path)
"""
Define db-dump command. If no password required, -p argument is not used.
"""
        dumpcmd = 'mysqldump -h %(host)s -u %(user)s -p%(password)s %(db)s > %(db_backup_file_path)s'
if self.db_credentials['password'] == '':
dumpcmd = 'mysqldump -h %(host)s -u %(user)s %(db)s > %(db_backup_file_path)s'
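        # With DB_CREDENTIALS above (empty password), the rendered command is e.g.:
        #   mysqldump -h localhost -u root db_www_sprachtandem_ch > <todays_folder>/_mysql_backup/db_www_sprachtandem_ch.sql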
"""
Loop through all databases and dump them into the backup folder.
"""
for db in self.db_credentials['db_names']:
execute_dump = dumpcmd % {
'host': self.db_credentials['host'],
'user': self.db_credentials['username'],
'password': self.db_credentials['password'],
'db': db,
'db_backup_file_path': '%s/%s.sql' % (db_backups_path, db)
}
os.system(execute_dump)
print 'Dumped DB %s' % db
"""
Sync backup-folder to Google Drive
"""
sync_command = 'rclone copy %(source_dir)s %(remote_name)s:%(remote_path)s' % {
'source_dir': todays_folder,
'remote_name': self.gdrive_credentials['remote_name'],
'remote_path': self.gdrive_credentials['remote_path']
}
os.system(sync_command)
print 'Folder synced to Google Drive'
"""
Delete backup-folder
"""
shutil.rmtree(todays_folder)
self.end_backup()
def end_backup(self):
# delete files
print '\n---------- END BACKUP: %s ----------' % self.get_readable_datetime()
backupInstance = Backup(db_credentials=DB_CREDENTIALS, path_config=PATH_CONFIG, gdrive_credentials=GDRIVE_CREDENTIALS)
|
|
mit
|
Python
|
82fa373c46581e84f8e5ea0da733ef5c65928165
|
Update MultipleParticleSystems.pyde
|
tildebyte/processing.py,mashrin/processing.py,jdf/processing.py,jdf/processing.py,mashrin/processing.py,tildebyte/processing.py,tildebyte/processing.py,jdf/processing.py,mashrin/processing.py
|
mode/examples/Topics/Simulate/MultipleParticleSystems/MultipleParticleSystems.pyde
|
mode/examples/Topics/Simulate/MultipleParticleSystems/MultipleParticleSystems.pyde
|
"""
Multiple Particle Systems
by Daniel Shiffman.
Click the mouse to generate a burst of particles
at mouse location.
Each burst is one instance of a particle system
with Particles and CrazyParticles (a subclass of Particle).
Note use of Inheritance and Polymorphism here.
"""
from crazy_particle import CrazyParticle
from particle import Particle
from particle_system import ParticleSystem
systems = None
def setup():
global systems
size(640, 360)
systems = []
def draw():
background(0)
for ps in systems:
ps.run()
ps.addParticle()
if not systems:
fill(255)
textAlign(CENTER)
text("click mouse to add particle systems", width / 2, height / 2)
def mousePressed():
systems.append(ParticleSystem(1, PVector(mouseX, mouseY)))
|
"""
Multiple Particle Systems
by Daniel Shiffman.
Click the mouse to generate a burst of particles
at mouse location.
Each burst is one instance of a particle system
with Particles and CrazyParticles (a subclass of Particle).
Note use of Inheritance and Polymorphism here.
"""
from crazy_particle import CrazyParticle
from particle import Particle
from particle_system import ParticleSystem
systems = None
def setup():
global systems
size(640, 360)
systems = []
def draw():
background(0)
for ps in systems:
ps.run()
ps.addParticle()
if not systems:
fill(255)
textAlign(CENTER)
text("click mouse to add particle systems", width / 2, height / 2)
def mousePressed():
systems.append(ParticleSystem(1, PVector(mouseX, mouseY)))
|
apache-2.0
|
Python
|
70bc8413dc3748f606e76f5e4e4abcde6b851cdd
|
Read and UDP
|
wadda/Bari
|
bari_spitter.py
|
bari_spitter.py
|
#!/usr/bin/python3
# coding=utf-8
"""reads barometric pressure sensor and writes it to UDP socket with timestamp
"""
import socket
from datetime import datetime
from time import sleep
from time import time
import ms5637
__author__ = 'Moe'
__copyright__ = 'Copyright 2017 Moe'
__license__ = 'MIT'
__version__ = '0.0.2'
# Bari sensor of MS5637
sensor = ms5637.Chip()
bari_file = 'bari_data.csv'
UDP_IP = "192.168.0.2" # Big Machine
UDP_PORT = 6421 # bARI port
MESSAGE = "Get ready to rumble."
print("UDP target IP:", UDP_IP)
print("UDP target port:", UDP_PORT)
print("message:", MESSAGE)
while True:
try:
now = time()
humantime = datetime.fromtimestamp(now).strftime('%Y-%m-%dT%H:%M:%S')
pressure, _temperature = sensor.get_data()
except OSError:
sensor.__init__()
        pressure, _temperature = sensor.get_data()
finally:
outstring = str(humantime) + ', ' + str(pressure)
outstring = outstring.encode()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(outstring, (UDP_IP, UDP_PORT))
sleep(1)
|
|
mit
|
Python
|
6811b4014fc0267edf4d397ccab86b0e986c2215
|
Implement the INFO command
|
ElementalAlchemist/txircd,Heufneutje/txircd
|
txircd/modules/rfc/cmd_info.py
|
txircd/modules/rfc/cmd_info.py
|
from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd import version
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from zope.interface import implements
class InfoCommand(ModuleData, Command):
implements(IPlugin, IModuleData, ICommand)
name = "InfoCommand"
core = True
def hookIRCd(self, ircd):
self.ircd = ircd
def userCommands(self):
return [ ("INFO", 1, self) ]
def parseParams(self, user, params, prefix, tags):
return {}
def execute(self, user, data):
user.sendMessage(irc.RPL_INFO, ":{} is running txircd-{}".format(self.ircd.name, version))
user.sendMessage(irc.RPL_INFO, ":Originally developed for the Desert Bus for Hope charity fundraiser (http://desertbus.org)")
user.sendMessage(irc.RPL_INFO, ":")
user.sendMessage(irc.RPL_INFO, ":Developed by ElementalAlchemist <ElementAlchemist7@gmail.com>")
user.sendMessage(irc.RPL_INFO, ":Contributors:")
user.sendMessage(irc.RPL_INFO, ": Heufneutje")
user.sendMessage(irc.RPL_ENDOFINFO, ":End of /INFO list")
return True
infoCmd = InfoCommand()
|
|
bsd-3-clause
|
Python
|
0f199556df6bd498f01cccdce6316b733c876acc
|
Add migration file
|
inventree/InvenTree,inventree/InvenTree,SchrodingersGat/InvenTree,SchrodingersGat/InvenTree,SchrodingersGat/InvenTree,SchrodingersGat/InvenTree,inventree/InvenTree,inventree/InvenTree
|
InvenTree/part/migrations/0046_auto_20200804_0107.py
|
InvenTree/part/migrations/0046_auto_20200804_0107.py
|
# Generated by Django 3.0.7 on 2020-08-04 01:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('part', '0045_auto_20200605_0932'),
]
operations = [
migrations.AlterField(
model_name='partcategory',
name='default_keywords',
field=models.CharField(blank=True, help_text='Default keywords for parts in this category', max_length=250, null=True),
),
]
|
|
mit
|
Python
|
f8fcae7dd7579b51c3c204337dfa70c702fdbf38
|
add new namedtuple Chunk
|
alphatwirl/alphatwirl,TaiSakuma/AlphaTwirl,alphatwirl/alphatwirl,alphatwirl/alphatwirl,alphatwirl/alphatwirl,TaiSakuma/AlphaTwirl
|
AlphaTwirl/HeppyResult/Chunk.py
|
AlphaTwirl/HeppyResult/Chunk.py
|
# Tai Sakuma <tai.sakuma@cern.ch>
##__________________________________________________________________||
import collections
##__________________________________________________________________||
Chunk = collections.namedtuple('Chunk', 'inputPath treeName maxEvents start component name')
##__________________________________________________________________||
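# Usage sketch (hypothetical field values):
#   chunk = Chunk(inputPath='/store/tree.root', treeName='tree', maxEvents=-1,
#                 start=0, component=None, name='chunk1')
#   chunk.treeName  # -> 'tree'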
|
|
bsd-3-clause
|
Python
|
16004f8138e16da51a5a1df22f3a23b1c9146256
|
Create YelpReviewUsefulnessPrediction_v1.py
|
firiceguo/Recommendation-NLP,firiceguo/Recommendation-NLP
|
src/yyliu/YelpReviewUsefulnessPrediction_v1.py
|
src/yyliu/YelpReviewUsefulnessPrediction_v1.py
|
import os
import sys
import numpy as np
import nltk
# Set the path for spark installation
# this is the path where you have built spark using sbt/sbt assembly
os.environ['SPARK_HOME'] = "/Applications/spark-2.1.0"
# os.environ['SPARK_HOME'] = "/home/jie/d2/spark-0.9.1"
# Append to PYTHONPATH so that pyspark could be found
sys.path.append("/Applications/spark-2.1.0/python")
# Now we are ready to import Spark Modules
try:
from pyspark import SparkContext
from pyspark import SparkConf
from pyspark.ml import Pipeline
from pyspark.ml.linalg import Vectors, VectorUDT
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.feature import *
from pyspark.ml.evaluation import BinaryClassificationEvaluator
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from pyspark.sql import Row
from pyspark.sql.functions import *
from pyspark.sql.types import *
from pyspark.sql import SparkSession
from pyspark.mllib.linalg import SparseVector, DenseVector
from pyspark.ml.feature import CountVectorizer
from pyspark.ml.clustering import LDA
from pyspark.ml.feature import Word2Vec
from pyspark.mllib.linalg import Vectors
from pyspark.ml.feature import Tokenizer, RegexTokenizer
from pyspark.sql.functions import col, udf
from pyspark.sql.types import IntegerType
from pyspark.ml.feature import StopWordsRemover
from pyspark.ml.feature import HashingTF, IDF, Tokenizer
from pyspark.ml import Pipeline
from pyspark.ml.regression import RandomForestRegressor
from pyspark.ml.feature import VectorIndexer
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType, FloatType
except ImportError as e:
print ("Error importing Spark Modules", e)
sys.exit(1)
sc = SparkContext()
spark = SparkSession \
.builder \
.appName("Yelp Review Usefulness Prediction & Baseline") \
.config("spark.some.config.option", "some-value") \
.getOrCreate()
"""
Load Review data and read it into a Dataframe named as reviewDF, select useful columns and save it as selectreviewDF
"""
reviewDF = spark.read.json('/Users/yanyunliu/Downloads/yelp_training_set/yelp_training_set_review.json')
reviewDF.printSchema()
selectreviewDF = reviewDF.select(reviewDF['review_id'],reviewDF['business_id'],reviewDF['user_id'],reviewDF['text'],reviewDF['votes.useful']) \
.withColumnRenamed('useful','label') \
.withColumnRenamed('text','review_text')
selectreviewDF = selectreviewDF.limit(100)
"""
Data Preprocessing:
1. Tokenize the text
2. Remove stopword
3. Convert Text into Vector
4. Calculate IDF
5. Load tf-idf features into LDA topic extraction model
"""
tokenizer = Tokenizer(inputCol="review_text", outputCol="tokens_word")
remover = StopWordsRemover(inputCol="tokens_word", outputCol="filtered_tokens_word")
cv = CountVectorizer(inputCol="filtered_tokens_word", outputCol="raw_features", minDF=2.0)
idf = IDF(inputCol="raw_features", outputCol="features")
lda = LDA(k=30, maxIter=10)
"""
Use RandomForestRegressor to predict usefulness
"""
rf = RandomForestRegressor(featuresCol="topicDistribution")
pipeline = Pipeline(stages=[tokenizer,remover, cv, idf, lda, rf])
(trainingData, testData) = selectreviewDF.randomSplit([0.7, 0.3])
evaluator_rmse = RegressionEvaluator(
labelCol="label", predictionCol="prediction", metricName="rmse")
paramGrid = ParamGridBuilder() \
.addGrid(cv.vocabSize, [150, 200, 250]) \
.build()
# .addGrid(lda.k, [20, 30,50]) \
#
crossval = CrossValidator(estimator=pipeline, \
estimatorParamMaps=paramGrid,\
evaluator=evaluator_rmse,\
numFolds=4) # use 3+ folds in practice
cvModel = crossval.fit(trainingData)
predictions = cvModel.transform(testData)
"""
Evaluation for LDA Prediction
"""
rmse = evaluator_rmse.evaluate(predictions)
print("Root Mean Squared Error (RMSE) on test data = %g" % rmse)
"""
Evaluation for baseline model (Global Average)
"""
baseline_globalavg = trainingData.select('label').agg({"label": "avg"}).collect()[0]['avg(label)']
testData = testData.select('*', lit(float(baseline_globalavg)).alias('baseline_prediction'))
evaluator_rmse_baseline = RegressionEvaluator(labelCol="label", predictionCol="baseline_prediction", metricName="rmse")
rmse = evaluator_rmse_baseline.evaluate(testData)
print("Root Mean Squared Error (RMSE) on test data for baseline = %g" % rmse)
print(cvModel.explainParams())
sc.stop()
|
|
mit
|
Python
|
f531eb7d1734d6d715893356a50d11eee6bc009a
|
Test mobile set password form
|
dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq
|
corehq/apps/users/tests/forms.py
|
corehq/apps/users/tests/forms.py
|
from collections import namedtuple
from django.contrib.auth import get_user_model
from django.test import TestCase
from corehq.apps.users.forms import SetUserPasswordForm
Project = namedtuple('Project', ['name', 'strong_mobile_passwords'])
class TestSetUserPasswordForm(TestCase):
def setUp(self):
super(TestSetUserPasswordForm, self).setUp()
self.project = Project('mydomain', True)
self.user = get_user_model().objects.create_user('tswift')
def tearDown(self):
self.user.delete()
super(TestSetUserPasswordForm, self).tearDown()
def form(self, password):
return SetUserPasswordForm(self.project, user_id=self.user.id, user=self.user, data={
"new_password1": password,
"new_password2": password,
})
def test_weak_password(self):
form = self.form("Taylor")
self.assertFalse(form.is_valid())
def test_strong_password(self):
form = self.form("TaylorSwift89!")
self.assertTrue(form.is_valid())
|
|
bsd-3-clause
|
Python
|
b3f436e14df37d4af602dcdc9882ce27c97fabd4
|
Add a yaml sdb module (#37563)
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
salt/sdb/yaml.py
|
salt/sdb/yaml.py
|
# -*- coding: utf-8 -*-
'''
Pull sdb values from a YAML file
:maintainer: SaltStack
:maturity: New
:platform: all
.. versionadded:: Nitrogen
Configuration:
.. code-block:: yaml
my-yaml-file:
driver: yaml
files:
- /path/to/foo.yaml
- /path/to/bar.yaml
The files are merged together and the result is searched using the same
mechanism Salt uses for searching Grains and Pillar data structures.
Optional configuration:
.. code-block:: yaml
my-yaml-file:
driver: yaml
files:
- /path/to/foo.yaml
- /path/to/bar.yaml
merge:
strategy: smart
merge_list: false
'''
# import python libs
from __future__ import absolute_import
import logging
import salt.exceptions
import salt.loader
import salt.utils
import salt.utils.dictupdate
log = logging.getLogger(__name__)
__func_alias__ = {
'set_': 'set'
}
def set_(*args, **kwargs):
'''
Setting a value is not supported; edit the YAML files directly
'''
raise salt.exceptions.NotImplemented()
def get(key, profile=None): # pylint: disable=W0613
'''
    Get a value from the merged YAML files
'''
data = _get_values(profile)
return salt.utils.traverse_dict_and_list(data, key, None)
def _get_values(profile=None):
'''
Retrieve all the referenced files, deserialize, then merge them together
'''
profile = profile or {}
serializers = salt.loader.serializers(__opts__)
ret = {}
for fname in profile.get('files', []):
try:
with salt.utils.flopen(fname) as f:
contents = serializers.yaml.deserialize(f)
ret = salt.utils.dictupdate.merge(ret, contents,
**profile.get('merge', {}))
except IOError:
log.error("File not found '{0}'".format(fname))
except TypeError:
log.error("Error deserializing sdb file '{0}'".format(fname))
return ret
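# Usage sketch (assumed CLI; Salt resolves sdb:// URIs through drivers like this):
#   salt-call sdb.get 'sdb://my-yaml-file/some:nested:key'
# with colon-delimited traversal, as in Grains and Pillar lookups.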
|
|
apache-2.0
|
Python
|
a2151435057e3e42b8ecf6323b8276f4698fdd15
|
Create getTermSize.py
|
sshortess/Handy-Utilities,sshortess/Handy-Utilities
|
ssh_utils/getTermSize.py
|
ssh_utils/getTermSize.py
|
#!/usr/bin/env python
""" getTerminalSize()
- get width and height of console
- works on linux,os x,windows,cygwin(windows)
"""
__all__=['getTerminalSize']
def getTerminalSize():
import platform
current_os = platform.system()
tuple_xy=None
if current_os == 'Windows':
tuple_xy = _getTerminalSize_windows()
if tuple_xy is None:
tuple_xy = _getTerminalSize_tput()
    # needed for Windows' Python in Cygwin's xterm!
if current_os == 'Linux' or current_os == 'Darwin' or current_os.startswith('CYGWIN'):
tuple_xy = _getTerminalSize_linux()
if tuple_xy is None:
print "default"
tuple_xy = (80, 25) # default value
return tuple_xy
def _getTerminalSize_windows():
res=None
try:
from ctypes import windll, create_string_buffer
# stdin handle is -10
# stdout handle is -11
# stderr handle is -12
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
except:
return None
if res:
import struct
(bufx, bufy, curx, cury, wattr,
left, top, right, bottom, maxx, maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
sizex = right - left + 1
sizey = bottom - top + 1
return sizex, sizey
else:
return None
def _getTerminalSize_tput():
# get terminal width
# src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
try:
import subprocess
proc=subprocess.Popen(["tput", "cols"],stdin=subprocess.PIPE,stdout=subprocess.PIPE)
output=proc.communicate(input=None)
cols=int(output[0])
proc=subprocess.Popen(["tput", "lines"],stdin=subprocess.PIPE,stdout=subprocess.PIPE)
output=proc.communicate(input=None)
rows=int(output[0])
return (cols,rows)
except:
return None
def _getTerminalSize_linux():
    import os
    def ioctl_GWINSZ(fd):
        try:
            import fcntl, termios, struct
            cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,'1234'))
        except:
            return None
        return cr
    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            cr = ioctl_GWINSZ(fd)
            os.close(fd)
        except:
            pass
    if not cr:
        try:
            # fall back to the LINES/COLUMNS environment variables
            cr = (os.environ['LINES'], os.environ['COLUMNS'])
        except:
            return None
    return int(cr[1]), int(cr[0])
if __name__ == "__main__":
sizex,sizey=getTerminalSize()
print 'width =',sizex,'height =',sizey
|
unlicense
|
Python
|
|
6c38414d899b00cf0ba386e59721354f3b2a799b
|
Update bechdel.py
|
beccasjames/learn_python,beccasjames/learn_python
|
bechdel.py
|
bechdel.py
|
# Difficulty level: Advanced
# Goal #1: Create a program that will print out a list of movie titles and a set of ratings defined below into a particular format.
# First, choose any five movies you want.
# Next, look each movie up manually to find out four pieces of information:
# Their parental guidance rating (G, PG, PG-13, R)
# Their Bechdel Test Rating (See http://shannonvturner.com/bechdel or http://bechdeltest.com/)
# Their IMDB Rating from 0 - 10 (See http://imdb.com/)
# Their genre according to IMDB
# You'll need a variable for movie_titles, a variable for parental_rating, a variable for bechdel_rating, a variable for imdb_rating, and a variable for genre.
# Since you have five sets of facts about five movies, you'll want to use lists to hold these pieces of information.
titles = ['American Sniper','Birdman','Boyhood','The Grand Budapest Hotel','The Imitation Game']
parental_rating = ['R', 'R', 'R', 'R', 'PG-13']
bechdel_rating = ['1', '3', '3', '1', '2']
imdb_rating = ['7.4', '8.0','8.1', '8.1', '8.2']
genre = ['Action / Biography / Drama', 'Comedy / Drama', 'Drama', 'Adventure / Comedy / Drama', 'Biography / Drama / Thriller']
# Once all of your information is stored in lists, loop through those lists to print out information with each part separated by a comma, like this:
for title, p_rating, b_rating, i_rating, g in zip(titles, parental_rating, bechdel_rating, imdb_rating, genre):
    print "{0}, {1}, {2}, {3}, {4}".format(title, p_rating, b_rating, i_rating, g)
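# Illustrative first line of output, given the lists above:
# American Sniper, R, 1, 7.4, Action / Biography / Drama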
|
unlicense
|
Python
|
|
5b0f490cb527b0940dc322b060069f44fb29accd
|
Add git versioning
|
LABSN/expyfun,Eric89GXL/expyfun,rkmaddox/expyfun,drammock/expyfun,lkishline/expyfun
|
expyfun/_git.py
|
expyfun/_git.py
|
# -*- coding: utf-8 -*-
|
bsd-3-clause
|
Python
|
|
bcfac4b7ea5b10b5b6e84a756d716ef6c47cdd62
|
Create finalproject.py
|
jasper-meyer/Final-Project
|
finalproject.py
|
finalproject.py
|
code!
|
mit
|
Python
|
|
2cdf030ee6d8a545c071f2c033d88c6c2091ef08
|
Add freeze_graph tool
|
Zehaos/MobileNet,Zehaos/MobileNet
|
freeze_graph.py
|
freeze_graph.py
|
# code from https://blog.metaflow.fr/tensorflow-how-to-freeze-a-model-and-serve-it-with-a-python-api-d4f3596b3adc
# Thanks Morgan
import os, argparse
import tensorflow as tf
from tensorflow.python.framework import graph_util
dir = os.path.dirname(os.path.realpath(__file__))
def freeze_graph(model_folder):
# We retrieve our checkpoint fullpath
checkpoint = tf.train.get_checkpoint_state(model_folder)
input_checkpoint = checkpoint.model_checkpoint_path
    # We work out the full filename of our frozen graph
absolute_model_folder = "/".join(input_checkpoint.split('/')[:-1])
output_graph = absolute_model_folder + "/frozen_model.pb"
    # Before exporting our graph, we need to specify which node is our output node
    # This is how TF decides what part of the Graph it has to keep and what part it can dump
    # NOTE: this variable is plural, because you can have multiple output nodes
output_node_names = "clone_0/MobileNet/Predictions/Softmax"
# We clear devices to allow TensorFlow to control on which device it will load operations
clear_devices = True
# We import the meta graph and retrieve a Saver
saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)
# We retrieve the protobuf graph definition
graph = tf.get_default_graph()
input_graph_def = graph.as_graph_def()
# We start a session and restore the graph weights
with tf.Session() as sess:
saver.restore(sess, input_checkpoint)
# We use a built-in TF helper to export variables to constants
output_graph_def = graph_util.convert_variables_to_constants(
sess, # The session is used to retrieve the weights
input_graph_def, # The graph_def is used to retrieve the nodes
output_node_names.split(",") # The output node names are used to select the usefull nodes
)
# Finally we serialize and dump the output graph to the filesystem
with tf.gfile.GFile(output_graph, "wb") as f:
f.write(output_graph_def.SerializeToString())
print("%d ops in the final graph." % len(output_graph_def.node))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model_folder", type=str, help="Model folder to export")
args = parser.parse_args()
freeze_graph(args.model_folder)
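# Illustrative invocation (the checkpoint directory is hypothetical):
#   python freeze_graph.py --model_folder=./train_logs
# expects ./train_logs to hold a TF checkpoint plus its .meta file and writes
# ./train_logs/frozen_model.pb alongside them.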
|
apache-2.0
|
Python
|
|
f632bb5e63035e491ec74bdbcb0537cf03fa2769
|
Add salt states for rbenv
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
salt/states/rbenv.py
|
salt/states/rbenv.py
|
import re
def _check_rbenv(ret,runas=None):
if not __salt__['rbenv.is_installed'](runas):
ret['result'] = False
ret['comment'] = 'Rbenv is not installed.'
return ret
def _ruby_installed(ret, ruby, runas=None):
default = __salt__['rbenv.default'](runas=runas)
for version in __salt__['rbenv.versions'](runas):
if version == ruby:
ret['result'] = True
ret['comment'] = 'Requested ruby exists.'
ret['default'] = default == ruby
break
return ret
def _check_and_install_ruby(ret, ruby, default=False, runas=None):
ret = _ruby_installed(ret, ruby, runas=runas)
if not ret['result']:
if __salt__['rbenv.install_ruby'](ruby, runas=runas):
ret['result'] = True
ret['changes'][ruby] = 'Installed'
ret['comment'] = 'Successfully installed ruby'
ret['default'] = default
else:
ret['result'] = False
ret['comment'] = 'Could not install ruby.'
return ret
if default:
__salt__['rbenv.default'](ruby,runas=runas)
return ret
def installed(name,default=False,runas=None):
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
if name.startswith('ruby-'):
name = re.sub(r'^ruby-','',name)
if __opts__['test']:
ret['comment'] = 'Ruby {0} is set to be installed'.format(name)
return ret
ret = _check_rbenv(ret, runas)
if ret['result'] == False:
if not __salt__['rbenv.install'](runas):
ret['comment'] = 'Rbenv failed to install'
return ret
else:
return _check_and_install_ruby(ret, name, default, runas=runas)
else:
return _check_and_install_ruby(ret, name, default, runas=runas)
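# Illustrative SLS usage of the state above (names are examples):
#
# ruby-1.9.3-p125:
#   rbenv.installed:
#     - default: True
#     - runas: deploy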
def _check_and_uninstall_ruby(ret, ruby, runas=None):
ret = _ruby_installed(ret, ruby, runas=runas)
if ret['result']:
if ret['default']:
__salt__['rbenv.default']('system', runas=runas)
if __salt__['rbenv.uninstall_ruby'](ruby, runas=runas):
ret['result'] = True
ret['changes'][ruby] = 'Uninstalled'
ret['comment'] = 'Successfully removed ruby'
return ret
else:
ret['result'] = False
ret['comment'] = 'Failed to uninstall ruby'
return ret
else:
ret['result'] = True
ret['comment'] = 'Ruby {0} is already absent'.format(ruby)
return ret
def absent(name,runas=None):
ret = {'name': name, 'result': None, 'comment': '', 'changes': {}}
if name.startswith('ruby-'):
name = re.sub(r'^ruby-','',name)
if __opts__['test']:
ret['comment'] = 'Ruby {0} is set to be uninstalled'.format(name)
return ret
ret = _check_rbenv(ret, runas)
if ret['result'] == False:
ret['result'] = True
ret['comment'] = 'Rbenv not installed, {0} not either'.format(name)
return ret
else:
return _check_and_uninstall_ruby(ret, name, runas=runas)
|
apache-2.0
|
Python
|
|
3cffe6ce42702a1aaa4a01ae1f90962a00fcb911
|
Add yum module
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
salt/modules/yum.py
|
salt/modules/yum.py
|
'''
Support for YUM
'''
import subprocess
def _list_removed(old, new):
    '''
    List the packages which have been removed between the two package objects
    '''
    pkgs = []
    for pkg in old:
        if not new.has_key(pkg):
            pkgs.append(pkg)
    return pkgs
def list_pkgs():
'''
List the packages currently installed in a dict:
{'<package_name>': '<version>'}
CLI Example:
salt '*' yum.list_pkgs
'''
    cmd = "rpm -qa --qf '%{NAME}\\t%{VERSION}-%{RELEASE}\\n'"
ret = {}
out = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE).communicate()[0].split('\n')
    for line in out:
        comps = line.split()
        # fields are tab separated; skip blank or malformed lines
        if len(comps) < 2:
            continue
        ret[comps[0]] = comps[1]
return ret
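# Illustrative return value of list_pkgs (names/versions are examples):
# {'bash': '4.1.2-15.el6', 'yum': '3.2.29-40.el6'}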
def refresh_db():
'''
Since yum refreshes the database automatically, this runs a yum clean,
so that the next yum operation will have a clean database
CLI Example:
salt '*' yum.refresh_db
'''
cmd = 'yum clean dbcache'
subprocess.call(cmd, shell=True)
return True
def install(pkg, refresh=False):
'''
Install the passed package, add refresh=True to clean out the yum database
before executing
Return a dict containing the new package names and versions:
    {'<package>': {'old': '<old-version>',
                   'new': '<new-version>'}}
CLI Example:
salt '*' yum.install <package name>
'''
old = list_pkgs()
cmd = 'yum -y install ' + pkg
if refresh:
refresh_db()
subprocess.call(cmd, shell=True)
new = list_pkgs()
pkgs = {}
for npkg in new:
if old.has_key(npkg):
if old[npkg] == new[npkg]:
# no change in the package
continue
else:
# the package was here before and the version has changed
pkgs[npkg] = {'old': old[npkg],
'new': new[npkg]}
else:
# the package is freshly installed
pkgs[npkg] = {'old': '',
'new': new[npkg]}
return pkgs
def upgrade():
'''
Run a full system upgrade, a yum upgrade
Return a dict containing the new package names and versions:
    {'<package>': {'old': '<old-version>',
                   'new': '<new-version>'}}
CLI Example:
salt '*' yum.upgrade
'''
old = list_pkgs()
cmd = 'yum -y upgrade'
subprocess.call(cmd, shell=True)
new = list_pkgs()
pkgs = {}
for npkg in new:
if old.has_key(npkg):
if old[npkg] == new[npkg]:
# no change in the package
continue
else:
# the package was here before and the version has changed
pkgs[npkg] = {'old': old[npkg],
'new': new[npkg]}
else:
# the package is freshly installed
pkgs[npkg] = {'old': '',
'new': new[npkg]}
return pkgs
def remove(pkg):
'''
Remove a single package with yum remove
Return a list containing the removed packages:
CLI Example:
salt '*' yum.remove <package name>
'''
old = list_pkgs()
cmd = 'yum -y remove ' + pkg
subprocess.call(cmd, shell=True)
new = list_pkgs()
return _list_removed(old, new)
def purge(pkg):
'''
Yum does not have a purge, this function calls remove
Return a list containing the removed packages:
CLI Example:
salt '*' yum.purge <package name>
'''
    return remove(pkg)
|
apache-2.0
|
Python
|
|
2207e8dfbf1ea0f11cac0a95f7c5317eaae27f9b
|
Add cron state support
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
salt/states/cron.py
|
salt/states/cron.py
|
'''
Manage cron states
'''
def present(name,
user='root',
minute='*',
hour='*',
daymonth='*',
month='*',
dayweek='*',
):
'''
Verifies that the specified cron job is present for the specified user
    '''
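    # Illustrative SLS usage (sketch):
    #
    # date > /tmp/crontest:
    #   cron.present:
    #     - user: root
    #     - minute: '5'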
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
data = __salt__['cron.set_job'](
user,
minute,
hour,
daymonth,
month,
dayweek,
name,
)
if data == 'present':
ret['comment'] = 'Cron {0} already present'.format(name)
return ret
if data == 'new':
ret['comment'] = 'Cron {0} added to {1}\'s crontab'.format(name, user)
ret['changes'] = {user: name}
return ret
ret['comment'] = 'Cron {0} for user {1} failed to commit with error \n{2}'.format(
name,
user,
data
)
ret['result'] = False
return ret
|
apache-2.0
|
Python
|
|
86c2441be14dbc3303b0bc65356372728a62fd4a
|
Add infrastructure for counting database queries
|
terceiro/squad,terceiro/squad,terceiro/squad,terceiro/squad
|
test/performance.py
|
test/performance.py
|
from contextlib import contextmanager
import json
import os
import re
import sys
from django.conf import settings
from django.db import connection, reset_queries
count = {}
@contextmanager
def count_queries(k):
q = 0
debug = settings.DEBUG
try:
settings.DEBUG = True
reset_queries()
yield
q = len(connection.queries)
    finally:
        settings.DEBUG = debug
        count.setdefault(k, 0)
        count[k] += q
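# Illustrative usage (sketch): wrap any code whose query count should be
# recorded under a key, e.g.
#   with count_queries('api:project-list'):
#       client.get('/api/projects/')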
def export(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
if os.path.exists(f):
diff(f)
with open(f, 'w') as output:
output.write(json.dumps(count))
def diff(previous_file):
previous = json.loads(open(previous_file).read())
improvements = []
regressions = []
for k, v in count.items():
if k in previous:
v0 = previous[k]
if v > v0:
regressions.append((k, v0, v))
elif v < v0:
improvements.append((k, v0, v))
if improvements:
list_changes(improvements, 'DATABASE PERFORMANCE IMPROVEMENTS')
if regressions:
list_changes(regressions, 'DATABASE PERFORMANCE REGRESSIONS')
print('')
print('If there are good reasons for the increase(s) above (e.g. new features), just remove `%s` and carry on. You will not be bothered again.' % previous_file)
sys.exit(1)
def list_changes(data, title):
print('')
print(title)
print(re.sub('.', '-', title))
print('Unit: number of database queries')
print('')
for k, v0, v in data:
print("%s: %d -> %d" % (k, v0, v))
|
agpl-3.0
|
Python
|
|
059b7c5705d2134ca998e67caf65e3125d503dbc
|
add sitemap.py
|
toddsifleet/staticpy,toddsifleet/staticpy
|
staticpy/page/sitemap.py
|
staticpy/page/sitemap.py
|
from __future__ import absolute_import
import os
from jinja2 import Environment, PackageLoader
from ..utils import write_to_file
class Sitemap(object):
def __init__(self, site):
self.env = Environment(loader=PackageLoader('dynamic', 'templates'))
self.site = site
def write(self):
template = self.env.get_template('sitemap.html')
file_path = os.path.join(
self.site.output_path,
'static',
'sitemap.xml'
)
site_map = template.render(
pages=self.pages,
base_url=self.site.base_url
)
write_to_file(file_path, site_map)
@property
def pages(self):
return [p for p in self.site.pages if p.sitemap]
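# Illustrative usage (sketch; `site` is assumed to expose output_path,
# base_url and an iterable of pages, as used above):
#   sitemap = Sitemap(site)
#   sitemap.write()  # renders static/sitemap.xml under site.output_path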
|
mit
|
Python
|
|
b5568053325bd78c277d4bc0adff59cd12e10f48
|
Add a script to build plugin.
|
qiuwch/unrealcv,unrealcv/unrealcv,qiuwch/unrealcv,unrealcv/unrealcv,qiuwch/unrealcv,unrealcv/unrealcv,qiuwch/unrealcv,unrealcv/unrealcv,unrealcv/unrealcv,qiuwch/unrealcv,qiuwch/unrealcv
|
build-plugin.py
|
build-plugin.py
|
import os
UnrealEnginePath='/home/qiuwch/workspace/UnrealEngine'
UATScript = os.path.join(UnrealEnginePath, 'Engine/Build/BatchFiles/RunUAT.sh')
FullPluginFile = os.path.abspath('UnrealCV.uplugin')
os.system('%s BuildPlugin -plugin=%s' % (UATScript, FullPluginFile))
|
mit
|
Python
|
|
01803c0b8f09d8a818f3ca4db4e4b9c5c14634da
|
Create retrieve_data.py
|
fridayideas/daytomatoserver,fridayideas/daytomatoserver
|
data_crawler/retrieve_data.py
|
data_crawler/retrieve_data.py
|
import rauth
import time
def main():
locations = [(48.44, -123.34), (48.40, -123.37), (48.42, -123.30), (48.44, -123.33), (48.47, -123.32)]
api_calls = []
for lat,longi in locations:
params = get_search_parameters(lat, longi)
api_calls.append(get_results(params))
time.sleep(1.0)
##Do other processing
with open("data.txt", "a") as myfile:
myfile.write(str(api_calls))
with open("json_data.txt", "a") as myfile:
myfile.write("[")
f = open('data.txt', 'r')
for line in f.readlines():
#print line
if "rating" in line:
if "{u'is_claimed': True" in line:
rating = line.split("{u'is_claimed': True, u'rating': ")
else:
rating = line.split("{u'is_claimed': False, u'rating': ")
rating = rating[1].split(", u'mobile_url'")
print rating[0]
name = rating[1].split("'name': u'")
name = name[1].split("', u'rating_img_url_small':")
print name[0]
categories = name[1].split("u'categories':")
categories = categories[1].split(", u'display_phone'")
print categories[0]
category = categories[0].split("'")
print category[1]
id = categories[1].split("u'id': u'")
id = id[1].split("', u'snippet_image_url'")
print id[0]
lat = id[1].split("latitude': ")
lat = lat[1].split(", u'longitude': ")
longi = lat[1].split("}, u'state_code")
print lat[0], longi[0]
                pin = """{{"rating": {one}, "pinType": 0, "name": "{two}", "description": "{three}", "likes" : 0, "coordinate": {{ "latitude": {four}, "longitude": {five}}}, "linkedAccount": "FridayIdeas", "reviews": [{{"linkedAccount":null,"text":null,"createDate":null}}]}},\n""".format(one=rating[0], two=name[0], three=category[1], four=lat[0], five=longi[0])
myfile.write(pin)
f.close()
myfile.write("]")
def get_results(params):
#Obtain these from Yelp's manage access page
consumer_key = "YOUR_CONSUMER_KEY"
consumer_secret = "YOUR_CONSUMER_SECRET"
token = "YOUR_TOKEN"
token_secret = "YOUR TOKEN_SECRET"
session = rauth.OAuth1Session(
consumer_key = consumer_key
,consumer_secret = consumer_secret
,access_token = token
,access_token_secret = token_secret)
request = session.get("http://api.yelp.com/v2/search",params=params)
#Transforms the JSON API response into a Python dictionary
data = request.json()
session.close()
return data
def get_search_parameters(lat,longi):
#See the Yelp API for more details
params = {}
params["categories"] = "restaurant"
params["ll"] = "{},{}".format(str(lat),str(longi))
params["radius"] = "1000"
return params
if __name__=="__main__":
main()
|
apache-2.0
|
Python
|
|
3be1c4f57e68b89d3c740a444e1f14ba67f3eada
|
Add a snippet.
|
jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets
|
python/pyqt/pyqt5/widget_QTableView_delegate_on_edit_using_timeedit_widget.py
|
python/pyqt/pyqt5/widget_QTableView_delegate_on_edit_using_timeedit_widget.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Ref:
# - http://doc.qt.io/qt-5/modelview.html#3-4-delegates
# - http://doc.qt.io/qt-5/model-view-programming.html#delegate-classes
# - http://doc.qt.io/qt-5/qabstractitemdelegate.html#details
# - http://doc.qt.io/qt-5/qitemdelegate.html#details
# - http://doc.qt.io/qt-5/qstyleditemdelegate.html#details
# - http://doc.qt.io/qt-5/qtwidgets-itemviews-spinboxdelegate-example.html
import sys
import datetime
from PyQt5.QtCore import Qt, QAbstractTableModel, QVariant
from PyQt5.QtWidgets import QApplication, QTableView, QStyledItemDelegate, QTimeEdit
DATETIME_FORMAT = '%H:%M:%S'
class MyData:
def __init__(self):
self._num_rows = 3
self._num_columns = 2
self._data = [[datetime.datetime.now().strftime(DATETIME_FORMAT) for j in range(self._num_columns)] for i in range(self._num_rows)]
def get_num_rows(self):
return self._num_rows
def get_num_columns(self):
return self._num_columns
def get_data(self, row_index, column_index):
value = self._data[row_index][column_index]
print("read ({},{}): {}".format(row_index, column_index, value))
return value
def set_data(self, row_index, column_index, value):
print("write ({},{}): {}".format(row_index, column_index, value))
self._data[row_index][column_index] = value
###############################################################################
class MyModel(QAbstractTableModel):
def __init__(self, data, parent=None):
super().__init__(parent)
        self._data = data  # don't call this attribute "data": the model already has a method named data (model.data(index, role))
def rowCount(self, parent):
return self._data.get_num_rows()
def columnCount(self, parent):
return self._data.get_num_columns()
def data(self, index, role):
if role == Qt.DisplayRole or role == Qt.EditRole:
# See https://stackoverflow.com/a/8480223
return self._data.get_data(index.row(), index.column())
return QVariant()
def setData(self, index, value, role):
if role == Qt.EditRole:
try:
self._data.set_data(index.row(), index.column(), value)
                # The following line is necessary e.g. to dynamically update the QSortFilterProxyModel
self.dataChanged.emit(index, index, [Qt.EditRole])
except Exception as e:
print(e)
return False
return True
def flags(self, index):
return Qt.ItemIsSelectable | Qt.ItemIsEditable | Qt.ItemIsEnabled
###############################################################################
class MyDelegate(QStyledItemDelegate):
def createEditor(self, parent, option, index):
editor = QTimeEdit(parent=parent)
editor.setMinimumTime(datetime.time(hour=8, minute=30, second=30))
editor.setMaximumTime(datetime.time(hour=23, minute=30, second=30))
editor.setDisplayFormat("HH:mm:ss")
# setFrame(): tell whether the line edit draws itself with a frame.
# If enabled (the default) the line edit draws itself inside a frame, otherwise the line edit draws itself without any frame.
editor.setFrame(False)
return editor
def setEditorData(self, editor, index):
str_value = index.model().data(index, Qt.EditRole)
value = datetime.datetime.strptime(str_value, DATETIME_FORMAT)
        editor.setTime(value.time())  # value cannot be a string, it has to be a datetime
def setModelData(self, editor, model, index):
editor.interpretText()
value = editor.text()
model.setData(index, value, Qt.EditRole)
def updateEditorGeometry(self, editor, option, index):
editor.setGeometry(option.rect)
if __name__ == '__main__':
app = QApplication(sys.argv)
data = MyData()
table_view = QTableView()
my_model = MyModel(data)
table_view.setModel(my_model)
delegate = MyDelegate()
table_view.setItemDelegate(delegate)
table_view.show()
    # The mainloop of the application. The event handling starts from this point.
    # exec_() has a trailing underscore because exec is a Python keyword.
exit_code = app.exec_()
# The sys.exit() method ensures a clean exit.
# The environment will be informed, how the application ended.
sys.exit(exit_code)
|
mit
|
Python
|
|
1f98fdc87ef62bb2b7a815f80c56f6957ab303b5
|
Add tests for tensor_operators
|
odashi/primitiv,odashi/primitiv,odashi/primitiv
|
python-primitiv/tests/tensor_operators.py
|
python-primitiv/tests/tensor_operators.py
|
from primitiv import Device
from primitiv import tensor_operators as tF
from primitiv.devices import Naive
import numpy as np
import unittest
class TensorOperatorsTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
self.device = Naive()
Device.set_default(self.device)
self.a = np.array([[1, 2], [3, 4]], np.float32)
self.b = np.array([[1, 1], [4, 8]], np.float32)
def tearDown(self):
pass
def test_tensor_pos(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(((+x).to_ndarrays()[0] == self.a).all())
def test_tensor_neg(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(((-x).to_ndarrays()[0] == -self.a).all())
def test_tensor_add(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(((x + y).to_ndarrays()[0] == np.array([[2, 3], [7, 12]])).all())
self.assertTrue(((x + 2).to_ndarrays()[0] == np.array([[3, 4], [5, 6]])).all())
self.assertTrue(((2 + x).to_ndarrays()[0] == np.array([[3, 4], [5, 6]])).all())
def test_tensor_sub(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(((x - y).to_ndarrays()[0] == np.array([[0, 1], [-1, -4]])).all())
self.assertTrue(((x - 2).to_ndarrays()[0] == np.array([[-1, 0], [1, 2]])).all())
self.assertTrue(((2 - x).to_ndarrays()[0] == np.array([[1, 0], [-1, -2]])).all())
def test_tensor_mul(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(((x * y).to_ndarrays()[0] == np.array([[1, 2], [12, 32]])).all())
self.assertTrue(((x * 2).to_ndarrays()[0] == np.array([[2, 4], [6, 8]])).all())
self.assertTrue(((2 * x).to_ndarrays()[0] == np.array([[2, 4], [6, 8]])).all())
def test_tensor_matmul(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(((x @ y).to_ndarrays()[0] == np.array([[9, 17], [19, 35]])).all())
self.assertRaises(TypeError, lambda: x @ 2)
self.assertRaises(TypeError, lambda: 2 @ x)
def test_tensor_truediv(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(((x / y).to_ndarrays()[0] == np.array([[1, 2], [0.75, 0.5]])).all())
self.assertTrue(((x / 2).to_ndarrays()[0] == np.array([[0.5, 1], [1.5, 2]])).all())
self.assertTrue(((2 / y).to_ndarrays()[0] == np.array([[2, 2], [0.5, 0.25]])).all())
def test_tensor_pow(self):
x = tF.input(self.a)
y = tF.input(self.b)
self.assertTrue(np.isclose((x ** y).to_ndarrays()[0], np.array([[1, 2], [81, 65536]])).all())
self.assertTrue(np.isclose((x ** 2).to_ndarrays()[0], np.array([[1, 4], [9, 16]])).all())
self.assertTrue(np.isclose((2 ** x).to_ndarrays()[0], np.array([[2, 4], [8, 16]])).all())
self.assertRaises(TypeError, lambda: pow(x, y, 2))
def test_tensor_iadd(self):
x = tF.input(self.a)
y = tF.input(self.b)
x_tmp = x
x += y
self.assertIs(x, x_tmp)
self.assertTrue((x.to_ndarrays()[0] == np.array([[2, 3], [7, 12]])).all())
def test_tensor_isub(self):
x = tF.input(self.a)
y = tF.input(self.b)
x_tmp = x
x -= y
self.assertIs(x, x_tmp)
self.assertTrue((x.to_ndarrays()[0] == np.array([[0, 1], [-1, -4]])).all())
def test_tensor_imul(self):
x = tF.input(self.a)
x_tmp = x
x *= 2
self.assertIs(x, x_tmp)
self.assertTrue((x.to_ndarrays()[0] == np.array([[2, 4], [6, 8]])).all())
|
apache-2.0
|
Python
|
|
bf42dd5246d935b0179faf1d563baa98bbcf0dbc
|
Create setup.py
|
olmallet81/URT,olmallet81/URT,olmallet81/URT
|
Python/setup.py
|
Python/setup.py
|
#==================================================================================================
# Copyright (C) 2016 Olivier Mallet - All Rights Reserved
#==================================================================================================
# run with:
# python setup.py build_ext --inplace
# before running the python program you need to export the library path:
# export LD_LIBRARY_PATH=/path/to/URT/lib:$LD_LIBRARY_PATH
# export LD_LIBRARY_PATH=/home/olivier/Z/GitHub/Cpp/URT/lib:$LD_LIBRARY_PATH
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
from numpy import get_include
# linking to C++ libURT.so library
ext = Extension('CyURT',
sources = ['CyURT.pyx'],
include_dirs = [get_include()],
libraries = ['URT'],
extra_compile_args = ['-std=c++14','-Wall','-march=native','-DUSE_BLAZE','-DBLAZE_BLAS_INCLUDE_FILE <cblas.h>'],
extra_link_args = ['-L../lib'],
language='c++')
ext.cython_directives = {'boundscheck': False,'wraparound': False}
# turn off bounds-checking for entire function
# turn off negative index wrapping for entire function
setup(cmdclass = {'build_ext' : build_ext}, ext_modules = [ext])
|
mit
|
Python
|
|
5ae41fc3763f4fd4a25a7863ab139ef2709e9565
|
Fix missing import
|
benhamner/Metrics,wendykan/Metrics,thitchen/Metrics,dksahuji/Metrics,thitchen/Metrics,eduardofv/Metrics,AaronRanAn/Metrics,benhamner/Metrics,abimannans/Metrics,dksahuji/Metrics,wendykan/Metrics,eduardofv/Metrics,AaronRanAn/Metrics,eduardofv/Metrics,AaronRanAn/Metrics,ujjwalkarn/Metrics,benhamner/Metrics,wendykan/Metrics,eduardofv/Metrics,thitchen/Metrics,ujjwalkarn/Metrics,benhamner/Metrics,thitchen/Metrics,ujjwalkarn/Metrics,ujjwalkarn/Metrics,abimannans/Metrics,ujjwalkarn/Metrics,abimannans/Metrics,abimannans/Metrics,abimannans/Metrics,dksahuji/Metrics,wendykan/Metrics,wendykan/Metrics,AaronRanAn/Metrics,dksahuji/Metrics,AaronRanAn/Metrics,eduardofv/Metrics,benhamner/Metrics,thitchen/Metrics,dksahuji/Metrics
|
Python/setup.py
|
Python/setup.py
|
#!/usr/bin/env python
from setuptools import setup
import sys
requirements = [x.strip() for x in open("requirements.txt")]
# Automatically run 2to3 for Python 3 support
extra = {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
setup(name='ml_metrics',
version='0.1.2',
description='Machine Learning Evaluation Metrics',
author = 'Ben Hamner',
author_email = 'ben@benhamner.com',
packages = ['ml_metrics', 'ml_metrics.custom'],
install_requires = requirements,
**extra)
|
#!/usr/bin/env python
from setuptools import setup
requirements = [x.strip() for x in open("requirements.txt")]
# Automatically run 2to3 for Python 3 support
extra = {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
setup(name='ml_metrics',
version='0.1.2',
description='Machine Learning Evaluation Metrics',
author = 'Ben Hamner',
author_email = 'ben@benhamner.com',
packages = ['ml_metrics', 'ml_metrics.custom'],
install_requires = requirements,
**extra)
|
bsd-2-clause
|
Python
|
2aa07b8ac9ba2ec8d2b1ac814b5a1fb3074a2616
|
test loading dataset
|
rnoxy/cifar10-cnn
|
test_loadDataset.py
|
test_loadDataset.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: test_loadDataset.py
# Author: Rafał Nowak <rafal.nowak@cs.uni.wroc.pl>
import unittest
class TestLoadDataset(unittest.TestCase):
"""Test load_CIFAR_dataset function from utils"""
def test_certain_images(self):
from myutils import load_CIFAR_dataset
data_training, data_testing = load_CIFAR_dataset(shuffle=False)
sample_id = 9
self.assertTrue( (data_training[sample_id-1][0][0,0,:] == [134, 186, 223]).all() )
sample_id = 19
self.assertTrue( (data_training[sample_id-1][0][30,31,:] == [91, 75, 64]).all() )
self.assertTrue( (data_testing[sample_id-1][0][30,31,:] == [61, 103, 125]).all() )
self.assertEqual( data_testing[sample_id-1][1], 8 )
def test_shuffling(self):
from myutils import load_CIFAR_dataset
data_training, data_testing = load_CIFAR_dataset()
sample_id = 192
x_training = data_training[sample_id][0][:,:]
y_training = data_training[sample_id][1]
sample_id = 190
x_testing = data_testing[sample_id][0][:,:]
y_testing = data_testing[sample_id][1]
data_training, data_testing = load_CIFAR_dataset(shuffle=True)
found = False
for i in range(0,50000):
if ( data_training[i][0][:,:] == x_training ).all():
if found:
self.fail()
else:
found = True
self.assertEqual( y_training , data_training[i][1] )
found = False
for i in range(0,10000):
if ( data_testing[i][0][:,:] == x_testing ).all():
if found:
self.fail()
else:
found = True
self.assertEqual( y_testing , data_testing[i][1] )
if __name__ == '__main__':
unittest.main()
|
mit
|
Python
|
|
21d931e35d9e0b32415a408f28e45894f0c3e800
|
Add task files for celery async process
|
semorale/backend-test,semorale/backend-test,semorale/backend-test
|
django_backend_test/noras_menu/tasks.py
|
django_backend_test/noras_menu/tasks.py
|
# -*- encoding: utf-8 -*-
#app_mail/tasks.py
import requests
import simplejson as json
from django_backend_test.celery import app
from django.template.loader import render_to_string
from django.utils.html import strip_tags
from django.core.mail import EmailMultiAlternatives
from .models import Subscribers, MenuItems
@app.task
def mail_remainder(menu,link):
items_menu = MenuItems.objects.filter(menu_id=menu.pk).values_list('name', flat=True)
list_mail = Subscribers.objects.values_list('email', flat=True)
subject,from_email,to = 'Menu of the Day','alertas@electroquimica.cl',list_mail
html_content = render_to_string('menu_day.html',{'menu':items_menu,'link':str(link)})
text_content = strip_tags(html_content)
msg = EmailMultiAlternatives(subject,text_content,from_email,to)
msg.attach_alternative(html_content,"text/html")
msg.send()
@app.task
def slack_remainder(menu,link):
    # message text translates to: "Hi!\nHere's today's menu :)"
    msg = u"Hola!\nDejo el menú de hoy :)\n {0} <http://{1}>"
items_menu= MenuItems.objects.filter(menu_id=menu.pk).values_list('name', flat=True)
text="".join([x+"\n" for x in items_menu])
data = {"text":msg.format(text,link), "username":"Nora", "icon_emoji": ":knife_fork_plate:",}
headers = {'Content-type': 'application/json'}
response = requests.post("https://hooks.slack.com/services/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", data=json.dumps(data), headers=headers)
|
mit
|
Python
|
|
849a29b22d656c8079b4ccaf922848fb057c80c5
|
Add migration to assign appropriate sheets to Transnational CountryRegion
|
Code4SA/gmmp,Code4SA/gmmp,Code4SA/gmmp
|
forms/migrations/0023_assign_sheets_to_transnational.py
|
forms/migrations/0023_assign_sheets_to_transnational.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist
from django.db import migrations
def assign_transnational_region_to_sheets(apps, schema_editor):
from forms.models import sheet_models
CountryRegion = apps.get_model("forms", "CountryRegion")
Monitor = apps.get_model("gmmp", "Monitor")
db_alias = schema_editor.connection.alias
try:
trans_country_region = CountryRegion.objects.using(db_alias).get(country='T1', region='Transnational')
except ObjectDoesNotExist:
trans_country_region = CountryRegion(country='T1', region='Transnational')
trans_country_region.save()
monitor = Monitor.objects.get(user__last_name='Macharia', user__first_name='Sarah')
monitor.country = trans_country_region.country
monitor.save()
for name, model in sheet_models.iteritems():
sheets_model = apps.get_model("forms", model._meta.object_name)
sheets = sheets_model.objects.using(db_alias).filter(monitor=monitor)
for sheet in sheets:
sheet.country_region = trans_country_region
sheet.country = trans_country_region.country
sheet.save()
def backwards(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('forms', '0022_assign_country_region_to_sheet_models'),
]
operations = [
migrations.RunPython(
assign_transnational_region_to_sheets,
backwards,
),
]
|
apache-2.0
|
Python
|
|
22738b2cae0a6c77127bbf5385b7265247ffb306
|
migrate also user profiles
|
adaptive-learning/geography,adaptive-learning/geography,adaptive-learning/geography,adaptive-learning/geography
|
geography/management/commands/migrate_geography_user.py
|
geography/management/commands/migrate_geography_user.py
|
from proso_user.models import UserProfile
from django.core.management.base import BaseCommand
from optparse import make_option
from contextlib import closing
from django.db import connection
from clint.textui import progress
from django.db import transaction
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option(
'--clean',
action='store_true',
dest='clean',
default=False,
help='Delete all previously loaded data'),
)
def handle(self, *args, **options):
with transaction.atomic():
if options['clean']:
self.clean()
self.create_profiles()
def clean(self):
with closing(connection.cursor()) as cursor:
cursor.execute('TRUNCATE TABLE proso_user_userprofile')
def create_profiles(self):
with closing(connection.cursor()) as cursor:
cursor.execute(
'''
SELECT auth_user.id
FROM auth_user
LEFT JOIN lazysignup_lazyuser ON auth_user.id = lazysignup_lazyuser.user_id
WHERE lazysignup_lazyuser.id IS NULL
''')
for user_id, in progress.bar(cursor, every=max(1, cursor.rowcount / 100), expected_size=cursor.rowcount):
profile = UserProfile.objects.get_or_create(user_id=user_id, public=True)
|
mit
|
Python
|
|
e7640ad635a77eecbcc5291792b514e42958876e
|
add magic-gen.py
|
sdgdsffdsfff/criu,efiop/criu,AuthenticEshkinKot/criu,ldu4/criu,KKoukiou/criu-remote,gonkulator/criu,gonkulator/criu,fbocharov/criu,KKoukiou/criu-remote,biddyweb/criu,eabatalov/criu,rentzsch/criu,KKoukiou/criu-remote,efiop/criu,AuthenticEshkinKot/criu,svloyso/criu,tych0/criu,rentzsch/criu,wtf42/criu,gablg1/criu,ldu4/criu,svloyso/criu,fbocharov/criu,sdgdsffdsfff/criu,gonkulator/criu,ldu4/criu,fbocharov/criu,ldu4/criu,AuthenticEshkinKot/criu,svloyso/criu,biddyweb/criu,KKoukiou/criu-remote,gonkulator/criu,wtf42/criu,svloyso/criu,LK4D4/criu,sdgdsffdsfff/criu,marcosnils/criu,marcosnils/criu,marcosnils/criu,tych0/criu,efiop/criu,gablg1/criu,sdgdsffdsfff/criu,svloyso/criu,sdgdsffdsfff/criu,efiop/criu,tych0/criu,gablg1/criu,marcosnils/criu,gablg1/criu,eabatalov/criu,AuthenticEshkinKot/criu,biddyweb/criu,tych0/criu,biddyweb/criu,biddyweb/criu,LK4D4/criu,gablg1/criu,gonkulator/criu,marcosnils/criu,fbocharov/criu,ldu4/criu,sdgdsffdsfff/criu,rentzsch/criu,svloyso/criu,rentzsch/criu,rentzsch/criu,AuthenticEshkinKot/criu,wtf42/criu,fbocharov/criu,wtf42/criu,AuthenticEshkinKot/criu,biddyweb/criu,gonkulator/criu,eabatalov/criu,rentzsch/criu,gablg1/criu,marcosnils/criu,efiop/criu,LK4D4/criu,KKoukiou/criu-remote,LK4D4/criu,KKoukiou/criu-remote,wtf42/criu,eabatalov/criu,wtf42/criu,tych0/criu,tych0/criu,fbocharov/criu,eabatalov/criu,eabatalov/criu,LK4D4/criu,LK4D4/criu,ldu4/criu,efiop/criu
|
scripts/magic-gen.py
|
scripts/magic-gen.py
|
#!/bin/env python
import os, sys
import struct
# This program parses criu magic.h file and produces
# magic.py with all *_MAGIC constants except RAW and V1.
def main(argv):
if len(argv) != 3:
print("Usage: magic-gen.py path/to/image.h path/to/magic.py")
exit(1)
magic_c_header = argv[1]
magic_py = argv[2]
out = open(magic_py, 'w+')
# all_magic is used to parse constructions like:
# #define PAGEMAP_MAGIC 0x56084025
# #define SHMEM_PAGEMAP_MAGIC PAGEMAP_MAGIC
all_magic = {}
# and magic is used to store only unique magic.
magic = {}
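    # Example of the aliasing this enables, using the lines quoted above:
    #   #define PAGEMAP_MAGIC       0x56084025
    #   #define SHMEM_PAGEMAP_MAGIC PAGEMAP_MAGIC
    # all_magic maps both names to 0x56084025; magic keeps only PAGEMAP_MAGIC.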
f = open(magic_c_header, 'r')
for line in f:
split = line.split()
if len(split) < 3:
continue
if not '#define' in split[0]:
continue
key = split[1]
value = split[2]
if value in all_magic:
value = all_magic[value]
else:
magic[key] = value
all_magic[key] = value
out.write('#Autogenerated. Do not edit!\n')
out.write('by_name = {}\n')
out.write('by_val = {}\n')
for k,v in magic.items():
# We don't need RAW or V1 magic, because
# they can't be used to identify images.
        if v in ('0x0', '1'):
            continue
if k.endswith("_MAGIC"):
# Just cutting _MAGIC suffix
k = k[:-6]
v = int(v, 16)
out.write("by_name['"+ k +"'] = "+ str(v) +"\n")
out.write("by_val["+ str(v) +"] = '"+ k +"'\n")
f.close()
out.close()
if __name__ == "__main__":
main(sys.argv)
|
lgpl-2.1
|
Python
|
|
03c0aa498470037ef2aa6a8233198ff521f8d42f
|
add the links demo
|
MathieuDuponchelle/pygobject,davidmalcolm/pygobject,thiblahute/pygobject,davibe/pygobject,thiblahute/pygobject,jdahlin/pygobject,GNOME/pygobject,pexip/pygobject,choeger/pygobject-cmake,nzjrs/pygobject,davibe/pygobject,pexip/pygobject,MathieuDuponchelle/pygobject,GNOME/pygobject,MathieuDuponchelle/pygobject,davidmalcolm/pygobject,davibe/pygobject,sfeltman/pygobject,alexef/pygobject,alexef/pygobject,sfeltman/pygobject,davidmalcolm/pygobject,sfeltman/pygobject,nzjrs/pygobject,davibe/pygobject,thiblahute/pygobject,jdahlin/pygobject,pexip/pygobject,nzjrs/pygobject,choeger/pygobject-cmake,alexef/pygobject,choeger/pygobject-cmake,jdahlin/pygobject,GNOME/pygobject
|
demos/gtk-demo/demos/links.py
|
demos/gtk-demo/demos/links.py
|
#!/usr/bin/env python
# -*- Mode: Python; py-indent-offset: 4 -*-
# vim: tabstop=4 shiftwidth=4 expandtab
#
# Copyright (C) 2010 Red Hat, Inc., John (J5) Palmieri <johnp@redhat.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
title = "Links"
description = """
GtkLabel can show hyperlinks. The default action is to call gtk_show_uri() on
their URI, but it is possible to override this with a custom handler.
"""
from gi.repository import Gtk
class LinksApp:
def __init__(self):
self.window = Gtk.Window()
self.window.set_title('Links')
self.window.set_border_width(12)
self.window.connect('destroy', Gtk.main_quit)
label = Gtk.Label("""Some <a href="http://en.wikipedia.org/wiki/Text"
title="plain text">text</a> may be marked up
as hyperlinks, which can be clicked
or activated via <a href="keynav">keynav</a>""")
label.set_use_markup(True)
label.connect("activate-link", self.activate_link)
        self.window.add(label)
label.show()
self.window.show()
def activate_link(self, label, uri):
if uri == 'keynav':
parent = label.get_toplevel()
markup = """The term <i>keynav</i> is a shorthand for
keyboard navigation and refers to the process of using
a program (exclusively) via keyboard input."""
dialog = Gtk.MessageDialog(parent,
Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.INFO,
Gtk.ButtonsType.OK,
text=markup,
use_markup=True)
dialog.present()
dialog.connect('response', self.response_cb)
return True
def response_cb(self, dialog, response_id):
dialog.destroy()
def main(demoapp=None):
app = LinksApp()
Gtk.main()
if __name__ == '__main__':
main()
|
lgpl-2.1
|
Python
|
|
dc82990f7a00e5e1e4d2a860630507f9cb3b81d4
|
add script for just opening a package source
|
sassoftware/mirrorball,sassoftware/mirrorball
|
scripts/pkgsource.py
|
scripts/pkgsource.py
|
#!/usr/bin/python
import sys
from conary.lib import util
sys.excepthook = util.genExcepthook()
import logging
import updatebot.log
updatebot.log.addRootLogger()
log = logging.getLogger('test')
from aptmd import Client
from updatebot import config
from updatebot import pkgsource
cfg = config.UpdateBotConfig()
cfg.read('/data/hg/mirrorball/config/ubuntu/updatebotrc')
client = Client('http://i.rdu.rpath.com/ubuntu')
pkgSource = pkgsource.PackageSource(cfg)
for path in cfg.repositoryPaths:
log.info('loading %s' % path)
pkgSource.loadFromClient(client, path)
pkgSource.finalize()
import epdb; epdb.st()
|
apache-2.0
|
Python
|
|
d6fa3fb8aa67d7581990c9278794516e499a3eb3
|
Create RegRipbyDate.py
|
MalWerewolf/RegRipbyDate
|
RegRipbyDate.py
|
RegRipbyDate.py
|
'''
Created on May 19, 2014
@author: CaptainCrabnasty
----------------------------------------------------------------------------------
Copyright 2014
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
----------------------------------------------------------------------------------
A script very similar to William Ballenthin's print all sample script @
https://github.com/williballenthin/python-registry/blob/master/samples/printall.py,
but this one allows a more refined search of the registry, filtered by
each key's modified date.
'''
from Registry import Registry
from datetime import datetime
import argparse
import os
import csv
mydic = {}
def createdate(datestr):
try:
return datetime.strptime(datestr, "%Y-%m-%d %H:%M:%S")
except Exception, e:
print e
def fileexists(filepath):
try:
if os.path.isfile(filepath):
return filepath
else:
print "There is no hive at:" + filepath
except Exception, e:
print e
def rip():
parser = argparse.ArgumentParser(description="Parse the Windows registry hive for date related artifacts.")
parser.add_argument("-e", "--earliest", type=createdate, required=True, metavar="2014-01-26 00:00:00", help="Earliest Date. Format: 2014-01-26 00:00:00")
parser.add_argument("-l", "--latest", type=createdate, required=True, metavar="2014-01-27 00:00:00", help="Latest Date Format: 2014-01-27 00:00:00")
parser.add_argument('-i', '--hive', type=fileexists, required=True, metavar="'/Desktop/Somewhere/HiveName'", help='Location of the Windows registry hive file Date')
parser.add_argument('-c', '--csv', help='Optional Parameter == output of csv.', action='store_true', default=False)
args = parser.parse_args()
if args.earliest and args.latest and args.hive:
f = open(args.hive, "rb")
r = Registry.Registry(f)
MIN_DATE = datetime.strptime(str(args.earliest), "%Y-%m-%d %H:%M:%S")
MAX_DATE = datetime.strptime(str(args.latest), "%Y-%m-%d %H:%M:%S")
#createdate(str(args.earliest))
rec(r.root(),MIN_DATE,MAX_DATE)
if args.csv:
#User wants csv output.
x=csv.writer(open(getDir(args.hive),'wb'), delimiter=',', dialect='excel-tab', quoting=csv.QUOTE_ALL)
x.writerow(['Timestamp','Key Path'])
for key, value in mydic.iteritems():
x.writerow([str(key), str(value)])
else:
#User wants no output.
for key, value in mydic.iteritems():
print "%s, %s" % (key, value)
else:
print parser.usage
exit(0)
def rec(key,MIN_DATE,MAX_DATE):
if MIN_DATE < key.timestamp() < MAX_DATE:
#print "%s %s" % (key.timestamp(), key.path())
mydic.update({key.timestamp(): key.path()})
for subkey in key.subkeys():
rec(subkey,MIN_DATE,MAX_DATE)
def getDir(x):
return x.rsplit('/', 1)[0] + '/' + 'RegRipbyDate.txt'
if __name__ == "__main__":
rip()
|
apache-2.0
|
Python
|
|
eeb9b9877f1aa5bc1f22ac4883fe58a57ee0474a
|
Add script to test HOTS
|
oliverlee/phobos,oliverlee/phobos,oliverlee/phobos,oliverlee/phobos
|
scripts/test_hots.py
|
scripts/test_hots.py
|
import numpy as np
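# Sketch of what this script does: least-squares fit a cubic polynomial to
# the (timestamp, position) events below, then evaluate it 1000 timer ticks
# past the last event to extrapolate the next position.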
events = [
(1162704874, -5547),
(1179727586, -5548),
(1209562198, -5547),
(1224960594, -5548),
]
t, x = zip(*events)
t = np.array(t)
x = np.array(x)
t = t - t[0] # redefine zero time
alpha = 1/t[-1]
t = alpha*t # scale time values
A = np.ones((4, 4))
A[:, -2] = np.array(t)
for i in reversed(range(0, A.shape[1] - 2)):
A[:, i] = A[:, i + 1] * A[:, -2]
B = np.array(x)
print(A)
print(B)
P = np.linalg.lstsq(A, B)[0]
print(P)
tc = alpha*(events[-1][0] + 1000)
print(tc)
T = np.ones(4)
for i in reversed(range(0, T.shape[0] - 1)):
T[i] = tc * T[i + 1]
print(T)
print(np.dot(P, T))
|
bsd-2-clause
|
Python
|
|
3862ea1b1cae1c3be80824495d1c6937a18378b9
|
test added
|
mjirik/pyseg_base,mjirik/pysegbase,mjirik/pyseg_base,mjirik/pysegbase
|
tests/pycut_test.py
|
tests/pycut_test.py
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
# import functions from another directory
import sys
import os.path
import copy
path_to_script = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(path_to_script, "../src/"))
import unittest
import numpy as np
import pycut
class PycutTest(unittest.TestCase):
    # @TODO get this test working again
    #@unittest.skip("Waiting until Tomas fixes it")
def test_ordered_values_by_indexes(self):
"""
test of pycut.__ordered_values_by_indexes
"""
slab = {'none':0, 'liver':1, 'porta':2, 'lesions':6}
voxelsize_mm = np.array([1.0,1.0,1.2])
segm = np.zeros([256,256,80], dtype=np.int16)
# liver
segm[70:190,40:220,30:60] = slab['liver']
# port
segm[120:130,70:220,40:45] = slab['porta']
segm[80:130,100:110,40:45] = slab['porta']
segm[120:170,130:135,40:44] = slab['porta']
        # create a copy of the segmentation - before the lesions are determined
if __name__ == "__main__":
unittest.main()
|
bsd-3-clause
|
Python
|
|
ee169acf82eff08daa40c461263712f2af2a1131
|
Add a standalone simulation script (really a duplicate of sensitivity.py)
|
chatelak/RMG-Py,pierrelb/RMG-Py,chatelak/RMG-Py,nyee/RMG-Py,pierrelb/RMG-Py,nickvandewiele/RMG-Py,nyee/RMG-Py,nickvandewiele/RMG-Py
|
scripts/simulate.py
|
scripts/simulate.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script runs stand-alone simulation on an RMG job. This is effectively the
same script as sensitivity.py
"""
import os.path
import argparse
from rmgpy.tools.sensitivity import runSensitivity
################################################################################
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('input', metavar='INPUT', type=str, nargs=1,
help='RMG input file')
parser.add_argument('chemkin', metavar='CHEMKIN', type=str, nargs=1,
help='Chemkin file')
parser.add_argument('dictionary', metavar='DICTIONARY', type=str, nargs=1,
help='RMG dictionary file')
args = parser.parse_args()
inputFile = os.path.abspath(args.input[0])
chemkinFile = os.path.abspath(args.chemkin[0])
dictFile = os.path.abspath(args.dictionary[0])
return inputFile, chemkinFile, dictFile
def main():
# This might not work anymore because functions were modified for use with webserver
inputFile, chemkinFile, dictFile = parse_arguments()
runSensitivity(inputFile, chemkinFile, dictFile)
################################################################################
if __name__ == '__main__':
main()
|
mit
|
Python
|
|
0f31db66a38073e1549d977909c5f4c5d3eab280
|
Create permutation-in-string.py
|
kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,jaredkoontz/leetcode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,jaredkoontz/leetcode,yiwen-luo/LeetCode,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,jaredkoontz/leetcode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,jaredkoontz/leetcode,kamyu104/LeetCode
|
Python/permutation-in-string.py
|
Python/permutation-in-string.py
|
# Time: O(n)
# Space: O(1)
# Given two strings s1 and s2, write a function to return true
# if s2 contains the permutation of s1. In other words,
# one of the first string's permutations is the substring of the second string.
#
# Example 1:
# Input:s1 = "ab" s2 = "eidbaooo"
# Output:True
# Explanation: s2 contains one permutation of s1 ("ba").
# Example 2:
# Input:s1= "ab" s2 = "eidboaoo"
# Output: False
# Note:
# The input strings only contain lower case letters.
# The length of both given strings is in range [1, 10,000].
import collections
class Solution(object):
def checkInclusion(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: bool
"""
counts = collections.Counter(s1)
l = len(s1)
for i in xrange(len(s2)):
if counts[s2[i]] > 0:
l -= 1
counts[s2[i]] -= 1
if l == 0:
return True
start = i + 1 - len(s1)
if start >= 0:
counts[s2[start]] += 1
if counts[s2[start]] > 0:
l += 1
return False
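# Illustrative check against the examples above (not part of the solution):
# print Solution().checkInclusion("ab", "eidbaooo")  # True
# print Solution().checkInclusion("ab", "eidboaoo")  # False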
|
mit
|
Python
|
|
ebb797bb7596adc71b1e906cb7d7f94b56e8f535
|
Create subarray-sum-equals-k.py
|
jaredkoontz/leetcode,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,jaredkoontz/leetcode,kamyu104/LeetCode,jaredkoontz/leetcode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,jaredkoontz/leetcode,yiwen-luo/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,kamyu104/LeetCode
|
Python/subarray-sum-equals-k.py
|
Python/subarray-sum-equals-k.py
|
# Time: O(n)
# Space: O(n)
# Given an array of integers and an integer k,
# you need to find the total number of continuous subarrays whose sum equals to k.
#
# Example 1:
# Input:nums = [1,1,1], k = 2
# Output: 2
#
# Note:
# The length of the array is in range [1, 20,000].
# The range of numbers in the array is [-1000, 1000] and the range of the integer k is [-1e7, 1e7].
import collections
class Solution(object):
def subarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
result = 0
accumulated_sum = 0
lookup = collections.defaultdict(int)
lookup[0] += 1
for num in nums:
accumulated_sum += num
result += lookup[accumulated_sum - k]
lookup[accumulated_sum] += 1
return result
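# Illustrative check against the example above (not part of the solution):
# print Solution().subarraySum([1, 1, 1], 2)  # 2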
|
mit
|
Python
|
|
c9b75d5195666efaef8b52d9f2f2b70d9b11f25f
|
Create individual file used for initializing db
|
ganemone/ontheside,ganemone/ontheside,ganemone/ontheside
|
server/models/db.py
|
server/models/db.py
|
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
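# Illustrative wiring (sketch): the shared instance is bound to the Flask app
# elsewhere, e.g.
#   from server.models.db import db
#   db.init_app(app)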
|
mit
|
Python
|
|
c0ba4a18433a05f492cfb78716fc77e14c8b4f56
|
test solvable:filelist attribute
|
openSUSE/sat-solver-bindings,openSUSE/sat-solver-bindings,openSUSE/sat-solver-bindings,openSUSE/sat-solver-bindings,openSUSE/sat-solver-bindings,openSUSE/sat-solver-bindings
|
bindings/python/tests/filelist.py
|
bindings/python/tests/filelist.py
|
#
# Check Filelists
#
import unittest
import sys
sys.path.insert(0, '../../../build/bindings/python')
import satsolver
class TestSequenceFunctions(unittest.TestCase):
def test_filelists(self):
pool = satsolver.Pool()
assert pool
pool.set_arch("x86_64")
repo = pool.add_solv( "os11-biarch.solv" )
repo.set_name( "openSUSE 11.0 Beta3 BiArch" )
i = 0
for solv in pool:
print "Filelist for ", solv
if solv.attr_exists('solvable:filelist'):
# print solv, " has a filelist"
print solv.attr('solvable:filelist')
else:
print '-'
i = i + 1
if i > 2:
break
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
Python
|
|
d1eceaf35b74166f3471dea86b194f67a152cb19
|
add Python script to diff two source trees
|
apache/manifoldcf-integration-solr-4.x
|
dev-tools/scripts/diffSources.py
|
dev-tools/scripts/diffSources.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import sys
# recursive, unified output format, treat missing files as present but empty
DIFF_FLAGS = '-ruN'
if '-skipWhitespace' in sys.argv:
sys.argv.remove('-skipWhitespace')
# ignores only whitespace changes
DIFF_FLAGS += 'bBw'
if len(sys.argv) != 3:
print
print 'Usage: python -u diffSources.py <dir1> <dir2> [-skipWhitespace]'
print
print '''This tool creates a patch, suitable for applying, between two directories.
While you could use this to make a committable patch from a branch, that approach loses
the svn history from the branch (better to use "svn merge --reintegrate", for example). This
diff output should not be considered "authoritative" from a merging standpoint as it does
not reflect what svn will do on merge.
'''
print
sys.exit(0)
p = subprocess.Popen(['diff', DIFF_FLAGS, '-x', '.svn', '-x', 'build', sys.argv[1], sys.argv[2]], shell=False, stdout=subprocess.PIPE)
keep = False
while True:
l = p.stdout.readline()
if l == '':
break
if l.endswith('\r\n'):
l = l[:-2]
elif l.endswith('\n'):
l = l[:-1]
if l.startswith('diff ') or l.startswith('Binary files '):
        keep = l.lower().find('/build/') == -1 and (l.lower().startswith('only in') or ((l.lower().endswith('.java') or l.lower().endswith('.txt') or l.lower().endswith('.xml') or l.lower().endswith('.iml')) and l.find('/.svn/') == -1))
if keep:
print
print
print l.strip()
elif keep:
print l
elif l.startswith('Only in'):
print l.strip()
|
apache-2.0
|
Python
|
|
74f3f70337e9924e4fce030d6a5941ce506bfee9
|
Add a runserver script to start the application for development purposes
|
whitel/fresque,fedora-infra/fresque,rahulrrixe/fresque,whitel/fresque,whitel/fresque,rahulrrixe/fresque,vivekanand1101/fresque,fedora-infra/fresque,rahulrrixe/fresque,whitel/fresque,rahulrrixe/fresque,fedora-infra/fresque,fedora-infra/fresque,vivekanand1101/fresque,vivekanand1101/fresque,vivekanand1101/fresque
|
runserver.py
|
runserver.py
|
#!/usr/bin/env python
## These two lines are needed to run on EL6
__requires__ = ['SQLAlchemy >= 0.8', 'jinja2 >= 2.4']
import pkg_resources
import sys
from werkzeug.contrib.profiler import ProfilerMiddleware
from fresque import APP
APP.debug = True
if '--profile' in sys.argv:
APP.config['PROFILE'] = True
APP.wsgi_app = ProfilerMiddleware(APP.wsgi_app, restrictions=[30])
APP.run()
|
agpl-3.0
|
Python
|
|
93039b9cbea2c8355b8d8651ec0d15cdd73169a6
|
Create findmean.py
|
wdyer0726/CS101
|
udacity/findmean.py
|
udacity/findmean.py
|
# The mean of a set of numbers is the sum of the numbers divided by the
# number of numbers. Write a procedure, list_mean, which takes a list of numbers
# as its input and return the mean of the numbers in the list.
# Hint: You will need to work out how to make your division into decimal
# division instead of integer division. You get decimal division if any of
# the numbers involved are decimals.
def list_mean(p):
    i = 0
    sum_int = 0.0
    while i < len(p):  # strictly less than: valid indexes are 0 .. len(p) - 1
        sum_int = p[i] + sum_int
        i = i + 1
    return sum_int / len(p)
print list_mean([1,2,3,4])
#>>> 2.5
print list_mean([1,3,4,5,2])
#>>> 3.0
print list_mean([2])
#>>> 2.0
|
apache-2.0
|
Python
|
|
7e2a1ac8f297223accdf2ec421d8c9c7a2fe4b3c
|
add the updated script
|
avinassh/avinassh.github.io,avinassh/avinassh.github.io
|
source/script.py
|
source/script.py
|
import os
import sys
import re
from datetime import datetime
import pytz
def replace_meta(content: str):
    # all meta has to be between +++, so let's prepend that
content = '+++\n' + content
# match and replace the `Title: <something>` to `title = "<something>"`
content = re.sub(r'Title: *(.*)\n', r'title = "\1"\n', content)
# match and replace the `Slug: <something>` to `slug = "<something>"`
content = re.sub(r'Slug: *(.*)\n', r'slug = "\1"\n', content)
# match and replace the `FacebookImage: <something>` to `image = "<something>"`
content = re.sub(r'FacebookImage: *(.*)\n', r'image = "\1"\n', content)
# to replace Date and Modified, we need to first get the string, parse it and convert it
# to the format golang wants
date_regex = re.compile(r'Date: *(?P<date> .*)\n')
if match := date_regex.search(content):
date = match.groupdict().get('date')
# the date format is like `2016-02-19 23:03`
date_time = datetime.strptime(date.strip(), '%Y-%m-%d %H:%M').astimezone(pytz.timezone('Asia/Kolkata'))
content = date_regex.sub(F'date = "{date_time.isoformat()}"\n', content)
    # same as Date, but for Modified
modified_regex = re.compile(r'Modified: *(?P<date> .*)\n')
if match := modified_regex.search(content):
date = match.groupdict().get('date')
# the date format is like `2016-02-19 23:03`
date_time = datetime.strptime(date.strip(), '%Y-%m-%d %H:%M').astimezone(pytz.timezone('Asia/Kolkata'))
content = modified_regex.sub(F'lastmod = "{date_time.isoformat()}"\n', content)
categories_regex = re.compile(r'Category: *(?P<category> .*)\n')
if match := categories_regex.search(content):
categories = match.groupdict().get('category')
replacement_string = ', '.join([F'"{s.strip().lower()}"' for s in categories.split(',')])
content = categories_regex.sub(F'categories = [{replacement_string}]\n', content)
tags_regex = re.compile(r'Tags: *(?P<tags> .*)\n')
if match := tags_regex.search(content):
tags = match.groupdict().get('tags')
replacement_string = ', '.join([F'"{s.strip().lower()}"' for s in tags.split(',')])
content = tags_regex.sub(F'tags = [{replacement_string}]\n', content)
    # same as how the title was replaced. We also append `+++` to close the meta
    # block
content = re.sub(r'Summary: *(.*)\n', r'description = "\1"\n+++\n', content)
return content
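# Rough sketch of the transformation on a made-up post (the input uses the
# Pelican-style metadata handled above; the output is TOML front matter):
#
#   Title: Hello World          -->   +++
#   Tags: go, web                     title = "Hello World"
#   Summary: A first post             tags = ["go", "web"]
#                                     description = "A first post"
#                                     +++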
def convert(path):
for root, dirs, files in os.walk(path):
for name in files:
if name.endswith('.md'):
file = os.path.join(root, name)
print(file)
with open(file, 'r') as f:
updated_content = replace_meta(f.read())
with open(file, 'w') as f:
f.write(updated_content)
if __name__ == '__main__':
convert(sys.argv[1])
|
mit
|
Python
|
|
7ea9bbd3315fed4d6fd319a865517a4f72228342
|
Create test.py
|
SchoolIdolTomodachi/SchoolIdolAPIOAuthExample,SchoolIdolTomodachi/SchoolIdolAPIOAuthExample,SchoolIdolTomodachi/SchoolIdolAPIOAuthExample
|
Python/test.py
|
Python/test.py
|
apache-2.0
|
Python
|
||
199b6bb0c62028d93e1204d96591500b0f76e834
|
Add Robot_V002b.py Object Oriented Version
|
mirrorcoloured/slcypi
|
Robot_V002b.py
|
Robot_V002b.py
|
#!/usr/bin/python
import sys
sys.path.append("/home/pi/Documents/Robots/slcypi/MA") ### ADD PATH
sys.path.append("/home/pi/Documents/Robots/slcypi/HAT_Python3") ### ADD PATH
import time
from time import sleep
import atexit
import pygame
import pygame.camera
from PIL import Image
#from pylab import *
from Tank import Tank
# Pygame and camera initialize
pygame.init()
pygame.display.set_caption('My Robot')
pygame.camera.init()
screen = pygame.display.set_mode((640,480),0)
cam_list = pygame.camera.list_cameras()
cam = pygame.camera.Camera(cam_list[0],(320,240))
cam.start()
robot = Tank()
try:
print('starting loop')
done = False
while not done:
# Camera
image1 = cam.get_image()
image1 = pygame.transform.scale(image1,(640,480))
image1 = pygame.transform.flip(image1,1,1)
screen.blit(image1,(0,0))
pygame.display.update()
# User events
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == (pygame.K_UP):
robot.drive(1)
if event.key == (pygame.K_DOWN):
robot.drive(-1)
if (event.key == pygame.K_ESCAPE):
done = True
if (event.key == pygame.K_LEFT):
robot.rotate(1)
if (event.key == pygame.K_RIGHT):
robot.rotate(-1)
if event.type == pygame.KEYUP:
if event.key == (pygame.K_UP):
robot.drive(0)
if event.key == (pygame.K_DOWN):
robot.drive(0)
if (event.key == pygame.K_LEFT):
robot.rotate(0)
if (event.key == pygame.K_RIGHT):
robot.rotate(0)
except KeyboardInterrupt:
    pass  # fall through to the shared cleanup below
cam.stop()
pygame.quit()
|
mit
|
Python
|
|
eeb0187b9d474b9b5d1710e8f45f8116894eb15c
|
Read Temperature from DS18B20. Post the data to data.sparkfun.com
|
fuzzyhandle/esp8266hangout,fuzzyhandle/esp8266hangout,fuzzyhandle/esp8266hangout
|
temp-sensor02/main.py
|
temp-sensor02/main.py
|
from machine import Pin
from ds18x20 import DS18X20
import onewire
import time
import machine
import ujson
import urequests
def posttocloud(temperature):
keystext = open("sparkfun_keys.json").read()
keys = ujson.loads(keystext)
url = keys['inputUrl'] + "?private_key=" + keys['privateKey'] + "&temp=" + str(temperature)
#data = {'temp':temperature}
#data['private_key'] = keys['privateKey']
#print (keys['inputUrl'])
#print(keys['privateKey'])
#datajson = ujson.dumps(data)
#print (datajson)
resp = urequests.request("POST", url)
print (resp.text)
while True:
p = Pin(2) # Data Line is on GPIO2 aka D4
ow = onewire.OneWire(p)
ds = DS18X20(ow)
lstrom = ds.scan()
#Assuming we have only 1 device connected
rom = lstrom[0]
ds.convert_temp()
time.sleep_ms(750)
temperature = round(float(ds.read_temp(rom)),1)
#print("Temperature: {:02.1f}".format(temperature))
posttocloud(temperature)
time.sleep(10)
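# Expected shape of sparkfun_keys.json, as read by posttocloud() above
# (the values here are placeholders, not real keys):
#
#   {"inputUrl": "http://data.sparkfun.com/input/PUBLIC_KEY",
#    "privateKey": "PRIVATE_KEY"}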
|
mit
|
Python
|
|
d2e63dfc644e323bf23fbd6654f7493ed94d7991
|
Use HTTPS for libchromiumcontent's URL
|
rajatsingla28/electron,rajatsingla28/electron,MaxWhere/electron,joaomoreno/atom-shell,electron/electron,twolfson/electron,shiftkey/electron,rreimann/electron,leftstick/electron,tonyganch/electron,brenca/electron,dongjoon-hyun/electron,tonyganch/electron,felixrieseberg/electron,voidbridge/electron,leftstick/electron,leethomas/electron,bbondy/electron,roadev/electron,tylergibson/electron,posix4e/electron,etiktin/electron,lzpfmh/electron,gerhardberger/electron,jaanus/electron,evgenyzinoviev/electron,jhen0409/electron,brenca/electron,simongregory/electron,brenca/electron,jhen0409/electron,astoilkov/electron,voidbridge/electron,brenca/electron,tonyganch/electron,evgenyzinoviev/electron,stevekinney/electron,Gerhut/electron,miniak/electron,roadev/electron,felixrieseberg/electron,ankitaggarwal011/electron,MaxWhere/electron,preco21/electron,thompsonemerson/electron,minggo/electron,etiktin/electron,lzpfmh/electron,pombredanne/electron,leethomas/electron,rajatsingla28/electron,felixrieseberg/electron,lzpfmh/electron,bpasero/electron,jhen0409/electron,electron/electron,tinydew4/electron,pombredanne/electron,joaomoreno/atom-shell,roadev/electron,dongjoon-hyun/electron,miniak/electron,MaxWhere/electron,bbondy/electron,twolfson/electron,the-ress/electron,thomsonreuters/electron,brave/electron,biblerule/UMCTelnetHub,evgenyzinoviev/electron,bbondy/electron,evgenyzinoviev/electron,kcrt/electron,thompsonemerson/electron,posix4e/electron,the-ress/electron,brave/electron,etiktin/electron,deed02392/electron,gerhardberger/electron,shiftkey/electron,tonyganch/electron,the-ress/electron,Floato/electron,brave/muon,aliib/electron,electron/electron,renaesop/electron,voidbridge/electron,bpasero/electron,brave/electron,noikiy/electron,seanchas116/electron,jaanus/electron,Floato/electron,kcrt/electron,tylergibson/electron,bbondy/electron,biblerule/UMCTelnetHub,brave/electron,thomsonreuters/electron,miniak/electron,gabriel/electron,simongregory/electron,Gerhut/electron,seanchas116/electron,Floato/electron,aichingm/electron,seanchas116/electron,jhen0409/electron,evgenyzinoviev/electron,tylergibson/electron,miniak/electron,seanchas116/electron,Evercoder/electron,minggo/electron,astoilkov/electron,thomsonreuters/electron,Floato/electron,Evercoder/electron,jaanus/electron,Floato/electron,dongjoon-hyun/electron,electron/electron,the-ress/electron,aichingm/electron,tinydew4/electron,rreimann/electron,minggo/electron,gabriel/electron,thomsonreuters/electron,twolfson/electron,wan-qy/electron,seanchas116/electron,stevekinney/electron,aliib/electron,minggo/electron,minggo/electron,posix4e/electron,astoilkov/electron,noikiy/electron,Gerhut/electron,leethomas/electron,stevekinney/electron,preco21/electron,brave/muon,renaesop/electron,shiftkey/electron,astoilkov/electron,rajatsingla28/electron,pombredanne/electron,brenca/electron,ankitaggarwal011/electron,kcrt/electron,pombredanne/electron,simongregory/electron,simongregory/electron,thingsinjars/electron,brave/muon,renaesop/electron,electron/electron,aliib/electron,thompsonemerson/electron,the-ress/electron,jaanus/electron,shiftkey/electron,leftstick/electron,leethomas/electron,noikiy/electron,thomsonreuters/electron,brave/muon,renaesop/electron,bbondy/electron,gabriel/electron,etiktin/electron,gerhardberger/electron,minggo/electron,kcrt/electron,thingsinjars/electron,gabriel/electron,Gerhut/electron,jhen0409/electron,rreimann/electron,twolfson/electron,Evercoder/electron,wan-qy/electron,etiktin/electron,lzpfmh/electron,thompsonemerson/electron,miniak/electron,biblerule/UMCTelnetHub,d
ongjoon-hyun/electron,tinydew4/electron,posix4e/electron,lzpfmh/electron,simongregory/electron,roadev/electron,aliib/electron,MaxWhere/electron,ankitaggarwal011/electron,seanchas116/electron,shiftkey/electron,leftstick/electron,gerhardberger/electron,bpasero/electron,deed02392/electron,posix4e/electron,kcrt/electron,joaomoreno/atom-shell,deed02392/electron,aichingm/electron,gabriel/electron,lzpfmh/electron,felixrieseberg/electron,dongjoon-hyun/electron,the-ress/electron,felixrieseberg/electron,thompsonemerson/electron,kokdemo/electron,biblerule/UMCTelnetHub,stevekinney/electron,astoilkov/electron,brave/electron,Floato/electron,ankitaggarwal011/electron,thingsinjars/electron,pombredanne/electron,MaxWhere/electron,tylergibson/electron,joaomoreno/atom-shell,preco21/electron,bpasero/electron,leftstick/electron,tonyganch/electron,bpasero/electron,bpasero/electron,rreimann/electron,noikiy/electron,Evercoder/electron,rajatsingla28/electron,electron/electron,gerhardberger/electron,voidbridge/electron,bpasero/electron,kcrt/electron,aliib/electron,roadev/electron,biblerule/UMCTelnetHub,the-ress/electron,wan-qy/electron,roadev/electron,rreimann/electron,tonyganch/electron,thingsinjars/electron,kokdemo/electron,jaanus/electron,leethomas/electron,gerhardberger/electron,rreimann/electron,aliib/electron,renaesop/electron,simongregory/electron,noikiy/electron,ankitaggarwal011/electron,Evercoder/electron,aichingm/electron,tinydew4/electron,dongjoon-hyun/electron,kokdemo/electron,twolfson/electron,preco21/electron,wan-qy/electron,bbondy/electron,joaomoreno/atom-shell,deed02392/electron,brave/muon,jhen0409/electron,wan-qy/electron,kokdemo/electron,twolfson/electron,thingsinjars/electron,joaomoreno/atom-shell,aichingm/electron,tinydew4/electron,brenca/electron,deed02392/electron,Evercoder/electron,kokdemo/electron,Gerhut/electron,deed02392/electron,preco21/electron,stevekinney/electron,preco21/electron,ankitaggarwal011/electron,voidbridge/electron,rajatsingla28/electron,miniak/electron,brave/electron,thingsinjars/electron,noikiy/electron,kokdemo/electron,gabriel/electron,thomsonreuters/electron,felixrieseberg/electron,leethomas/electron,shiftkey/electron,stevekinney/electron,biblerule/UMCTelnetHub,brave/muon,tylergibson/electron,wan-qy/electron,aichingm/electron,astoilkov/electron,posix4e/electron,pombredanne/electron,jaanus/electron,thompsonemerson/electron,voidbridge/electron,gerhardberger/electron,Gerhut/electron,etiktin/electron,electron/electron,leftstick/electron,MaxWhere/electron,tinydew4/electron,evgenyzinoviev/electron,tylergibson/electron,renaesop/electron
|
script/lib/config.py
|
script/lib/config.py
|
#!/usr/bin/env python
import errno
import os
import platform
import sys
BASE_URL = os.getenv('LIBCHROMIUMCONTENT_MIRROR') or \
'https://s3.amazonaws.com/github-janky-artifacts/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'cfbe8ec7e14af4cabd1474386f54e197db1f7ac1'
PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
verbose_mode = False
def get_platform_key():
if os.environ.has_key('MAS_BUILD'):
return 'mas'
else:
return PLATFORM
def get_target_arch():
try:
target_arch_path = os.path.join(__file__, '..', '..', '..', 'vendor',
'brightray', 'vendor', 'download',
'libchromiumcontent', '.target_arch')
with open(os.path.normpath(target_arch_path)) as f:
return f.read().strip()
except IOError as e:
if e.errno != errno.ENOENT:
raise
if PLATFORM == 'win32':
return 'ia32'
else:
return 'x64'
def get_chromedriver_version():
return 'v2.15'
def s3_config():
config = (os.environ.get('ATOM_SHELL_S3_BUCKET', ''),
os.environ.get('ATOM_SHELL_S3_ACCESS_KEY', ''),
os.environ.get('ATOM_SHELL_S3_SECRET_KEY', ''))
message = ('Error: Please set the $ATOM_SHELL_S3_BUCKET, '
'$ATOM_SHELL_S3_ACCESS_KEY, and '
'$ATOM_SHELL_S3_SECRET_KEY environment variables')
assert all(len(c) for c in config), message
return config
def enable_verbose_mode():
print 'Running in verbose mode'
global verbose_mode
verbose_mode = True
def is_verbose_mode():
return verbose_mode
|
#!/usr/bin/env python
import errno
import os
import platform
import sys
BASE_URL = os.getenv('LIBCHROMIUMCONTENT_MIRROR') or \
'http://github-janky-artifacts.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'cfbe8ec7e14af4cabd1474386f54e197db1f7ac1'
PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
verbose_mode = False
def get_platform_key():
if os.environ.has_key('MAS_BUILD'):
return 'mas'
else:
return PLATFORM
def get_target_arch():
try:
target_arch_path = os.path.join(__file__, '..', '..', '..', 'vendor',
'brightray', 'vendor', 'download',
'libchromiumcontent', '.target_arch')
with open(os.path.normpath(target_arch_path)) as f:
return f.read().strip()
except IOError as e:
if e.errno != errno.ENOENT:
raise
if PLATFORM == 'win32':
return 'ia32'
else:
return 'x64'
def get_chromedriver_version():
return 'v2.15'
def s3_config():
config = (os.environ.get('ATOM_SHELL_S3_BUCKET', ''),
os.environ.get('ATOM_SHELL_S3_ACCESS_KEY', ''),
os.environ.get('ATOM_SHELL_S3_SECRET_KEY', ''))
message = ('Error: Please set the $ATOM_SHELL_S3_BUCKET, '
'$ATOM_SHELL_S3_ACCESS_KEY, and '
'$ATOM_SHELL_S3_SECRET_KEY environment variables')
assert all(len(c) for c in config), message
return config
def enable_verbose_mode():
print 'Running in verbose mode'
global verbose_mode
verbose_mode = True
def is_verbose_mode():
return verbose_mode
|
mit
|
Python
|
632f71651864517cc977f79dcdac7f3b0f516b49
|
Add example script to post experiment and task data
|
niekas/dakis,niekas/dakis,niekas/dakis
|
scripts/post_data.py
|
scripts/post_data.py
|
#!/usr/bin/env python3
import requests
domain = 'http://dakis.gimbutas.lt/api/'
exp_data = {
"description": "First successful post through API",
"algorithm": "TestTasks",
"neighbours": "Nearest",
"stopping_criteria": "x_dist",
"stopping_accuracy": "0.01",
"subregion": "simplex",
"inner_problem_accuracy": None,
"inner_problem_iters": 10,
"inner_problem_division": "LongesEdge",
"lipschitz_estimation": "min_allowed",
"simplex_division": "LongestEdge",
"valid": True,
"mistakes": "",
}
resp = requests.post(domain + 'experiments/', data=exp_data)
exp_url = resp.json()['url']
task_data = {
"func_name": "GKLS",
"func_cls": 1,
"func_id": 1,
"calls": 123,
"subregions": 1041,
"duration": "0.12",
"f_min": None,
"x_min": None,
"experiment": exp_url,
}
requests.post(domain + 'tasks/', data=task_data)
task_data['func_id'] = 2
task_data['calls'] = 213
requests.post(domain + 'tasks/', data=task_data)
|
agpl-3.0
|
Python
|
|
1c5ddc6803853e48eb77bd337fedbaabc56a0102
|
Add empty MultiLayerPerceptrons file.
|
MoriKen254/DeepLearningWithPython
|
MultiLayerNeuralNetworks/MultiLayerPerceptrons/MultiLayerPerceptrons.py
|
MultiLayerNeuralNetworks/MultiLayerPerceptrons/MultiLayerPerceptrons.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
u"""
Copyright (c) 2016 Masaru Morita
This software is released under the MIT License.
See LICENSE file included in this repository.
"""
|
mit
|
Python
|
|
5fc6b3c64b29dc5b17fec90f331cc7a2ca22704f
|
add main file
|
snowleung/pysms2email
|
sms2email.py
|
sms2email.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
Work at python2.5, iphone4(ios6) with cydia, using hotmail to send the message.
'''
import sqlite3 as sql
import email
import os
import sys
import codecs
import string
import datetime
from Queue import Queue
import time
import threading
import pymail
EMAIL_CONTENT = '''Author:${author}\nTEXT:\n${text}\n${date}\n\n\n'''
reload(sys)
sys.setdefaultencoding('utf8')
streamWriter = codecs.lookup('utf-8')[-1]
sys.stdout = streamWriter(sys.stdout)
UPDATE_DATE = -1
UPDATE_CHECK_SECONDS = 30
SMSDB_PATH = '/var/mobile/Library/SMS/sms.db'
SQL_QUERY_TEMPLATE = string.Template(
'''select date, hd.id, text from message as msg, handle as hd where msg.handle_id=hd.rowid and date>${date} order by msg.date desc limit 10''')
# sql index
# date=0
# author=1
# text=2
SMSDB = sql.connect(SMSDB_PATH)
SMSDB_CURSOR = SMSDB.cursor()
mail = pymail.Pymail(os.environ.get('USER_MAIL'), os.environ.get('USER_PASSWD'), os.environ.get('MAIL_TO'))
mq = Queue()
def email_sender():
'''worker
'''
item = mq.get()
if item:
        mail.send_mail('SMS on IPhone4', item)
mq.task_done()
def message_date(mac_time):
'''see: http://stackoverflow.com/questions/10746562/parsing-date-field-of-iphone-sms-file-from-backup
'''
unix_time = int(mac_time) + 978307200
date = datetime.datetime.fromtimestamp(unix_time)
return date
def build_content(message_data):
msg_body = ''
for m in message_data:
_body = string.Template(EMAIL_CONTENT)
msg_body += _body.safe_substitute(author=str(m[1]), text=m[2], date=message_date(m[0]))
return msg_body
if __name__ == '__main__':
print 'worker sender is OK'
while(1):
if UPDATE_DATE > 0:
SMSDB_CURSOR.execute(SQL_QUERY_TEMPLATE.safe_substitute(date=UPDATE_DATE))
message_data = SMSDB_CURSOR.fetchall()
if message_data:
UPDATE_DATE = int(message_data[0][0])
msg_body = build_content(message_data)
mq.put(msg_body)
t = threading.Thread(target=email_sender)
t.daemon = True
t.start()
time.sleep(UPDATE_CHECK_SECONDS)
else:
# INIT
SMSDB_CURSOR.execute('''select date, hd.id, text from message as msg, handle as hd where msg.handle_id=hd.rowid order by msg.date desc limit 2''')
message_data = SMSDB_CURSOR.fetchall()
UPDATE_DATE = int(message_data[0][0])
msg_body = build_content(message_data)
            mail.send_mail('SMS Monitor', 'init OK, SMS monitor is running. recent message is \n' + msg_body)
|
apache-2.0
|
Python
|
|
4e8d4f21749a329dd114926d3654512e9842a1e1
|
Change FULL_NAME_FUNCTION to GET_FULL_NAME_FUNCTION.
|
UWIT-IAM/iam-idbase,jeffFranklin/iam-idbase,UWIT-IAM/iam-idbase,jeffFranklin/iam-idbase,jeffFranklin/iam-idbase,UWIT-IAM/iam-idbase
|
idbase/views.py
|
idbase/views.py
|
from django.shortcuts import render, redirect
from django.conf import settings
from idbase.exceptions import InvalidSessionError
import logging
from importlib import import_module
logger = logging.getLogger(__name__)
def index(request, template=None):
"""Render the Identity home page."""
conf = {'urls': settings.CORE_URLS}
return render(request, 'idbase/index.html', conf)
def login(request):
"""This view gets SSO-protected and redirects to next."""
if request.user.is_authenticated():
logger.info('User %s logged in' % (request.user.username))
if (request.user.get_full_name() is None and
hasattr(settings, 'GET_FULL_NAME_FUNCTION')):
mod, func = settings.GET_FULL_NAME_FUNCTION.rsplit('.', 1)
module = import_module(mod)
full_name_function = getattr(module, func)
request.user.set_full_name(full_name_function(request))
return redirect(request.GET.get('next', '/'))
else:
raise InvalidSessionError('no REMOTE_USER variable set')
|
from django.shortcuts import render, redirect
from django.conf import settings
from idbase.exceptions import InvalidSessionError
import logging
from importlib import import_module
logger = logging.getLogger(__name__)
def index(request, template=None):
"""Render the Identity home page."""
conf = {'urls': settings.CORE_URLS}
return render(request, 'idbase/index.html', conf)
def login(request):
"""This view gets SSO-protected and redirects to next."""
if request.user.is_authenticated():
logger.info('User %s logged in' % (request.user.username))
if (request.user.get_full_name() is None and
hasattr(settings, 'FULL_NAME_FUNCTION')):
mod, func = settings.FULL_NAME_FUNCTION.rsplit('.', 1)
module = import_module(mod)
full_name_function = getattr(module, func)
request.user.set_full_name(full_name_function(request))
return redirect(request.GET.get('next', '/'))
else:
raise InvalidSessionError('no REMOTE_USER variable set')
|
apache-2.0
|
Python
|
e04bf5dd12a1f5e28258541dcf9d2eb8c5567ad0
|
Add tests for lead price
|
Jamil/sabre_dev_studio
|
tests/lead_price_tests.py
|
tests/lead_price_tests.py
|
import unittest
import datetime
import json
import sys
sys.path.append('..')
import sabre_dev_studio
import sabre_dev_studio.sabre_exceptions as sabre_exceptions
'''
requires config.json in the same directory for api authentication
{
"sabre_client_id": -----,
"sabre_client_secret": -----
}
'''
class TestBasicLeadPrice(unittest.TestCase):
def read_config(self):
raw_data = open('config.json').read()
data = json.loads(raw_data)
client_secret = data['sabre_client_secret']
client_id = data['sabre_client_id']
return (client_id, client_secret)
def setUp(self):
# Read from config
self.client_id, self.client_secret = self.read_config()
self.sds = sabre_dev_studio.SabreDevStudio()
self.sds.set_credentials(self.client_id, self.client_secret)
self.sds.authenticate()
def test_request_with_args(self):
prices = self.sds.lead_price('YTO', 'SFO', [3,4])
self.assertIsNotNone(prices)
def test_basic_request(self):
opts = {
'origin': 'YTO',
'destination': 'SFO',
'lengthofstay': [3,4]
}
prices = self.sds.lead_price_opts(opts)
self.assertIsNotNone(prices)
if __name__ == '__main__':
unittest.main()
|
mit
|
Python
|
|
b7c22cddecb743e9597c92160e3aa0100e149e19
|
Introduce hades test fixtures and first tests.
|
agdsn/pycroft,agdsn/pycroft,lukasjuhrich/pycroft,agdsn/pycroft,agdsn/pycroft,agdsn/pycroft,lukasjuhrich/pycroft,lukasjuhrich/pycroft,lukasjuhrich/pycroft
|
tests/model/test_hades.py
|
tests/model/test_hades.py
|
from datetime import datetime, timedelta
from pycroft.model import session
from pycroft.model.hades import radgroup_property_mappings, radcheck
from tests import FactoryDataTestBase
from tests.factories import PropertyGroupFactory, MembershipFactory, UserWithHostFactory, \
SwitchFactory, PatchPortFactory
class HadesViewTest(FactoryDataTestBase):
def create_factories(self):
self.user = UserWithHostFactory.create()
self.network_access_group = PropertyGroupFactory.create(
name="Member",
granted={'network_access'},
)
self.blocked_by_finance_group = PropertyGroupFactory.create(
name="Blocked (finance)",
granted={'blocked_by_finance'},
denied={'network_access'},
)
self.blocked_by_traffic_group = PropertyGroupFactory.create(
name="Blocked (traffic)",
granted={'blocked_by_traffic'},
denied={'network_access'},
)
# the user's room needs to be connected to provide `nasipaddress` and `nasportid`
# TODO: remove owner and see if things still work
self.switch = SwitchFactory.create(host__owner=self.user)
PatchPortFactory.create_batch(2, patched=True, switch_port__switch=self.switch,
# This needs to be the HOSTS room!
room=self.user.hosts[0].room)
# TODO: create this membership in each test, not here
MembershipFactory.create(user=self.user, group=self.network_access_group,
begins_at=datetime.now() + timedelta(-1),
ends_at=datetime.now() + timedelta(1))
MembershipFactory.create(user=self.user, group=self.blocked_by_finance_group,
begins_at=datetime.now() + timedelta(-1),
ends_at=datetime.now() + timedelta(1))
session.session.execute(radgroup_property_mappings.insert(values=[
{'property': 'blocked_by_finance', 'radgroup': 'finance'},
{'property': 'blocked_by_traffic', 'radgroup': 'traffic'},
]))
def test_radcheck(self):
# <mac> - <nasip> - <nasport> - "Cleartext-Password" - := - <mac> - 10
# We have one interface with a MAC whose room has two ports on the same switch
rows = session.session.query(radcheck.table).all()
host = self.user.hosts[0]
mac = host.interfaces[0].mac
for row in rows:
self.assertEqual(row.username, mac)
self.assertEqual(row.nasipaddress, self.switch.management_ip)
self.assertEqual(row.attribute, "Cleartext-Password")
self.assertEqual(row.op, ":=")
self.assertEqual(row.value, mac)
self.assertEqual(row.priority, 10)
self.assertEqual({row.nasportid for row in rows},
{port.switch_port.name for port in host.room.patch_ports})
# TODO: Put Entries in some basetable to test tagged vlans (separate test)
# TODO: test radreply, radgroupreply (with base, see above), radgroupcheck
|
apache-2.0
|
Python
|
|
33bb7c4e026d46dda184d682c89fad7481ab1a77
|
Add migration script.
|
shin-/docker.github.io,shin-/docker.github.io,denverdino/denverdino.github.io,johnstep/docker.github.io,JimGalasyn/docker.github.io,docker-zh/docker.github.io,rillig/docker.github.io,sdurrheimer/compose,shin-/docker.github.io,jzwlqx/denverdino.github.io,JimGalasyn/docker.github.io,gdevillele/docker.github.io,docker-zh/docker.github.io,phiroict/docker,londoncalling/docker.github.io,shubheksha/docker.github.io,rillig/docker.github.io,danix800/docker.github.io,denverdino/denverdino.github.io,shin-/docker.github.io,thaJeztah/docker.github.io,phiroict/docker,gdevillele/docker.github.io,bdwill/docker.github.io,denverdino/docker.github.io,danix800/docker.github.io,joaofnfernandes/docker.github.io,bdwill/docker.github.io,docker-zh/docker.github.io,jzwlqx/denverdino.github.io,LuisBosquez/docker.github.io,LuisBosquez/docker.github.io,londoncalling/docker.github.io,alexisbellido/docker.github.io,docker/docker.github.io,joeuo/docker.github.io,londoncalling/docker.github.io,anweiss/docker.github.io,shin-/docker.github.io,joeuo/docker.github.io,phiroict/docker,docker-zh/docker.github.io,denverdino/denverdino.github.io,hoogenm/compose,troy0820/docker.github.io,docker/docker.github.io,joaofnfernandes/docker.github.io,BSWANG/denverdino.github.io,aduermael/docker.github.io,joeuo/docker.github.io,menglingwei/denverdino.github.io,londoncalling/docker.github.io,gdevillele/docker.github.io,menglingwei/denverdino.github.io,rillig/docker.github.io,shubheksha/docker.github.io,swoopla/compose,docker/docker.github.io,anweiss/docker.github.io,danix800/docker.github.io,johnstep/docker.github.io,alexisbellido/docker.github.io,danix800/docker.github.io,BSWANG/denverdino.github.io,alexisbellido/docker.github.io,docker/docker.github.io,BSWANG/denverdino.github.io,jrabbit/compose,londoncalling/docker.github.io,alexisbellido/docker.github.io,LuisBosquez/docker.github.io,gdevillele/docker.github.io,schmunk42/compose,docker/docker.github.io,troy0820/docker.github.io,sanscontext/docker.github.io,docker-zh/docker.github.io,jzwlqx/denverdino.github.io,bdwill/docker.github.io,johnstep/docker.github.io,denverdino/denverdino.github.io,vdemeester/compose,jrabbit/compose,BSWANG/denverdino.github.io,dnephin/compose,funkyfuture/docker-compose,troy0820/docker.github.io,troy0820/docker.github.io,denverdino/docker.github.io,denverdino/docker.github.io,sdurrheimer/compose,thaJeztah/docker.github.io,vdemeester/compose,menglingwei/denverdino.github.io,sanscontext/docker.github.io,thaJeztah/compose,phiroict/docker,funkyfuture/docker-compose,thaJeztah/docker.github.io,shubheksha/docker.github.io,LuisBosquez/docker.github.io,shin-/compose,dnephin/compose,aduermael/docker.github.io,thaJeztah/docker.github.io,menglingwei/denverdino.github.io,thaJeztah/docker.github.io,denverdino/denverdino.github.io,JimGalasyn/docker.github.io,bdwill/docker.github.io,jzwlqx/denverdino.github.io,anweiss/docker.github.io,phiroict/docker,hoogenm/compose,joaofnfernandes/docker.github.io,JimGalasyn/docker.github.io,sanscontext/docker.github.io,joaofnfernandes/docker.github.io,denverdino/docker.github.io,johnstep/docker.github.io,sanscontext/docker.github.io,jzwlqx/denverdino.github.io,shubheksha/docker.github.io,aduermael/docker.github.io,johnstep/docker.github.io,thaJeztah/compose,gdevillele/docker.github.io,bdwill/docker.github.io,anweiss/docker.github.io,denverdino/docker.github.io,sanscontext/docker.github.io,joeuo/docker.github.io,swoopla/compose,LuisBosquez/docker.github.io,shubheksha/docker.github.io,rillig/docker.github.io,joaofnfernandes/docker.github.io,aduer
mael/docker.github.io,schmunk42/compose,alexisbellido/docker.github.io,menglingwei/denverdino.github.io,joeuo/docker.github.io,BSWANG/denverdino.github.io,shin-/compose,JimGalasyn/docker.github.io,anweiss/docker.github.io
|
contrib/migration/migrate-compose-file-v1-to-v2.py
|
contrib/migration/migrate-compose-file-v1-to-v2.py
|
#!/usr/bin/env python
"""
Migrate a Compose file from the V1 format in Compose 1.5 to the V2 format
supported by Compose 1.6+
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import argparse
import logging
import sys
import ruamel.yaml
log = logging.getLogger('migrate')
def migrate(content):
data = ruamel.yaml.load(content, ruamel.yaml.RoundTripLoader)
service_names = data.keys()
for name, service in data.items():
# remove links and external links
service.pop('links', None)
external_links = service.pop('external_links', None)
if external_links:
log.warn(
"Service {name} has external_links: {ext}, which are no longer "
"supported. See https://docs.docker.com/compose/networking/ "
"for options on how to connect external containers to the "
"compose network.".format(name=name, ext=external_links))
# net is now networks
if 'net' in service:
service['networks'] = [service.pop('net')]
# create build section
if 'dockerfile' in service:
service['build'] = {
'context': service.pop('build'),
'dockerfile': service.pop('dockerfile'),
}
# create logging section
if 'log_driver' in service:
service['logging'] = {'driver': service.pop('log_driver')}
if 'log_opt' in service:
service['logging']['options'] = service.pop('log_opt')
# volumes_from prefix with 'container:'
for idx, volume_from in enumerate(service.get('volumes_from', [])):
if volume_from.split(':', 1)[0] not in service_names:
service['volumes_from'][idx] = 'container:%s' % volume_from
data['services'] = {name: data.pop(name) for name in data.keys()}
data['version'] = 2
return data
def write(stream, new_format, indent, width):
ruamel.yaml.dump(
new_format,
stream,
Dumper=ruamel.yaml.RoundTripDumper,
indent=indent,
width=width)
def parse_opts(args):
parser = argparse.ArgumentParser()
parser.add_argument("filename", help="Compose file filename.")
parser.add_argument("-i", "--in-place", action='store_true')
parser.add_argument(
"--indent", type=int, default=2,
help="Number of spaces used to indent the output yaml.")
parser.add_argument(
"--width", type=int, default=80,
help="Number of spaces used as the output width.")
return parser.parse_args()
def main(args):
logging.basicConfig()
opts = parse_opts(args)
with open(opts.filename, 'r') as fh:
new_format = migrate(fh.read())
if opts.in_place:
output = open(opts.filename, 'w')
else:
output = sys.stdout
write(output, new_format, opts.indent, opts.width)
if __name__ == "__main__":
main(sys.argv)
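# Hedged before/after sketch on a made-up V1 service (the keys follow the
# transformations performed in migrate() above; the YAML is illustrative only):
#
#   web:                          services:
#     build: .                      web:
#     dockerfile: Dockerfile          build: {context: ., dockerfile: Dockerfile}
#     log_driver: syslog      -->     logging: {driver: syslog}
#     net: host                       networks: [host]
#                                 version: 2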
|
apache-2.0
|
Python
|
|
5fc72b0a6efcd14196d33c8e0ba9b4b763ebf4d1
|
Add a parse_time example
|
dpshelio/sunpy,dpshelio/sunpy,dpshelio/sunpy
|
examples/parse_time.py
|
examples/parse_time.py
|
"""
================================================
Parsing times with sunpy.time.parse_time
================================================
Example to show some example usage of parse_time
"""
from datetime import datetime, date
import numpy as np
import pandas
from sunpy.time import parse_time
# dict used for coloring the terminal output
col = {'y': '\x1b[93m', 'g': '\x1b[92m', 'r': '\x1b[96m', 'bold': '\x1b[1m',
'end': '\x1b[0m'}
def print_time(*args, **kwargs):
'''Parses and pretty prints a parse_time compatible object
'''
# Parse the time
time = parse_time(*args, **kwargs) # Pass all arguments to parse_time
# Color and print to terminal
print(col['r'] + '\nInput string/object: ' + col['end'] +
col['bold'] + '{ts!r}'.format(ts=args[0])+col['end'])
print(col['r'] + 'Parsed Time: ' + col['end'] + col['y'] + col['bold'] +
'{time!r}'.format(time=time) + col['end'])
# Strings
print('\nSTRINGS')
print_time('2005-08-04T00:18:02.000', scale='tt')
print_time('20140101000001')
print_time('2016.05.04_21:08:12_TAI')
print_time('1995-12-31 23:59:60') # Leap second
print_time('1995-Dec-31 23:59:60')
# datetime
print('\nDATETIME')
print_time(datetime.now(), scale='tai')
print_time(date.today())
# numpy
print('\nnumpy.datetime64')
print_time(np.datetime64('1995-12-31 18:59:59-0500'))
print_time(np.arange('2005-02-01T00', '2005-02-01T10', dtype='datetime64'))
# astropy compatible times
print('\nAstroPy compatible')
print_time(1234.0, format='jd')
print_time('B1950.0', format='byear_str')
print_time('2001-03-22 00:01:44.732327132980', scale='utc',
location=('120d', '40d')) # pass location
# pandas
print_time(pandas.Timestamp(datetime(1966, 2, 3)))
print_time(
pandas.Series([[datetime(2012, 1, 1, 0, 0),
datetime(2012, 1, 2, 0, 0)],
[datetime(2012, 1, 3, 0, 0),
datetime(2012, 1, 4, 0, 0)]]))
|
bsd-2-clause
|
Python
|
|
71e9a3a7e867b0670871ac46834988b87787c272
|
Add import-hacks.py snippet file
|
cmey/surprising-snippets,cmey/surprising-snippets
|
import-hacks.py
|
import-hacks.py
|
# Can control the import mechanism in Python.
import importlib.abc
import logging
import sys
logging.getLogger().setLevel(logging.INFO)
class LoggingImporter(importlib.abc.Finder):
def find_module(self, name, path=None):
msg = "importing {} on {}".format(name, path)
logging.info(msg)
return None # None means "didn't match", move on to the next path Finder
sys.meta_path.append(LoggingImporter())
class BlockingFinder(importlib.abc.Finder):
def find_module(self, name, path=None):
if name in ['numpy']:
return BlockingLoader()
class BlockingLoader(importlib.abc.Loader):
def load_module(self, fullname):
if fullname not in sys.modules:
raise ImportError("Can't import excluded module {}".format(fullname))
return sys.modules[fullname]
sys.meta_path.append(BlockingFinder())
# combined in one single class:
class BlockingImporter(importlib.abc.Finder, importlib.abc.Loader):
def find_module(self, name, path=None):
if name in ['numpy']:
return self
def load_module(self, fullname):
if fullname not in sys.modules:
raise ImportError("Can't import excluded module {}".format(fullname))
return sys.modules[fullname]
sys.meta_path.append(BlockingImporter())
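# Minimal usage sketch (assumes numpy was not already in sys.modules before the
# finders were registered; 'numpy' is simply the module blocked above):
#
#   >>> import numpy
#   ImportError: Can't import excluded module numpy
#
# Any other import still succeeds, after being logged by LoggingImporter.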
|
mit
|
Python
|
|
9d9e82487a6ad9494f65e484392334e89baf7b83
|
Add AnalyzerResult class
|
IPMITMO/statan,IPMITMO/statan,IPMITMO/statan,IPMITMO/statan,IPMITMO/statan,IPMITMO/statan,IPMITMO/statan,IPMITMO/statan,IPMITMO/statan,IPMITMO/statan,IPMITMO/statan
|
coala/coalib/results/AnalyzerResult.py
|
coala/coalib/results/AnalyzerResult.py
|
import uuid
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
class AnalyzerResult:
def __init__(self,
origin,
language: str,
language_ver: str,
project_name: str,
project_version: str,
source_file_path: str,
message: str,
severity: int=RESULT_SEVERITY.NORMAL,
diffs: (dict, None)=None,
confidence: int=100,
):
origin = origin or ''
if not isinstance(origin, str):
origin = origin.__class__.__name__
if severity not in RESULT_SEVERITY.reverse:
raise ValueError('severity is not a valid RESULT_SEVERITY')
self.origin = origin
self.language = language
self.language_ver = language_ver
self.project_name = project_name
self.project_version = project_version
self.source_file_path = source_file_path
self.message = message
self.severity = severity
if confidence < 0 or confidence > 100:
raise ValueError('Value of confidence should be between 0 and 100.')
self.confidence = confidence
self.diffs = diffs
self.id = uuid.uuid4().int
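# A minimal instantiation sketch (every value below is made up for illustration):
#
#   result = AnalyzerResult(origin='flake8', language='Python', language_ver='3.6',
#                           project_name='demo', project_version='0.1',
#                           source_file_path='demo/app.py', message='unused import',
#                           severity=RESULT_SEVERITY.NORMAL)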
|
mit
|
Python
|
|
c087b8c5da7b97554e7461509cda298282dcde21
|
Add sfp_openstreetmaps
|
smicallef/spiderfoot,smicallef/spiderfoot,smicallef/spiderfoot
|
modules/sfp_openstreetmaps.py
|
modules/sfp_openstreetmaps.py
|
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------------------
# Name: sfp_openstreetmaps
# Purpose: SpiderFoot plug-in to retrieve latitude/longitude coordinates
# for physical addresses from Open Street Maps API.
#
# Author: Brendan Coles <bcoles@gmail.com>
#
# Created: 2018-10-27
# Copyright: (c) Brendan Coles 2018
# Licence: GPL
# -------------------------------------------------------------------------------
import json
import re
import time
import urllib
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_openstreetmaps(SpiderFootPlugin):
"""Open Street Maps:Footprint,Investigate,Passive:Real World::Retrieves latitude/longitude coordinates for physical addresses from Open Street Maps API."""
opts = {
}
optdescs = {
}
results = dict()
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.results = dict()
for opt in userOpts.keys():
self.opts[opt] = userOpts[opt]
# What events is this module interested in for input
def watchedEvents(self):
return ['PHYSICAL_ADDRESS']
# What events this module produces
def producedEvents(self):
return ['PHYSICAL_COORDINATES']
# Search for address
# https://operations.osmfoundation.org/policies/nominatim/
def query(self, qry):
params = {
'q': qry,
'format': 'json',
'polygon': '0',
'addressdetails': '0'
}
res = self.sf.fetchUrl("https://nominatim.openstreetmap.org/search?" + urllib.urlencode(params),
timeout=self.opts['_fetchtimeout'], useragent='SpiderFoot')
if res['content'] is None:
self.sf.info("No location info found for " + qry)
return None
try:
data = json.loads(res['content'])
except Exception as e:
self.sf.debug("Error processing JSON response: " + str(e))
return None
return data
# Handle events sent to this module
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
self.sf.debug("Received event, " + eventName + ", from " + srcModuleName)
if eventData in self.results:
self.sf.debug("Skipping " + eventData + " as already mapped.")
return None
else:
self.results[eventData] = True
address = eventData
# Skip post office boxes
if address.lower().startswith('po box'):
self.sf.debug("Skipping PO BOX address")
return None
# Remove address prefixes for delivery instructions
        address = re.sub(r'^(c/o|care of|attn:|attention:)\s+[0-9a-z\s\.]+,', r'', address, flags=re.IGNORECASE)
# Remove address prefixes known to return no results (floor, level, suite, etc).
address = re.sub(r'^(Level|Floor|Suite|Room)\s+[0-9a-z]+,', r'', address, flags=re.IGNORECASE)
        # Search for the cleaned address
        data = self.query(address)
# Usage Policy mandates no more than 1 request per second
time.sleep(1)
if data is None:
self.sf.debug("Found no results for " + eventData)
return None
self.sf.info("Found " + str(len(data)) + " matches for " + eventData)
for location in data:
lat = str(location.get('lat'))
lon = str(location.get('lon'))
if not lat or not lon:
continue
coords = lat + "," + lon
self.sf.debug("Found coordinates: " + coords)
evt = SpiderFootEvent("PHYSICAL_COORDINATES", coords, self.__name__, event)
self.notifyListeners(evt)
# End of sfp_openstreetmaps class
|
mit
|
Python
|
|
60e4269027adc05db7b585ab51334c8d28cd7a1c
|
Add assignment writer
|
jhamrick/original-nbgrader,jhamrick/original-nbgrader
|
nbgrader/assignment_writer.py
|
nbgrader/assignment_writer.py
|
"""Based on the FilesWriter class included with IPython."""
import io
import os
import glob
import shutil
from IPython.utils.traitlets import Unicode
from IPython.utils.path import ensure_dir_exists
from IPython.utils.py3compat import unicode_type
from IPython.nbconvert.writers.base import WriterBase
class AssignmentWriter(WriterBase):
build_directory = Unicode(
".", config=True, help="Directory to write output to.")
# Make sure that the output directory exists.
def _build_directory_changed(self, name, old, new):
if new:
ensure_dir_exists(new)
def __init__(self, **kw):
super(AssignmentWriter, self).__init__(**kw)
self._build_directory_changed(
'build_directory', self.build_directory, self.build_directory)
def _makedir(self, path):
"""Make a directory if it doesn't already exist"""
if path:
self.log.info("Making directory %s", path)
ensure_dir_exists(path)
def write(self, output, resources, notebook_name=None, **kw):
"""Consume and write Jinja output to the file system. Output
directory is set via the 'build_directory' variable of this
instance (a configurable).
See base for more...
"""
# Verify that a notebook name is provided.
if notebook_name is None:
raise TypeError('notebook_name')
# Pull the extension and subdir from the resources dict.
output_extension = resources.get('output_extension', None)
# Copy referenced files to output directory
if self.build_directory:
for filename in self.files:
# Copy files that match search pattern
for matching_filename in glob.glob(filename):
# Make sure folder exists.
dest = os.path.join(
self.build_directory, matching_filename)
path = os.path.dirname(dest)
self._makedir(path)
# Copy if destination is different.
if not os.path.normpath(dest) == os.path.normpath(matching_filename):
self.log.info("Copying %s -> %s",
matching_filename, dest)
shutil.copy(matching_filename, dest)
# Determine where to write conversion results.
if output_extension is not None:
dest = notebook_name + '.' + output_extension
else:
dest = notebook_name
if self.build_directory:
dest = os.path.join(self.build_directory, dest)
# Write conversion results.
self.log.info("Writing %i bytes to %s", len(output), dest)
if isinstance(output, unicode_type):
with io.open(dest, 'w', encoding='utf-8') as f:
f.write(output)
else:
with io.open(dest, 'wb') as f:
f.write(output)
return dest
|
mit
|
Python
|
|
a998eaec11c9ec53e593e2a25542eabab0f75890
|
Create __init__.py
|
SpaceHotDog/Flask_API
|
app/__init__.py
|
app/__init__.py
|
# app/__init__.py
from flask_api import FlaskAPI
from flask_sqlalchemy import SQLAlchemy
# local import
from instance.config import app_config
# initialize sql-alchemy
db = SQLAlchemy()
def create_app(config_name):
app = FlaskAPI(__name__, instance_relative_config=True)
app.config.from_object(app_config[config_name])
app.config.from_pyfile('config.py')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)
return app
|
unlicense
|
Python
|
|
8abd31b54d4a767a3d04b82b3f6b8c9cae53222e
|
add build file
|
fdelbos/coolforms
|
build.py
|
build.py
|
#! /usr/bin/env python
#
# build.py
#
# Created by Frederic DELBOS <fred.delbos@gmail.com> on Nov 22 2013.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
#
import subprocess, argparse, sys, time, os
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
template_files = [
'container',
'controller',
'email',
'header',
'line',
'password',
'text',
]
coffee_files = [
'directives/container',
'directives/controller',
'directives/email',
'directives/header',
'directives/line',
'directives/password',
'directives/text',
'validation/service',
]
outname = "coolforms"
def make_templates():
templates = "templates =\n"
for name in template_files:
fd = open(os.path.join('./src/html/', name + '.html'), 'r')
content = fd.read()
fd.close()
templates += " %s: \"\"\"%s\"\"\"\n" % (name, content)
return templates
def make_coffees():
coffees = ""
for name in coffee_files:
fd = open(os.path.join('./src/coffee/', name + '.coffee'), 'r')
content = fd.read()
fd.close()
coffees += "%s\n" % content
return coffees
def make_build():
print "building %s.js" % outname
content = "## generated file do not edit\n\n%s\n%s" % (make_templates(), make_coffees())
fd = open('%s.coffee' % outname, 'w')
fd.write(content)
fd.close()
return True if subprocess.call(['coffee', '--compile', '%s.coffee' % outname]) == 0 else False
class FileChangeHandler(FileSystemEventHandler):
def on_any_event(self, event):
if event.src_path.split('/')[-1][0] == '.':
return
print "change detected at: %s" % event.src_path
if make_build() is False:
print "build failed!"
def watch_files():
make_build()
event_handler = FileChangeHandler()
observer = Observer()
observer.schedule(event_handler, "./src", recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("action", help="can be: 'build', 'watch', or 'minify'")
args = parser.parse_args()
action = args.action
if action == "build":
if make_build() is False:
print "build failed!"
sys.exit(1)
elif action == "watch":
print "watching for file change"
watch_files()
elif action == "minify":
print "minifying"
if make_build() == False:
print "build failed!"
sys.exit(1)
else:
print "Unknow command!"
sys.exit(1)
|
mit
|
Python
|
|
633b6f4c5cecda938f02ff6ccaa529de7b47ce67
|
Initialize excelToCSV
|
JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials
|
books/AutomateTheBoringStuffWithPython/Chapter14/PracticeProject/excelToCSV.py
|
books/AutomateTheBoringStuffWithPython/Chapter14/PracticeProject/excelToCSV.py
|
# Using the openpyxl module, write a program that reads all the Excel files in the
# current working directory and outputs them as CSV files.
# A single Excel file might contain multiple sheets; you’ll have to create one CSV
# file per sheet. The filenames of the CSV files should be
# <excel filename>_<sheet title>.csv, where <excel filename> is the filename of the
# Excel file without the file extension (for example, 'spam_data', not
# 'spam_data.xlsx') and <sheet title> is the string from the Worksheet object’s title
# variable.
#
# Note:
# - Example Excel files can be downloaded from http://nostarch.com/automatestuff/
import csv
import os

import openpyxl

for excelFile in os.listdir('.'):
    # Skip non-xlsx files, load the workbook object.
    if not excelFile.endswith('.xlsx'):
        continue
    wb = openpyxl.load_workbook(excelFile)
    for sheetName in wb.get_sheet_names():
        # Loop through every sheet in the workbook.
        sheet = wb.get_sheet_by_name(sheetName)
        # Create the CSV filename from the Excel filename and sheet title.
        csvFilename = os.path.splitext(excelFile)[0] + '_' + sheetName + '.csv'
        # Create the csv.writer object for this CSV file.
        csvFile = open(csvFilename, 'w', newline='')
        csvWriter = csv.writer(csvFile)
        # Loop through every row in the sheet.
        for rowNum in range(1, sheet.get_highest_row() + 1):
            rowData = []  # append each cell to this list
            # Loop through each cell in the row.
            for colNum in range(1, sheet.get_highest_column() + 1):
                # Append each cell's data to rowData.
                rowData.append(sheet.cell(row=rowNum, column=colNum).value)
            # Write the rowData list to the CSV file.
            csvWriter.writerow(rowData)
        csvFile.close()
|
mit
|
Python
|
|
09181bd4c11501fa9e8274651370e45ac8d83316
|
add vars01.py
|
devlights/try-python
|
trypython/basic/vars01.py
|
trypython/basic/vars01.py
|
# coding: utf-8
"""
Sample demonstrating vars().
"""
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr
class Sample(SampleBase):
def exec(self):
        #
        # Called with no arguments, vars() is the same as locals().
        # Called with an argument, it returns that object's __dict__.
        #
x = 10
y = 20
pr('vars()', vars())
self.fn01(10, 20, 30, **dict(apple=100, pineapple=200))
Sample.fn02(10, 20, 30, **dict(apple=100, pineapple=200))
Sample.fn03(10, 20, 30, **dict(apple=100, pineapple=200))
def fn01(self, *args, **kwargs):
pr('vars() in method', vars())
@classmethod
def fn02(cls, *args, **kwargs):
pr('vars() in class method', vars())
@staticmethod
def fn03(*args, **kwargs):
pr('vars() in static method', vars())
def go():
obj = Sample()
obj.exec()
if __name__ == '__main__':
go()
|
mit
|
Python
|
|
7577933c1e016ee6d4bca73bfa3434dc19c55315
|
add basic initialization
|
sammdot/circa
|
circa.py
|
circa.py
|
import logging
import client
class Circa(client.Client):
def __init__(self, conf):
conf["autoconn"] = False
        # basicConfig is a no-op on subsequent calls, so the log file from conf
        # must be passed in this single call
        logging.basicConfig(filename=conf.get("log", "circa.log"), level=logging.INFO,
            style="%", format="%(asctime)s %(levelname)s %(message)s")
client.Client.__init__(self, **conf)
|
bsd-3-clause
|
Python
|
|
ed479cf11540c6a67bb4b51eed42b91abd869090
|
Add html_doc.py
|
Samuel-L/cli-ws,Samuel-L/cli-ws
|
tests/test_files/html_doc.py
|
tests/test_files/html_doc.py
|
doc = """<!DOCTYPE html>
<html>
<head>
<title>Html document</title>
</head>
<body>
<div>
<h1>H1 Tag</h1>
<h1 class='class-name'>H1 Tag with class</h1>
<h1 id='id-name'>H1 Tag with id</h1>
<p class='class-name'>P tag with class</p>
</div>
</body>
</html>"""
|
mit
|
Python
|
|
308744d8a022c7fc25af4f2ef8a6214cdcf014f9
|
Add additional attributes to GPSLogger (#4755)
|
MungoRae/home-assistant,LinuxChristian/home-assistant,stefan-jonasson/home-assistant,jawilson/home-assistant,sdague/home-assistant,jamespcole/home-assistant,jawilson/home-assistant,molobrakos/home-assistant,auduny/home-assistant,nugget/home-assistant,nkgilley/home-assistant,morphis/home-assistant,auduny/home-assistant,kyvinh/home-assistant,molobrakos/home-assistant,morphis/home-assistant,keerts/home-assistant,LinuxChristian/home-assistant,xifle/home-assistant,xifle/home-assistant,adrienbrault/home-assistant,shaftoe/home-assistant,miniconfig/home-assistant,stefan-jonasson/home-assistant,nugget/home-assistant,ma314smith/home-assistant,HydrelioxGitHub/home-assistant,robjohnson189/home-assistant,alexmogavero/home-assistant,DavidLP/home-assistant,Zac-HD/home-assistant,Duoxilian/home-assistant,sdague/home-assistant,lukas-hetzenecker/home-assistant,MungoRae/home-assistant,alexmogavero/home-assistant,jabesq/home-assistant,robjohnson189/home-assistant,turbokongen/home-assistant,leppa/home-assistant,florianholzapfel/home-assistant,mezz64/home-assistant,qedi-r/home-assistant,open-homeautomation/home-assistant,jamespcole/home-assistant,eagleamon/home-assistant,Zac-HD/home-assistant,MungoRae/home-assistant,rohitranjan1991/home-assistant,Teagan42/home-assistant,kyvinh/home-assistant,balloob/home-assistant,home-assistant/home-assistant,aequitas/home-assistant,ma314smith/home-assistant,FreekingDean/home-assistant,persandstrom/home-assistant,rohitranjan1991/home-assistant,fbradyirl/home-assistant,robjohnson189/home-assistant,leppa/home-assistant,balloob/home-assistant,HydrelioxGitHub/home-assistant,jabesq/home-assistant,auduny/home-assistant,tboyce1/home-assistant,kennedyshead/home-assistant,dmeulen/home-assistant,pschmitt/home-assistant,LinuxChristian/home-assistant,happyleavesaoc/home-assistant,fbradyirl/home-assistant,dmeulen/home-assistant,postlund/home-assistant,open-homeautomation/home-assistant,DavidLP/home-assistant,home-assistant/home-assistant,MungoRae/home-assistant,miniconfig/home-assistant,tchellomello/home-assistant,florianholzapfel/home-assistant,keerts/home-assistant,PetePriority/home-assistant,HydrelioxGitHub/home-assistant,aequitas/home-assistant,kyvinh/home-assistant,soldag/home-assistant,xifle/home-assistant,JshWright/home-assistant,Duoxilian/home-assistant,balloob/home-assistant,jnewland/home-assistant,kyvinh/home-assistant,happyleavesaoc/home-assistant,miniconfig/home-assistant,toddeye/home-assistant,FreekingDean/home-assistant,jnewland/home-assistant,open-homeautomation/home-assistant,persandstrom/home-assistant,keerts/home-assistant,jamespcole/home-assistant,w1ll1am23/home-assistant,xifle/home-assistant,Danielhiversen/home-assistant,mezz64/home-assistant,aronsky/home-assistant,eagleamon/home-assistant,Cinntax/home-assistant,MartinHjelmare/home-assistant,dmeulen/home-assistant,tinloaf/home-assistant,aronsky/home-assistant,nkgilley/home-assistant,alexmogavero/home-assistant,sander76/home-assistant,shaftoe/home-assistant,persandstrom/home-assistant,shaftoe/home-assistant,florianholzapfel/home-assistant,PetePriority/home-assistant,alexmogavero/home-assistant,postlund/home-assistant,jnewland/home-assistant,morphis/home-assistant,tboyce1/home-assistant,ct-23/home-assistant,fbradyirl/home-assistant,ct-23/home-assistant,robbiet480/home-assistant,soldag/home-assistant,tboyce021/home-assistant,partofthething/home-assistant,MartinHjelmare/home-assistant,eagleamon/home-assistant,ewandor/home-assistant,Cinntax/home-assistant,mKeRix/home-assistant,robbiet480/home-assistant,ma314smith/home-assistan
t,happyleavesaoc/home-assistant,tinloaf/home-assistant,JshWright/home-assistant,aequitas/home-assistant,mKeRix/home-assistant,kennedyshead/home-assistant,LinuxChristian/home-assistant,LinuxChristian/home-assistant,rohitranjan1991/home-assistant,Danielhiversen/home-assistant,MartinHjelmare/home-assistant,jabesq/home-assistant,pschmitt/home-assistant,ct-23/home-assistant,GenericStudent/home-assistant,open-homeautomation/home-assistant,titilambert/home-assistant,JshWright/home-assistant,tchellomello/home-assistant,GenericStudent/home-assistant,tboyce021/home-assistant,molobrakos/home-assistant,adrienbrault/home-assistant,shaftoe/home-assistant,keerts/home-assistant,Duoxilian/home-assistant,ct-23/home-assistant,sander76/home-assistant,joopert/home-assistant,qedi-r/home-assistant,tboyce1/home-assistant,titilambert/home-assistant,robjohnson189/home-assistant,ewandor/home-assistant,miniconfig/home-assistant,morphis/home-assistant,ma314smith/home-assistant,lukas-hetzenecker/home-assistant,DavidLP/home-assistant,eagleamon/home-assistant,tinloaf/home-assistant,dmeulen/home-assistant,turbokongen/home-assistant,nugget/home-assistant,mKeRix/home-assistant,toddeye/home-assistant,MungoRae/home-assistant,florianholzapfel/home-assistant,Teagan42/home-assistant,Duoxilian/home-assistant,joopert/home-assistant,stefan-jonasson/home-assistant,PetePriority/home-assistant,ewandor/home-assistant,mKeRix/home-assistant,JshWright/home-assistant,w1ll1am23/home-assistant,stefan-jonasson/home-assistant,tboyce1/home-assistant,Zac-HD/home-assistant,happyleavesaoc/home-assistant,ct-23/home-assistant,partofthething/home-assistant,Zac-HD/home-assistant
|
homeassistant/components/device_tracker/gpslogger.py
|
homeassistant/components/device_tracker/gpslogger.py
|
"""
Support for the GPSLogger platform.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.gpslogger/
"""
import asyncio
from functools import partial
import logging
from homeassistant.const import HTTP_UNPROCESSABLE_ENTITY
from homeassistant.components.http import HomeAssistantView
# pylint: disable=unused-import
from homeassistant.components.device_tracker import ( # NOQA
DOMAIN, PLATFORM_SCHEMA)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['http']
def setup_scanner(hass, config, see):
"""Setup an endpoint for the GPSLogger application."""
hass.http.register_view(GPSLoggerView(see))
return True
class GPSLoggerView(HomeAssistantView):
"""View to handle gpslogger requests."""
url = '/api/gpslogger'
name = 'api:gpslogger'
def __init__(self, see):
"""Initialize GPSLogger url endpoints."""
self.see = see
@asyncio.coroutine
def get(self, request):
"""A GPSLogger message received as GET."""
res = yield from self._handle(request.app['hass'], request.GET)
return res
@asyncio.coroutine
def _handle(self, hass, data):
"""Handle gpslogger request."""
if 'latitude' not in data or 'longitude' not in data:
return ('Latitude and longitude not specified.',
HTTP_UNPROCESSABLE_ENTITY)
if 'device' not in data:
_LOGGER.error('Device id not specified.')
return ('Device id not specified.',
HTTP_UNPROCESSABLE_ENTITY)
device = data['device'].replace('-', '')
gps_location = (data['latitude'], data['longitude'])
accuracy = 200
battery = -1
if 'accuracy' in data:
accuracy = int(float(data['accuracy']))
if 'battery' in data:
battery = float(data['battery'])
attrs = {}
if 'speed' in data:
attrs['speed'] = float(data['speed'])
if 'direction' in data:
attrs['direction'] = float(data['direction'])
if 'altitude' in data:
attrs['altitude'] = float(data['altitude'])
if 'provider' in data:
attrs['provider'] = data['provider']
yield from hass.loop.run_in_executor(
None, partial(self.see, dev_id=device,
gps=gps_location, battery=battery,
gps_accuracy=accuracy,
attributes=attrs))
return 'Setting location for {}'.format(device)
|
"""
Support for the GPSLogger platform.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.gpslogger/
"""
import asyncio
from functools import partial
import logging
from homeassistant.const import HTTP_UNPROCESSABLE_ENTITY
from homeassistant.components.http import HomeAssistantView
# pylint: disable=unused-import
from homeassistant.components.device_tracker import ( # NOQA
DOMAIN, PLATFORM_SCHEMA)
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['http']
def setup_scanner(hass, config, see):
"""Setup an endpoint for the GPSLogger application."""
hass.http.register_view(GPSLoggerView(see))
return True
class GPSLoggerView(HomeAssistantView):
"""View to handle gpslogger requests."""
url = '/api/gpslogger'
name = 'api:gpslogger'
def __init__(self, see):
"""Initialize GPSLogger url endpoints."""
self.see = see
@asyncio.coroutine
def get(self, request):
"""A GPSLogger message received as GET."""
res = yield from self._handle(request.app['hass'], request.GET)
return res
@asyncio.coroutine
def _handle(self, hass, data):
"""Handle gpslogger request."""
if 'latitude' not in data or 'longitude' not in data:
return ('Latitude and longitude not specified.',
HTTP_UNPROCESSABLE_ENTITY)
if 'device' not in data:
_LOGGER.error('Device id not specified.')
return ('Device id not specified.',
HTTP_UNPROCESSABLE_ENTITY)
device = data['device'].replace('-', '')
gps_location = (data['latitude'], data['longitude'])
accuracy = 200
battery = -1
if 'accuracy' in data:
accuracy = int(float(data['accuracy']))
if 'battery' in data:
battery = float(data['battery'])
yield from hass.loop.run_in_executor(
None, partial(self.see, dev_id=device,
gps=gps_location, battery=battery,
gps_accuracy=accuracy))
return 'Setting location for {}'.format(device)
|
apache-2.0
|
Python
|
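The endpoint above takes plain GET parameters, so a client call is easy to sketch. A minimal example, assuming a Home Assistant instance on localhost:8123 (host, port, and all sample values are assumptions, not part of the commit):
import requests
resp = requests.get(
    'http://localhost:8123/api/gpslogger',
    params={
        'device': 'my-phone',   # dashes are stripped server-side
        'latitude': '52.3702',
        'longitude': '4.8952',
        'accuracy': '12.5',     # coerced via int(float(...))
        'battery': '87',
        'speed': '1.4',         # forwarded as an extra attribute
    },
)
print(resp.status_code, resp.text)  # e.g. 200 'Setting location for myphone'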
e9ccfeb5b1bc5e30756075c1afd229f3691ac5ac
|
add python translation of centered smd_nfw
|
jesford/smd_nfw,jesford/smd-nfw
|
smd_nfw.py
|
smd_nfw.py
|
from __future__ import absolute_import, division, print_function
import numpy as np
from astropy import units
rbins = np.loadtxt('smd_in1.dat')
nbins = rbins.shape[0]
rbins = rbins * units.Mpc
data = np.loadtxt('smd_in2.dat')
nlens = data.shape[0]
rs = data[:,0] * units.Mpc
delta_c = data[:,1] #dimensionless
rho_crit = data[:,2] #assumed to be in Msun/pc^3
sig_center = data[:,3] * units.Mpc
rho_crit = rho_crit * 10.**6 * units.Msun / (units.Mpc * units.pc * units.pc)
#if sig_center[0] == 0:
rbins_repeated = rbins.reshape(nbins,1).repeat(nlens,1)
rs_repeated = rs.reshape(nlens,1).repeat(nbins,1)
x = rbins_repeated.T/rs_repeated
x = x.value
#the 3 cases of dimensionless radius x
x_small = np.where(x < 1.-1.e-6)
x_big = np.where(x > 1.+1.e-6)
x_one = np.where(np.abs(x-1) <= 1.e-6)
#------------------------------------------------------------------------------
# calculate f
bigF = np.zeros_like(x)
f = np.zeros_like(x)
bigF[x_small] = (np.log((1./x[x_small]) + np.sqrt((1./(x[x_small]**2)) - 1.))
/ np.sqrt(1.-(x[x_small]**2)))
# np.arccos is the inverse cosine (acos in other notations), as required here
bigF[x_big] = np.arccos(1./x[x_big]) / np.sqrt(x[x_big]**2 - 1.)
f = (1. - bigF) / (x**2 - 1.)
f[x_one] = 1./3.
if np.isnan(np.sum(f)) or np.isinf(np.sum(f)):
print('\nERROR: f is not all real\n', f)
#------------------------------------------------------------------------------
# calculate g
firstpart = np.zeros_like(x)
secondpart = np.zeros_like(x)
g = np.zeros_like(x)
firstpart[x_small] = (( (4./x[x_small]**2) + (2./(x[x_small]**2 - 1.)) )
/ np.sqrt(1. - x[x_small]**2))
firstpart[x_big] = (8./(x[x_big]**2 * np.sqrt(x[x_big]**2 - 1.)) +
4./((x[x_big]**2-1.)**1.5))
secondpart[x_small] = np.log((1. + np.sqrt((1. - x[x_small])/(1. + x[x_small])))/
(1. - np.sqrt((1. - x[x_small])/(1. + x[x_small]))))
secondpart[x_big] = np.arctan(np.sqrt((x[x_big] - 1.)/(1. + x[x_big])))
g = firstpart*secondpart + (4./(x**2))*np.log(x/2.) - (2./(x**2-1.))
g[x_one] = (10./3.) + 4.*np.log(0.5)
if np.isnan(np.sum(g)) or np.isinf(np.sum(g)):
print('\nERROR: g is not all real\n', g)
#------------------------------------------------------------------------------
# calculate h
h = np.zeros_like(x)
h = (bigF + np.log(x/2.))/(x**2)
h[x_one] = 1. + np.log(0.5)
if np.isnan(np.sum(h)) or np.isinf(np.sum(h)):
print('\nERROR: h is not all real\n', h)
#------------------------------------------------------------------------------
# calculate centered profiles
rs_dc_rcrit = rs*delta_c*rho_crit
rs_dc_rcrit_repeated = rs_dc_rcrit.value.reshape(nlens,1).repeat(nbins,1)
sigma_nfw = 2. * rs_dc_rcrit_repeated * f
mean_inside_sigma_nfw = 4. * rs_dc_rcrit_repeated * h
deltasigma_nfw = mean_inside_sigma_nfw - sigma_nfw
np.savetxt('sigma_PYTHON.dat', sigma_nfw, fmt='%15.8g')
np.savetxt('deltasigma_PYTHON.dat', deltasigma_nfw, fmt='%15.8g')
|
mit
|
Python
|
|
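As a quick sanity check on the piecewise branches above, the closed-form f(x) for x < 1 should approach the hard-coded limit 1/3 as x -> 1. A small sketch, not part of the commit:
import numpy as np
x = 0.999  # just below the x = 1 branch point
bigF = np.log(1./x + np.sqrt(1./x**2 - 1.)) / np.sqrt(1. - x**2)
print((1. - bigF) / (x**2 - 1.))  # ~0.333, matching f[x_one] = 1./3.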
8f32a5be4ffc427a1fbc6bf700edc81b191e876f
|
add tests for basic funcionality
|
terceiro/squad,terceiro/squad,terceiro/squad,terceiro/squad
|
test/api/test_rest.py
|
test/api/test_rest.py
|
import json
from test.api import APIClient
from django.test import TestCase
from squad.core import models
from squad.ci import models as ci_models
class RestApiTest(TestCase):
def setUp(self):
self.group = models.Group.objects.create(slug='mygroup')
self.project = self.group.projects.create(slug='myproject')
self.build = self.project.builds.create(version='1')
self.environment = self.project.environments.create(slug='myenv')
self.testrun = self.build.test_runs.create(environment=self.environment, build=self.build)
self.backend = ci_models.Backend.objects.create(name='foobar')
self.testjob = self.build.test_jobs.create(
definition="foo: bar",
backend=self.backend,
target=self.project,
target_build=self.build,
build='1',
environment='myenv',
testrun=self.testrun
)
def hit(self, url):
response = self.client.get(url)
self.assertEqual(200, response.status_code)
text = response.content.decode('utf-8')
if response['Content-Type'] == 'application/json':
return json.loads(text)
else:
return text
def test_root(self):
self.hit('/api/')
def test_projects(self):
data = self.hit('/api/projects/')
self.assertEqual(1, len(data['results']))
def test_project_builds(self):
data = self.hit('/api/projects/%d/builds/' % self.project.id)
self.assertEqual(1, len(data['results']))
def test_builds(self):
data = self.hit('/api/builds/')
self.assertEqual(1, len(data['results']))
def test_build_testruns(self):
data = self.hit('/api/builds/%d/testruns/' % self.build.id)
self.assertEqual(1, len(data['results']))
def test_build_testjobs(self):
data = self.hit('/api/builds/%d/testjobs/' % self.build.id)
self.assertEqual(1, len(data['results']))
def test_testjob(self):
data = self.hit('/api/testjobs/%d/' % self.testjob.id)
self.assertEqual('myenv', data['environment'])
def test_testjob_definition(self):
data = self.hit('/api/testjobs/%d/definition/' % self.testjob.id)
self.assertEqual('foo: bar', data)
def test_backends(self):
data = self.hit('/api/backends/')
self.assertEqual('foobar', data['results'][0]['name'])
def test_environments(self):
data = self.hit('/api/environments/')
self.assertEqual('myenv', data['results'][0]['slug'])
|
agpl-3.0
|
Python
|
|
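For context, the 'results' key that several assertions index into is a paginated list payload; a plausible body for /api/projects/ is sketched below (illustrative values; the test itself only checks the length of 'results'):
# Illustrative response shape, not asserted by the test beyond len(results):
{
    "count": 1,
    "next": None,
    "previous": None,
    "results": [{"id": 1, "slug": "myproject"}],
}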
86c6dcc8fe0ac739ed1ae1a7898ea609fe959c61
|
add spectra extraction script
|
amirkdv/biseqt,amirkdv/biseqt,amirkdv/biseqt
|
spectra.py
|
spectra.py
|
import sqlite3
import igraph
import os
from matplotlib import pyplot as plt
db = 'genome.leishmania.hp_assembly.db'
wordlen = 10
base_dir = 'spectra-%d' % wordlen
pos_dir = os.path.join(base_dir, 'positive')
neg_dir = os.path.join(base_dir, 'negative')
num_bins = 1000
ylim = (0,100)
xlim = (-15000,15000)
G = igraph.read('leishmania_true.gml')
db_id_from_graph_id = lambda vid: int(G.vs[vid]['name'].split('#')[1])
true_overlaps = [set([db_id_from_graph_id(u), db_id_from_graph_id(v)]) for u,v in G.get_edgelist()]
vertices = {db_id_from_graph_id(int(v['id'])):v['name'] for v in G.vs}
start_pos_from_db_id = lambda dbid: int(vertices[dbid].split('#')[0].split()[1].split('-')[0])
red = '#ffe7e7'
green = '#6bdb6b'
from align import ProgressIndicator
with sqlite3.connect(db) as conn:
c = conn.cursor()
c.execute('SELECT id FROM seq ORDER BY id ASC')
ids = [row[0] for row in c]
    N = len(ids) * (len(ids) + 1) // 2  # pairs including the diagonal, as iterated below
os.mkdir(base_dir)
os.mkdir(pos_dir)
os.mkdir(neg_dir)
indic = ProgressIndicator('building spectra', N, percentage=False)
indic.start()
for S_id_idx in range(len(ids)):
for T_id_idx in range(S_id_idx, len(ids)):
S_id = ids[S_id_idx]
T_id = ids[T_id_idx]
q = """
SELECT S_idx - T_idx FROM seeds_%d
WHERE S_id = %d AND T_id = %d
""" % (wordlen, S_id, T_id)
c.execute(q)
shifts = [row[0] for row in c]
if len(shifts) < 5:
continue
indic.progress()
plt.clf()
color = 'red'
if set([S_id, T_id]) in true_overlaps:
color = 'green'
true_shift = start_pos_from_db_id(T_id) - start_pos_from_db_id(S_id)
                plt.axvline(x=true_shift, color='#333333', linewidth=20, alpha=0.4)  # ymin/ymax are axes fractions, so use the full-height default
plt.hist(shifts, num_bins, histtype='stepfilled', color=color, edgecolor=color)
plt.tick_params(axis='both', which='major', labelsize=8)
plt.xlim(*xlim)
plt.ylim(*ylim)
plt.grid(True)
plt.title('%s ----> %s (%d total seeds)' % (vertices[S_id], vertices[T_id], len(shifts)), fontsize=8, fontname='Inconsolata')
plt.savefig(os.path.join(
pos_dir if color == 'green' else neg_dir,
'%d_%d.png' % (S_id, T_id)
))
indic.finish()
|
bsd-3-clause
|
Python
|
|
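The premise behind the histograms: seed shifts S_idx - T_idx coming from a true overlap pile up around the real offset, while spurious matches scatter across the range. A toy illustration with synthetic numbers, not part of the commit:
import numpy as np
true_shift = 4200  # invented offset between two overlapping reads
signal = np.random.normal(true_shift, 50, size=200).astype(int)  # seeds from the overlap
noise = np.random.randint(-15000, 15000, size=40)                # spurious seeds
shifts = np.concatenate([signal, noise])
# A histogram of shifts, binned as above, shows one sharp peak near 4200.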
d42b2ee3eb60a0c11c1a973e030de909519ba662
|
Add garage.argparse.add_bool_argument
|
clchiou/garage,clchiou/garage,clchiou/garage,clchiou/garage
|
garage/argparse.py
|
garage/argparse.py
|
__all__ = [
'add_bool_argument',
]
import argparse
def add_bool_argument(parser, *args, **kwargs):
kwargs = dict(kwargs) # Make a copy before modifying it...
kwargs['choices'] = (True, False)
kwargs['type'] = parse_bool
parser.add_argument(*args, **kwargs)
def parse_bool(string):
try:
return {'true': True, 'false': False}[string.lower()]
except KeyError:
raise argparse.ArgumentTypeError(
'expect either \'true\' or \'false\' instead of %r' % string)
|
mit
|
Python
|
|
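Usage is straightforward; a short sketch (the --verbose flag is an invented example):
import argparse
from garage.argparse import add_bool_argument
parser = argparse.ArgumentParser()
add_bool_argument(parser, '--verbose', default=False)
args = parser.parse_args(['--verbose', 'true'])
assert args.verbose is True
# parser.parse_args(['--verbose', 'yes']) would exit: only 'true'/'false' parse.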
10f9b0d1b02c2b5f4c4eeac0c1f803657c89764b
|
add example file for smooth OT
|
rflamary/POT,rflamary/POT
|
examples/plot_OT_1D_smooth.py
|
examples/plot_OT_1D_smooth.py
|
# -*- coding: utf-8 -*-
"""
====================
1D optimal transport
====================
This example illustrates the computation of EMD, Sinkhorn and smooth OT plans
and their visualization.
"""
# Author: Remi Flamary <remi.flamary@unice.fr>
#
# License: MIT License
import numpy as np
import matplotlib.pylab as pl
import ot
import ot.plot
from ot.datasets import get_1D_gauss as gauss
##############################################################################
# Generate data
# -------------
#%% parameters
n = 100 # nb bins
# bin positions
x = np.arange(n, dtype=np.float64)
# Gaussian distributions
a = gauss(n, m=20, s=5) # m= mean, s= std
b = gauss(n, m=60, s=10)
# loss matrix
M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)))
M /= M.max()
##############################################################################
# Plot distributions and loss matrix
# ----------------------------------
#%% plot the distributions
pl.figure(1, figsize=(6.4, 3))
pl.plot(x, a, 'b', label='Source distribution')
pl.plot(x, b, 'r', label='Target distribution')
pl.legend()
#%% plot distributions and loss matrix
pl.figure(2, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, M, 'Cost matrix M')
##############################################################################
# Solve EMD
# ---------
#%% EMD
G0 = ot.emd(a, b, M)
pl.figure(3, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, G0, 'OT matrix G0')
##############################################################################
# Solve Sinkhorn
# --------------
#%% Sinkhorn
lambd = 2e-3
Gs = ot.sinkhorn(a, b, M, lambd, verbose=True)
pl.figure(4, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, Gs, 'OT matrix Sinkhorn')
pl.show()
##############################################################################
# Solve Smooth OT
# ---------------
#%% Smooth OT with KL regularization
lambd = 2e-3
Gsm = ot.smooth.smooth_ot_dual(a, b, M, lambd, reg_type='kl')
pl.figure(5, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, Gsm, 'OT matrix Smooth OT KL reg.')
pl.show()
#%% Smooth OT with l2 regularization
lambd = 1e-1
Gsm = ot.smooth.smooth_ot_dual(a, b, M, lambd, reg_type='l2')
pl.figure(6, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, Gsm, 'OT matrix Smooth OT l2 reg.')
pl.show()
|
mit
|
Python
|
|
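A side note on the KL branch: smooth OT with KL (negentropy) regularization solves the same entropic problem as Sinkhorn, so for equal lambd the two plans above should nearly coincide. A quick check reusing a, b, M from the script; a sketch, not part of the commit:
G_sink = ot.sinkhorn(a, b, M, 2e-3)
G_kl = ot.smooth.smooth_ot_dual(a, b, M, 2e-3, reg_type='kl')
print(abs(G_sink - G_kl).max())  # expected to be small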
82dda8f12060ba0d3f83b6a9ff92bfcfbb212e25
|
Add utils
|
aitoralmeida/intellidata,aitoralmeida/intellidata
|
utils/extract_zipcodes.py
|
utils/extract_zipcodes.py
|
import json
from bbvalib import create_mongoclient
db = create_mongoclient()
zipcodes = set()
zipcodes.update(db.top_clients_week.find().distinct("shop_zipcode"))
zipcodes.update(db.top_clients_month.find().distinct("shop_zipcode"))
zipcodes.update(db.top_clients_week.find().distinct("home_zipcode"))
zipcodes.update(db.top_clients_month.find().distinct("home_zipcode"))
json.dump(list(zipcodes), open('home_zipcodes.json', 'w'))
|
apache-2.0
|
Python
|
|
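Reading the dump back is symmetric; a tiny sketch:
import json
with open('home_zipcodes.json') as f:
    zipcodes = json.load(f)
print(len(zipcodes), zipcodes[:5])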
31110631d7fa43c695bc1f5504cd02c3d0cab745
|
Add some tests
|
antismash/db-api,antismash/db-api
|
tests/test_error_handlers.py
|
tests/test_error_handlers.py
|
from testutils import app # noqa: F401
def test_not_found(client):
'''Test 404 error handler'''
results = client.get('/totally/made/up')
assert results.status_code == 404
assert results.json == {'error': 'Not found'}
def test_method_not_allowed(client):
'''Test 405 error handler'''
results = client.get('/api/v1.0/search')
assert results.status_code == 405
assert results.json == {'error': 'Method not allowed'}
def test_internal_server_error(client):
'''Test 500 error handler'''
results = client.get('/api/v1.0/stats')
assert results.status_code == 500
assert results.json == {'error': 'Internal server error'}
|
agpl-3.0
|
Python
|
|
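The tests pin down both the status code and the JSON body, which implies Flask handlers along these lines (an assumption; the application code is not part of this commit):
# Hypothetical counterparts to the tests; app is the Flask application object.
from flask import jsonify
@app.errorhandler(404)
def not_found(error):
    return jsonify({'error': 'Not found'}), 404
@app.errorhandler(405)
def method_not_allowed(error):
    return jsonify({'error': 'Method not allowed'}), 405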
9d5be9d464168a3d9e9b3265a1581e0359f69f2a
|
test for #579
|
RDFLib/rdflib,RDFLib/rdflib,RDFLib/rdflib,RDFLib/rdflib
|
test/test_issue579.py
|
test/test_issue579.py
|
# test for https://github.com/RDFLib/rdflib/issues/579
from rdflib import Graph, URIRef, Literal, Namespace
from rdflib.namespace import FOAF, RDF
g = Graph()
g.bind('foaf', FOAF)
n = Namespace("http://myname/")
g.add((n.bob, FOAF.name, Literal('bb')))
# the SELECT query resolves the bound foaf prefix.
assert len(g.query("select ?n where { ?n foaf:name 'bb' . }")) == 1
# before the fix for issue 579, updates ignored the binding; they must work too.
g.update("delete where { ?e foaf:name 'ss' .}")
assert len(g) == 1
g.update("delete where { ?e foaf:name 'bb' .}")
assert len(g) == 0
|
bsd-3-clause
|
Python
|
|
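The prefix can also be supplied per call rather than via bind(); an equivalent form of the last update (a sketch):
g.update("delete where { ?e foaf:name 'bb' .}", initNs={'foaf': FOAF})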
d9b4b0d913304b19365854b0ffceab179237d8f8
|
Add tests for float->int symbols (esp for 32-bit windows and linux)
|
numba/numba,stonebig/numba,stefanseefeld/numba,sklam/numba,seibert/numba,pitrou/numba,GaZ3ll3/numba,IntelLabs/numba,pombredanne/numba,stonebig/numba,gdementen/numba,pitrou/numba,pombredanne/numba,pitrou/numba,sklam/numba,ssarangi/numba,numba/numba,cpcloud/numba,stuartarchibald/numba,pombredanne/numba,seibert/numba,jriehl/numba,jriehl/numba,GaZ3ll3/numba,pombredanne/numba,stefanseefeld/numba,jriehl/numba,pombredanne/numba,pitrou/numba,stefanseefeld/numba,numba/numba,GaZ3ll3/numba,cpcloud/numba,gmarkall/numba,gmarkall/numba,cpcloud/numba,IntelLabs/numba,cpcloud/numba,jriehl/numba,ssarangi/numba,seibert/numba,IntelLabs/numba,GaZ3ll3/numba,stuartarchibald/numba,sklam/numba,jriehl/numba,stefanseefeld/numba,GaZ3ll3/numba,gdementen/numba,seibert/numba,gmarkall/numba,pitrou/numba,cpcloud/numba,stefanseefeld/numba,stuartarchibald/numba,IntelLabs/numba,sklam/numba,numba/numba,gmarkall/numba,ssarangi/numba,gdementen/numba,stuartarchibald/numba,gdementen/numba,ssarangi/numba,seibert/numba,ssarangi/numba,numba/numba,sklam/numba,stonebig/numba,stuartarchibald/numba,IntelLabs/numba,stonebig/numba,gmarkall/numba,gdementen/numba,stonebig/numba
|
numba/tests/test_floatsyms.py
|
numba/tests/test_floatsyms.py
|
from __future__ import print_function
import numba.unittest_support as unittest
from numba.compiler import compile_isolated
from numba import types
class TestFloatSymbols(unittest.TestCase):
"""
    Test float -> int conversion symbols (e.g. ftol on 32-bit Windows and Linux)
"""
def _test_template(self, realty, intty):
def cast(x):
y = x
return y
cres = compile_isolated(cast, args=[realty], return_type=intty)
self.assertAlmostEqual(cres.entry_point(1.), 1)
def test_float64_to_int64(self):
self._test_template(types.float64, types.int64)
def test_float64_to_uint64(self):
self._test_template(types.float64, types.uint64)
def test_float64_to_int32(self):
self._test_template(types.float64, types.int32)
def test_float64_to_uint32(self):
self._test_template(types.float64, types.uint32)
def test_float32_to_int64(self):
self._test_template(types.float32, types.int64)
def test_float32_to_uint64(self):
self._test_template(types.float32, types.uint64)
def test_float32_to_int32(self):
self._test_template(types.float32, types.int32)
def test_float32_to_uint32(self):
self._test_template(types.float32, types.uint32)
if __name__ == '__main__':
unittest.main()
|
bsd-2-clause
|
Python
|
|
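Outside the test harness the same cast can be written with an explicit signature; the conversion truncates toward zero, C-style. A sketch (njit usage is an assumption beyond the compile_isolated API used above):
from numba import njit, int32, float64
@njit(int32(float64))
def cast(x):
    return x  # the float -> int32 conversion happens at the return boundary
print(cast(1.9), cast(-1.9))  # 1 -1: truncation toward zero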
45a6e108418944026f6d67fa018573a831dc5107
|
add mul_recds.py
|
r2k0/flask-apps,r2k0/flask-apps
|
test/sql/mul_recds.py
|
test/sql/mul_recds.py
|
import sqlite3
with sqlite3.connect("new.db") as connection:
c = connection.cursor()
cities = [
('Boston','MA',600000)
]
|
mit
|
Python
|
|
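The script stops after building the list; a completion consistent with the filename ("multiple records") would bulk-insert and commit, sketched below (an assumption -- the commit itself ends where shown, and the cities table must already exist):
    # Hypothetical continuation: bulk-insert the records and persist them.
    c.executemany("INSERT INTO cities VALUES (?, ?, ?)", cities)
    connection.commit()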
aa320afb447ab8486720a05f0613cad446c9bea9
|
Add graph module.
|
sk89q/Plumeria,sk89q/Plumeria,sk89q/Plumeria
|
plumeria/plugins/graph.py
|
plumeria/plugins/graph.py
|
import asyncio
import io
import re
import matplotlib
import pkg_resources
from plumeria.command import commands, CommandError
from plumeria.message import Response, MemoryAttachment
from plumeria.util.ratelimit import rate_limit
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
PERCENTAGE_PATTERN = re.compile("([0-9]+\\.?[0-9]*)%")
font_path = pkg_resources.resource_filename("plumeria", 'fonts/FiraSans-Regular.ttf')
def generate_pie(labels, data):
plt.figure(1, figsize=(5, 5))
ax = plt.axes([0.1, 0.1, 0.4, 0.4])
plt.pie(data, labels=labels, autopct='%1.0f%%', startangle=90)
prop = fm.FontProperties(fname=font_path, size=11)
for text in ax.texts:
text.set_fontproperties(prop)
buf = io.BytesIO()
plt.savefig(buf, bbox_inches='tight', transparent="True", pad_inches=0.1)
plt.clf()
return buf
@commands.register("pie", category="Graphing")
@rate_limit()
async def image(message):
"""
Generate a pie graph.
"""
labels = []
data = []
total_pct = 0
    if ';' in message.content:
        delimiter = ';'
    elif ',' in message.content:
        delimiter = ','
    else:
        raise CommandError("Split pie sections with ; or ,")
    for part in message.content.strip().split(delimiter):
m = PERCENTAGE_PATTERN.search(part)
if m:
labels.append(PERCENTAGE_PATTERN.sub("", part, 1).strip())
pct = float(m.group(1)) / 100
data.append(pct)
total_pct += pct
else:
raise CommandError("Could not find a % in '{}'".format(part))
data = list(map(lambda x: x / total_pct, data))
def execute():
return generate_pie(labels, data)
buf = await asyncio.get_event_loop().run_in_executor(None, execute)
return Response("", attachments=[MemoryAttachment(buf, "graph.png", "image/png")])
|
mit
|
Python
|
|
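The renderer can be exercised without the bot; a direct call, with invented labels and values:
buf = generate_pie(['work', 'sleep', 'games'], [0.40, 0.35, 0.25])
with open('pie.png', 'wb') as f:
    f.write(buf.getvalue())
In chat, the command itself would parse input of the form "pie work 40%; sleep 35%; games 25%".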
8ff6f08a497adba17bd02eae9ec6425a71927e08
|
Update admin.py
|
imgmix/django-avatar,MachineandMagic/django-avatar,jezdez/django-avatar,brajeshvit/avatarmodule,therocode/django-avatar,ad-m/django-avatar,dannybrowne86/django-avatar,allenling/django-avatar,therocode/django-avatar,barbuza/django-avatar,ad-m/django-avatar,tbabej/django-avatar,brajeshvit/avatarmodule,jezdez/django-avatar,grantmcconnaughey/django-avatar,grantmcconnaughey/django-avatar,tbabej/django-avatar,dannybrowne86/django-avatar,imgmix/django-avatar,barbuza/django-avatar,MachineandMagic/django-avatar,allenling/django-avatar
|
avatar/admin.py
|
avatar/admin.py
|
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from django.utils import six
from django.template.loader import render_to_string
from avatar.models import Avatar
from avatar.signals import avatar_updated
from avatar.util import get_user_model
class AvatarAdmin(admin.ModelAdmin):
list_display = ('get_avatar', 'user', 'primary', "date_uploaded")
list_filter = ('primary',)
search_fields = ('user__%s' % getattr(get_user_model(), 'USERNAME_FIELD', 'username'),)
list_per_page = 50
def get_avatar(self, avatar_in):
        context = {
            'user': avatar_in.user,
            'url': avatar_in.avatar.url,
            'alt': six.text_type(avatar_in.user),
            'size': 80,
        }
        return render_to_string('avatar/avatar_tag.html', context)
get_avatar.short_description = _('Avatar')
get_avatar.allow_tags = True
def save_model(self, request, obj, form, change):
super(AvatarAdmin, self).save_model(request, obj, form, change)
avatar_updated.send(sender=Avatar, user=request.user, avatar=obj)
admin.site.register(Avatar, AvatarAdmin)
|
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from avatar.models import Avatar
from avatar.signals import avatar_updated
from avatar.templatetags.avatar_tags import avatar
from avatar.util import get_user_model
class AvatarAdmin(admin.ModelAdmin):
list_display = ('get_avatar', 'user', 'primary', "date_uploaded")
list_filter = ('primary',)
search_fields = ('user__%s' % getattr(get_user_model(), 'USERNAME_FIELD', 'username'),)
list_per_page = 50
def get_avatar(self, avatar_in):
return avatar(avatar_in.user, 80)
get_avatar.short_description = _('Avatar')
get_avatar.allow_tags = True
def save_model(self, request, obj, form, change):
super(AvatarAdmin, self).save_model(request, obj, form, change)
avatar_updated.send(sender=Avatar, user=request.user, avatar=obj)
admin.site.register(Avatar, AvatarAdmin)
|
bsd-3-clause
|
Python
|
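A note on the change above: rendering avatar/avatar_tag.html through render_to_string decouples the admin column from the avatar_tags template tag while keeping the same markup; the context keys suggest the template is essentially an <img> tag fed by url, alt, and size (an assumption -- the template itself is not shown in this commit).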