commit
stringlengths 40
40
| subject
stringlengths 4
1.73k
| repos
stringlengths 5
127k
| old_file
stringlengths 2
751
| new_file
stringlengths 2
751
| new_contents
stringlengths 1
8.98k
| old_contents
stringlengths 0
6.59k
| license
stringclasses 13
values | lang
stringclasses 23
values |
---|---|---|---|---|---|---|---|---|
77859dbc019a19222ada36ebccc849ba77649a86
|
add to unicode functions to all forum models
|
byteweaver/django-forums,byteweaver/django-forums,ckcnik/django-forums,ckcnik/django-forums
|
forums/models.py
|
forums/models.py
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
class Category(models.Model):
name = models.CharField(_("Name"), max_length=255, unique=True)
position = models.IntegerField(_("Position"), default=0)
class Meta:
ordering = ['position']
def __unicode__(self):
return self.name
class Forum(models.Model):
category = models.ForeignKey(Category, related_name='forums')
name = models.CharField(_("Name"), max_length=255)
position = models.IntegerField(_("Position"), default=0)
description = models.TextField(_("Description"), blank=True)
class Meta:
ordering = ['position']
def __unicode__(self):
return self.name
class Topic(models.Model):
forum = models.ForeignKey(Forum, related_name='topics')
name = models.CharField(_("Name"), max_length=255)
last_post = models.ForeignKey('Post', verbose_name=_("Last post"), related_name='forum_last_post', blank=True, null=True)
class Meta:
ordering = ['-last_post__created']
def __unicode__(self):
return self.name
class Post(models.Model):
topic = models.ForeignKey(Topic, related_name='posts')
user = models.ForeignKey(User, related_name='forum_posts')
created = models.DateTimeField(_("Created"), auto_now_add=True)
updated = models.DateTimeField(_("Updated"),auto_now=True)
body = models.TextField(_("Body"))
class Meta:
ordering = ['created']
def __unicode__(self):
return self.body
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
class Category(models.Model):
name = models.CharField(_("Name"), max_length=255, unique=True)
position = models.IntegerField(_("Position"), default=0)
class Meta:
ordering = ['position']
class Forum(models.Model):
category = models.ForeignKey(Category, related_name='forums')
name = models.CharField(_("Name"), max_length=255)
position = models.IntegerField(_("Position"), default=0)
description = models.TextField(_("Description"), blank=True)
class Meta:
ordering = ['position']
class Topic(models.Model):
forum = models.ForeignKey(Forum, related_name='topics')
name = models.CharField(_("Name"), max_length=255)
last_post = models.ForeignKey('Post', verbose_name=_("Last post"), related_name='forum_last_post', blank=True, null=True)
class Meta:
ordering = ['-last_post__created']
class Post(models.Model):
topic = models.ForeignKey(Topic, related_name='posts')
user = models.ForeignKey(User, related_name='forum_posts')
created = models.DateTimeField(_("Created"), auto_now_add=True)
updated = models.DateTimeField(_("Updated"),auto_now=True)
body = models.TextField(_("Body"))
class Meta:
ordering = ['created']
|
bsd-3-clause
|
Python
|
694df5ba69e4e7123009605e59c2b5417a3b52c5
|
Remove print statement about number of bins
|
fauzanzaid/IUCAA-GRB-detection-Feature-extraction
|
tools/fitsevt.py
|
tools/fitsevt.py
|
#! /usr/bin/python3
import sys
import os
import math
from astropy.io import fits
inputFolder = sys.argv[1]
outputFolder = sys.argv[2]
eLo = int(sys.argv[3])
eHi = int(sys.argv[4])
binSize = int(sys.argv[5])
fnames = os.listdir(inputFolder)
for fname in fnames:
print(fname)
hdulist = fits.open(inputFolder+"/"+fname)
for i in range(1,5):
timeRange = hdulist[i].header["TSTOP"] - hdulist[i].header["TSTART"]
nBins = math.ceil(timeRange/binSize)
count = [0]*nBins
for event in hdulist[i].data:
if(event["ENERGY"]>=eLo or event["ENERGY"]<=eHi):
index = math.floor( nBins*(event["Time"] - hdulist[i].header["TSTART"])/timeRange )
count[index] += 1
sigClass = 1
with open(outputFolder+"/{0}_{1}".format(fname,i),'w') as f:
f.write("{0} {1}\n".format(nBins,sigClass))
for j in range(nBins):
f.write("{0}\n".format(count[j]))
|
#! /usr/bin/python3
import sys
import os
import math
from astropy.io import fits
inputFolder = sys.argv[1]
outputFolder = sys.argv[2]
eLo = int(sys.argv[3])
eHi = int(sys.argv[4])
binSize = int(sys.argv[5])
fnames = os.listdir(inputFolder)
for fname in fnames:
print(fname)
hdulist = fits.open(inputFolder+"/"+fname)
for i in range(1,5):
timeRange = hdulist[i].header["TSTOP"] - hdulist[i].header["TSTART"]
nBins = math.ceil(timeRange/binSize)
count = [0]*nBins
print(nBins)
for event in hdulist[i].data:
if(event["ENERGY"]>=eLo or event["ENERGY"]<=eHi):
index = math.floor( nBins*(event["Time"] - hdulist[i].header["TSTART"])/timeRange )
count[index] += 1
sigClass = 1
with open(outputFolder+"/{0}_{1}".format(fname,i),'w') as f:
f.write("{0} {1}\n".format(nBins,sigClass))
for j in range(nBins):
f.write("{0}\n".format(count[j]))
|
mit
|
Python
|
22a4644bd510a8b786d181c01c20f3dc522dac8d
|
Update corehq/apps/auditcare/migrations/0004_add_couch_id.py
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
corehq/apps/auditcare/migrations/0004_add_couch_id.py
|
corehq/apps/auditcare/migrations/0004_add_couch_id.py
|
# Generated by Django 2.2.20 on 2021-05-21 17:32
from django.db import migrations, models
ACCESS_INDEX = "audit_access_couch_10d1b_idx"
ACCESS_TABLE = "auditcare_accessaudit"
NAVIGATION_EVENT_INDEX = "audit_nav_couch_875bc_idx"
NAVIGATION_EVENT_TABLE = "auditcare_navigationeventaudit"
def _create_index_sql(table_name, index_name):
return """
CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS {} ON {} (couch_id)
WHERE couch_id IS NOT NULL
""".format(index_name, table_name)
def _drop_index_sql(index_name):
return "DROP INDEX CONCURRENTLY IF EXISTS {}".format(index_name)
class Migration(migrations.Migration):
atomic = False
dependencies = [
('auditcare', '0003_truncatechars'),
]
operations = [
migrations.AddField(
model_name='accessaudit',
name='couch_id',
field=models.CharField(max_length=126, null=True),
),
migrations.RunSQL(
sql=_create_index_sql(ACCESS_TABLE, ACCESS_INDEX),
reverse_sql=_drop_index_sql(ACCESS_INDEX),
state_operations=[
migrations.AddIndex(
model_name='accessaudit',
index=models.UniqueConstraint(fields=['couch_id'], condition=models.Q(couch_id__isnull=False), name=ACCESS_INDEX),
),
]
),
migrations.AddField(
model_name='navigationeventaudit',
name='couch_id',
field=models.CharField(max_length=126, null=True),
),
migrations.RunSQL(
sql=_create_index_sql(NAVIGATION_EVENT_TABLE, NAVIGATION_EVENT_INDEX),
reverse_sql=_drop_index_sql(NAVIGATION_EVENT_INDEX),
state_operations=[
migrations.AddIndex(
model_name='navigationeventaudit',
index=models.UniqueConstraint(fields=['couch_id'], condition=models.Q(couch_id__isnull=False), name=NAVIGATION_EVENT_INDEX),
),
]
),
]
|
# Generated by Django 2.2.20 on 2021-05-21 17:32
from django.db import migrations, models
ACCESS_INDEX = "audit_access_couch_10d1b_idx"
ACCESS_TABLE = "auditcare_accessaudit"
NAVIGATION_EVENT_INDEX = "audit_nav_couch_875bc_idx"
NAVIGATION_EVENT_TABLE = "auditcare_navigationeventaudit"
def _create_index_sql(table_name, index_name):
return """
CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS {} ON {} (couch_id)
WHERE couch_id IS NOT NULL
""".format(index_name, table_name)
def _drop_index_sql(index_name):
return "DROP INDEX CONCURRENTLY IF EXISTS {}".format(index_name)
class Migration(migrations.Migration):
atomic = False
dependencies = [
('auditcare', '0003_truncatechars'),
]
operations = [
migrations.AddField(
model_name='accessaudit',
name='couch_id',
field=models.CharField(max_length=126, null=True),
),
migrations.RunSQL(
sql=_create_index_sql(ACCESS_TABLE, ACCESS_INDEX),
reverse_sql=_drop_index_sql(ACCESS_INDEX),
state_operations=[
migrations.AddIndex(
model_name='accessaudit',
index=models.Index(fields=['couch_id'], name=ACCESS_INDEX),
),
]
),
migrations.AddField(
model_name='navigationeventaudit',
name='couch_id',
field=models.CharField(max_length=126, null=True),
),
migrations.RunSQL(
sql=_create_index_sql(NAVIGATION_EVENT_TABLE, NAVIGATION_EVENT_INDEX),
reverse_sql=_drop_index_sql(NAVIGATION_EVENT_INDEX),
state_operations=[
migrations.AddIndex(
model_name='navigationeventaudit',
index=models.UniqueConstraint(fields=['couch_id'], condition=models.Q(couch_id__isnull=False), name=NAVIGATION_EVENT_INDEX),
),
]
),
]
|
bsd-3-clause
|
Python
|
0c816aaa82ee9fee1ee244c6b96c1a2718ec836e
|
use default python command from the environment
|
Rio517/pledgeservice,Rio517/pledgeservice,MayOneUS/pledgeservice,MayOneUS/pledgeservice,Rio517/pledgeservice
|
testrunner.py
|
testrunner.py
|
#!/usr/bin/env python
import os
import sys
import unittest
USAGE = """%prog SDK_PATH TEST_PATH
Run unit tests for App Engine apps."""
SDK_PATH_manual = '/usr/local/google_appengine'
TEST_PATH_manual = '../unittests'
def main(sdk_path, test_path):
os.chdir('backend')
sys.path.extend([sdk_path, '.', '../lib', '../testlib'])
import dev_appserver
dev_appserver.fix_sys_path()
suite = unittest.loader.TestLoader().discover(test_path)
if not unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful():
sys.exit(-1)
if __name__ == '__main__':
SDK_PATH = SDK_PATH_manual
TEST_PATH = TEST_PATH_manual
if len(sys.argv)==2:
SDK_PATH = sys.argv[1]
main(SDK_PATH, TEST_PATH)
|
#!/usr/bin/python
import os
import sys
import unittest
USAGE = """%prog SDK_PATH TEST_PATH
Run unit tests for App Engine apps."""
SDK_PATH_manual = '/usr/local/google_appengine'
TEST_PATH_manual = '../unittests'
def main(sdk_path, test_path):
os.chdir('backend')
sys.path.extend([sdk_path, '.', '../lib', '../testlib'])
import dev_appserver
dev_appserver.fix_sys_path()
suite = unittest.loader.TestLoader().discover(test_path)
if not unittest.TextTestRunner(verbosity=2).run(suite).wasSuccessful():
sys.exit(-1)
if __name__ == '__main__':
SDK_PATH = SDK_PATH_manual
TEST_PATH = TEST_PATH_manual
if len(sys.argv)==2:
SDK_PATH = sys.argv[1]
main(SDK_PATH, TEST_PATH)
|
apache-2.0
|
Python
|
ec2aaf86f2002b060f6e5b4d040961a37f89d06a
|
Update rearrange-string-k-distance-apart.py
|
kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,jaredkoontz/leetcode,yiwen-luo/LeetCode,jaredkoontz/leetcode,jaredkoontz/leetcode,jaredkoontz/leetcode,githubutilities/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,githubutilities/LeetCode,kamyu104/LeetCode,yiwen-luo/LeetCode,kamyu104/LeetCode,githubutilities/LeetCode,jaredkoontz/leetcode,yiwen-luo/LeetCode,yiwen-luo/LeetCode,githubutilities/LeetCode,githubutilities/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015
|
Python/rearrange-string-k-distance-apart.py
|
Python/rearrange-string-k-distance-apart.py
|
# Time: O(n)
# Space: O(n)
class Solution(object):
def rearrangeString(self, str, k):
"""
:type str: str
:type k: int
:rtype: str
"""
cnts = [0] * 26;
for c in str:
cnts[ord(c) - ord('a')] += 1
sorted_cnts = []
for i in xrange(26):
sorted_cnts.append((cnts[i], chr(i + ord('a'))))
sorted_cnts.sort(reverse=True)
max_cnt = sorted_cnts[0][0]
blocks = [[] for _ in xrange(max_cnt)]
i = 0
for cnt in sorted_cnts:
for _ in xrange(cnt[0]):
blocks[i].append(cnt[1])
i = (i + 1) % max(cnt[0], max_cnt - 1)
for i in xrange(max_cnt-1):
if len(blocks[i]) < k:
return ""
return "".join(map(lambda x : "".join(x), blocks))
# Time: O(nlogc), c is the count of unique characters.
# Space: O(c)
from collections import defaultdict
from heapq import heappush, heappop
class Solution2(object):
def rearrangeString(self, str, k):
"""
:type str: str
:type k: int
:rtype: str
"""
if k == 0:
return str
cnts = defaultdict(int)
for c in str:
cnts[c] += 1
heap = []
for c, cnt in cnts.iteritems():
heappush(heap, [-cnt, c])
result = []
while heap:
used_cnt_chars = []
for _ in xrange(min(k, len(str) - len(result))):
if not heap:
return ""
cnt_char = heappop(heap)
result.append(cnt_char[1])
cnt_char[0] += 1
if cnt_char[0] < 0:
used_cnt_chars.append(cnt_char)
for cnt_char in used_cnt_chars:
heappush(heap, cnt_char)
return "".join(result)
|
# Time: O(nlogc), c is the count of unique characters.
# Space: O(c)
from collections import defaultdict
from heapq import heappush, heappop
class Solution(object):
def rearrangeString(self, str, k):
"""
:type str: str
:type k: int
:rtype: str
"""
if k == 0:
return str
cnts = defaultdict(int)
for c in str:
cnts[c] += 1
heap = []
for c, cnt in cnts.iteritems():
heappush(heap, [-cnt, c])
result = []
while heap:
used_cnt_chars = []
for _ in xrange(min(k, len(str) - len(result))):
if not heap:
return ""
cnt_char = heappop(heap)
result.append(cnt_char[1])
cnt_char[0] += 1
if cnt_char[0] < 0:
used_cnt_chars.append(cnt_char)
for cnt_char in used_cnt_chars:
heappush(heap, cnt_char)
return "".join(result)
|
mit
|
Python
|
392cf8f05b6c23600e7a61a51494771ab08f2274
|
add exceptions to should_curry
|
JNRowe/toolz,jcrist/toolz,karansag/toolz,berrytj/toolz,machinelearningdeveloper/toolz,whilo/toolz,jdmcbr/toolz,berrytj/toolz,quantopian/toolz,jdmcbr/toolz,bartvm/toolz,JNRowe/toolz,simudream/toolz,machinelearningdeveloper/toolz,obmarg/toolz,karansag/toolz,pombredanne/toolz,pombredanne/toolz,simudream/toolz,Julian-O/toolz,llllllllll/toolz,jcrist/toolz,obmarg/toolz,cpcloud/toolz,llllllllll/toolz,whilo/toolz,cpcloud/toolz,quantopian/toolz,bartvm/toolz,Julian-O/toolz
|
toolz/curried.py
|
toolz/curried.py
|
"""
Alternate namespece for toolz such that all functions are curried
Currying provides implicit partial evaluation of all functions
Example:
Get usually requires two arguments, an index and a collection
>>> from toolz.curried import get
>>> get(0, ('a', 'b'))
'a'
When we use it in higher order functions we often want to pass a partially
evaluated form
>>> data = [(1, 2), (11, 22), (111, 222)])
>>> map(lambda seq: get(0, seq), data)
[1, 11, 111]
The curried version allows simple expression of partial evaluation
>>> map(get(0), data)
[1, 11, 111]
See Also:
toolz.functoolz.curry
"""
import toolz
from .functoolz import curry
import inspect
def nargs(f):
try:
return len(inspect.getargspec(f).args)
except TypeError:
return None
exceptions = set((toolz.map, toolz.filter))
def should_curry(f):
return (callable(f) and nargs(f) and nargs(f) > 1
or f in exceptions)
d = dict((name, curry(f) if '__' not in name and should_curry(f) else f)
for name, f in toolz.__dict__.items())
locals().update(d)
|
"""
Alternate namespece for toolz such that all functions are curried
Currying provides implicit partial evaluation of all functions
Example:
Get usually requires two arguments, an index and a collection
>>> from toolz.curried import get
>>> get(0, ('a', 'b'))
'a'
When we use it in higher order functions we often want to pass a partially
evaluated form
>>> data = [(1, 2), (11, 22), (111, 222)])
>>> map(lambda seq: get(0, seq), data)
[1, 11, 111]
The curried version allows simple expression of partial evaluation
>>> map(get(0), data)
[1, 11, 111]
See Also:
toolz.functoolz.curry
"""
import toolz
from .functoolz import curry
import inspect
def nargs(f):
try:
return len(inspect.getargspec(f).args)
except TypeError:
return None
def should_curry(f):
return callable(f) and nargs(f) and nargs(f) > 1
d = dict((name, curry(f) if '__' not in name and should_curry(f) else f)
for name, f in toolz.__dict__.items())
locals().update(d)
|
bsd-3-clause
|
Python
|
778bab1b4f57eb03137c00203d7b5f32c018ca83
|
fix error
|
robinchenyu/imagepaste
|
ImagePaste.py
|
ImagePaste.py
|
# import sublime
import sublime_plugin
import os
import sys
package_file = os.path.normpath(os.path.abspath(__file__))
package_path = os.path.dirname(package_file)
lib_path = os.path.join(package_path, "lib")
if lib_path not in sys.path:
sys.path.append(lib_path)
print(sys.path)
from PIL import ImageGrab
from PIL import ImageFile
class ImagePasteCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
rel_fn = self.paste()
if not rel_fn:
view.run_command("paste")
return
for pos in view.sel():
# print("scope name: %r" % (view.scope_name(pos.begin())))
if 'text.html.markdown' in view.scope_name(pos.begin()):
view.insert(edit, pos.begin(), "" % rel_fn)
else:
view.insert(edit, pos.begin(), "%s" % rel_fn)
# only the first cursor add the path
break
def paste(self):
ImageFile.LOAD_TRUNCATED_IMAGES = True
im = ImageGrab.grabclipboard()
if im:
abs_fn, rel_fn = self.get_filename()
im.save(abs_fn,'PNG')
return rel_fn
else:
print('clipboard buffer is not image!')
return None
def get_filename(self):
view = self.view
filename = view.file_name()
# create dir in current path with the name of current filename
dirname, _ = os.path.splitext(filename)
# create new image file under currentdir/filename_without_ext/filename_without_ext%d.png
fn_without_ext = os.path.basename(dirname)
if not os.path.lexists(dirname):
os.mkdir(dirname)
i = 0
while True:
# relative file path
rel_filename = os.path.join("%s/%s%d.png" % (fn_without_ext, fn_without_ext, i))
# absolute file path
abs_filename = os.path.join(dirname, "%s%d.png" % ( fn_without_ext, i))
if not os.path.exists(abs_filename):
break
i += 1
print("save file: " + abs_filename + "\nrel " + rel_filename)
return abs_filename, rel_filename
|
# import sublime
import sublime_plugin
import os
package_file = os.path.normpath(os.path.abspath(__file__))
package_path = os.path.dirname(package_file)
lib_path = os.path.join(package_path, "lib")
if lib_path not in sys.path:
sys.path.append(lib_path)
print(sys.path)
from PIL import ImageGrab
from PIL import ImageFile
class ImagePasteCommand(sublime_plugin.TextCommand):
def run(self, edit):
view = self.view
rel_fn = self.paste()
if not rel_fn:
view.run_command("paste")
return
for pos in view.sel():
# print("scope name: %r" % (view.scope_name(pos.begin())))
if 'text.html.markdown' in view.scope_name(pos.begin()):
view.insert(edit, pos.begin(), "" % rel_fn)
else:
view.insert(edit, pos.begin(), "%s" % rel_fn)
# only the first cursor add the path
break
def paste(self):
ImageFile.LOAD_TRUNCATED_IMAGES = True
im = ImageGrab.grabclipboard()
if im:
abs_fn, rel_fn = self.get_filename()
im.save(abs_fn,'PNG')
return rel_fn
else:
print('clipboard buffer is not image!')
return None
def get_filename(self):
view = self.view
filename = view.file_name()
# create dir in current path with the name of current filename
dirname, _ = os.path.splitext(filename)
# create new image file under currentdir/filename_without_ext/filename_without_ext%d.png
fn_without_ext = os.path.basename(dirname)
if not os.path.lexists(dirname):
os.mkdir(dirname)
i = 0
while True:
# relative file path
rel_filename = os.path.join("%s/%s%d.png" % (fn_without_ext, fn_without_ext, i))
# absolute file path
abs_filename = os.path.join(dirname, "%s%d.png" % ( fn_without_ext, i))
if not os.path.exists(abs_filename):
break
i += 1
print("save file: " + abs_filename + "\nrel " + rel_filename)
return abs_filename, rel_filename
|
mit
|
Python
|
734457ed995a3dfcacf8556ed4e98e7536e63a66
|
Fix typos
|
opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor
|
nodeconductor/openstack/management/commands/initsecuritygroups.py
|
nodeconductor/openstack/management/commands/initsecuritygroups.py
|
from __future__ import unicode_literals
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from nodeconductor.openstack import models, executors, handlers
class Command(BaseCommand):
help_text = "Add default security groups with given names to all tenants."
def add_arguments(self, parser):
parser.add_argument('names', nargs='+', type=str)
def handle(self, *args, **options):
names = options['names']
default_security_groups = getattr(settings, 'NODECONDUCTOR', {}).get('DEFAULT_SECURITY_GROUPS')
security_groups = []
for name in names:
try:
group = next(sg for sg in default_security_groups if sg['name'] == name)
except StopIteration:
raise CommandError('There is no default security group with name %s' % name)
else:
security_groups.append(group)
for spl in models.OpenStackServiceProjectLink.objects.all():
if not spl.tenant:
continue
for group in security_groups:
if spl.security_groups.filter(name=group['name']).exists():
self.stdout.write('Tenant %s already has security group %s' % (spl.tenant, group['name']))
continue
spl.security_groups.create(name=group['name'], description=group['description'])
try:
db_security_group = handlers.create_security_group(spl, group)
except handlers.SecurityGroupCreateException as e:
self.stdout.write(
'Failed to add security_group %s to tenant %s. Error: %s' % (group['name'], spl.teannt, e))
else:
executors.SecurityGroupCreateExecutor.execute(db_security_group, async=False)
self.stdout.write(
'Security group %s has been successfully added to tenant %s' % (group['name'], spl.tenant))
|
from __future__ import unicode_literals
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from nodeconductor.openstack import models, executors, handlers
class Command(BaseCommand):
help_text = "Add default security groups with given names to all tenants to tenants."
def add_arguments(self, parser):
parser.add_argument('names', nargs='+', type=str)
def handle(self, *args, **options):
names = options['names']
default_security_groups = getattr(settings, 'NODECONDUCTOR', {}).get('DEFAULT_SECURITY_GROUPS')
security_groups = []
for name in names:
try:
group = next(sg for sg in default_security_groups if sg['name'] == name)
except StopIteration:
raise CommandError('There is no default security group with name %s' % name)
else:
security_groups.append(group)
for spl in models.OpenStackServiceProjectLink.objects.all():
if not spl.tenant:
continue
for group in security_groups:
if spl.security_groups.filter(name=group['name']).exists():
self.stdout.write('Tenant %s already have security group %s' % (spl.tenant, group['name']))
continue
spl.security_groups.create(name=group['name'], description=group['description'])
try:
db_security_group = handlers.create_security_group(spl, group)
except handlers.SecurityGroupCreateException as e:
self.stdout.write(
'Failed to add security_group %s to tenant %s. Error: %s' % (group['name'], spl.teannt, e))
else:
executors.SecurityGroupCreateExecutor.execute(db_security_group, async=False)
self.stdout.write(
'Security group %s has been successfully added to tenant %s' % (group['name'], spl.tenant))
|
mit
|
Python
|
8463c22898210990e911580d217559efdbbfe5d7
|
Make disk space test optional
|
tst-mswartz/earthenterprise,google/earthenterprise,tst-ppenev/earthenterprise,tst-mswartz/earthenterprise,tst-ccamp/earthenterprise,google/earthenterprise,tst-ccamp/earthenterprise,tst-ccamp/earthenterprise,google/earthenterprise,tst-mswartz/earthenterprise,tst-mswartz/earthenterprise,tst-lsavoie/earthenterprise,google/earthenterprise,tst-eclamar/earthenterprise,tst-eclamar/earthenterprise,google/earthenterprise,tst-ppenev/earthenterprise,tst-eclamar/earthenterprise,tst-eclamar/earthenterprise,google/earthenterprise,tst-ppenev/earthenterprise,tst-lsavoie/earthenterprise,tst-eclamar/earthenterprise,tst-lsavoie/earthenterprise,tst-eclamar/earthenterprise,tst-mswartz/earthenterprise,tst-mswartz/earthenterprise,tst-eclamar/earthenterprise,tst-ppenev/earthenterprise,tst-ppenev/earthenterprise,tst-ccamp/earthenterprise,tst-ccamp/earthenterprise,tst-mswartz/earthenterprise,tst-ccamp/earthenterprise,tst-lsavoie/earthenterprise,tst-ccamp/earthenterprise,tst-lsavoie/earthenterprise,tst-lsavoie/earthenterprise,tst-mswartz/earthenterprise,tst-lsavoie/earthenterprise,tst-ccamp/earthenterprise,tst-eclamar/earthenterprise,tst-ccamp/earthenterprise,tst-ppenev/earthenterprise,tst-eclamar/earthenterprise,tst-lsavoie/earthenterprise,google/earthenterprise,google/earthenterprise,tst-ppenev/earthenterprise,tst-ppenev/earthenterprise,tst-ppenev/earthenterprise
|
earth_enterprise/src/fusion/portableglobe/cutter/cgi-bin/geecheck_tests/user_tests/disk_space_test.py
|
earth_enterprise/src/fusion/portableglobe/cutter/cgi-bin/geecheck_tests/user_tests/disk_space_test.py
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import xml.etree.ElementTree as ET
from geecheck_tests import common
# Need to use unittest2 for Python 2.6.
try:
import unittest2 as unittest
except ImportError:
import unittest
def getDiskInfo():
"""Returns disk usage represented as percent of total available."""
tree = ET.parse('/etc/opt/google/systemrc')
root = tree.getroot()
sys_rc = {}
for child in root:
sys_rc[child.tag] = child.text
asset_root = sys_rc["assetroot"];
mount_point = getMountPoint(asset_root)
available_space, size = getFsFreespace(mount_point)
percentage_used = (size - available_space) * 100 / size
return percentage_used
def getMountPoint(pathname):
"""Get the mount point of the filesystem containing pathname."""
pathname = os.path.normcase(os.path.realpath(pathname))
parent_device = path_device = os.stat(pathname).st_dev
while parent_device == path_device:
mount_point = pathname
pathname = os.path.dirname(pathname)
if pathname == mount_point:
break
return mount_point
def getFsFreespace(pathname):
"""Get the free space of the filesystem containing pathname."""
statvfs = os.statvfs(pathname)
# Size of filesystem in bytes
size = statvfs.f_frsize * statvfs.f_blocks
# Number of free bytes that ordinary users are allowed to use.
avail = statvfs.f_frsize * statvfs.f_bavail
return avail, size
class TestDiskSpace(unittest.TestCase):
@unittest.skipUnless(common.IsFusionInstalled(), 'Fusion is not installed')
def testAdequateDiskSpace(self):
"""Check that the remaining disk space is at least 20%."""
self.assertLessEqual(20, getDiskInfo())
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import xml.etree.ElementTree as ET
# Need to use unittest2 for Python 2.6.
try:
import unittest2 as unittest
except ImportError:
import unittest
def getDiskInfo():
"""Returns disk usage represented as percent of total available."""
tree = ET.parse('/etc/opt/google/systemrc')
root = tree.getroot()
sys_rc = {}
for child in root:
sys_rc[child.tag] = child.text
asset_root = sys_rc["assetroot"];
mount_point = getMountPoint(asset_root)
available_space, size = getFsFreespace(mount_point)
percentage_used = (size - available_space) * 100 / size
return percentage_used
def getMountPoint(pathname):
"""Get the mount point of the filesystem containing pathname."""
pathname = os.path.normcase(os.path.realpath(pathname))
parent_device = path_device = os.stat(pathname).st_dev
while parent_device == path_device:
mount_point = pathname
pathname = os.path.dirname(pathname)
if pathname == mount_point:
break
return mount_point
def getFsFreespace(pathname):
"""Get the free space of the filesystem containing pathname."""
statvfs = os.statvfs(pathname)
# Size of filesystem in bytes
size = statvfs.f_frsize * statvfs.f_blocks
# Number of free bytes that ordinary users are allowed to use.
avail = statvfs.f_frsize * statvfs.f_bavail
return avail, size
class TestDiskSpace(unittest.TestCase):
def testAdequateDiskSpace(self):
"""Check that the remaining disk space is at least 20%."""
self.assertLessEqual(20, getDiskInfo())
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
Python
|
a602ed873d71253723f07dfa043d959cd247d734
|
Add latest version of py-typing (#13287)
|
LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack
|
var/spack/repos/builtin/packages/py-typing/package.py
|
var/spack/repos/builtin/packages/py-typing/package.py
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTyping(PythonPackage):
"""This is a backport of the standard library typing module to Python
versions older than 3.6."""
homepage = "https://docs.python.org/3/library/typing.html"
url = "https://pypi.io/packages/source/t/typing/typing-3.7.4.1.tar.gz"
import_modules = ['typing']
version('3.7.4.1', sha256='91dfe6f3f706ee8cc32d38edbbf304e9b7583fb37108fef38229617f8b3eba23')
version('3.6.4', sha256='d400a9344254803a2368533e4533a4200d21eb7b6b729c173bc38201a74db3f2')
version('3.6.1', sha256='c36dec260238e7464213dcd50d4b5ef63a507972f5780652e835d0228d0edace')
depends_on('python@2.7:2.8,3.4:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTyping(PythonPackage):
"""This is a backport of the standard library typing module to Python
versions older than 3.6."""
homepage = "https://docs.python.org/3/library/typing.html"
url = "https://pypi.io/packages/source/t/typing/typing-3.6.1.tar.gz"
import_modules = ['typing']
version('3.6.4', sha256='d400a9344254803a2368533e4533a4200d21eb7b6b729c173bc38201a74db3f2')
version('3.6.1', sha256='c36dec260238e7464213dcd50d4b5ef63a507972f5780652e835d0228d0edace')
# You need Python 2.7 or 3.3+ to install the typing package
depends_on('python@2.7:2.8,3.3:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
|
lgpl-2.1
|
Python
|
6641fd1275c27dfb27787ed25b80af3b6ba14b9f
|
debug by further reduction
|
sunjerry019/photonLauncher,sunjerry019/photonLauncher,sunjerry019/photonLauncher,sunjerry019/photonLauncher,sunjerry019/photonLauncher
|
apdflash/scarecrowDreams.py
|
apdflash/scarecrowDreams.py
|
print "hellow world"
|
import sys,os
sys.path.insert(0, '../helpers')
from mpi4py import MPI
|
apache-2.0
|
Python
|
14b1f9bde45b66f8752778469f1daae77b49f4e0
|
Add comment
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
bluebottle/bb_orders/signals.py
|
bluebottle/bb_orders/signals.py
|
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.dispatch.dispatcher import Signal
from django_fsm.signals import post_transition
from bluebottle.donations.models import Donation
from bluebottle.payments.models import OrderPayment
from bluebottle.payments.services import PaymentService
from bluebottle.utils.utils import StatusDefinition
order_requested = Signal(providing_args=["order"])
@receiver(post_save, weak=False, sender=Donation,
dispatch_uid='donation_model')
def update_order_amount_post_save(sender, instance, **kwargs):
instance.order.update_total()
@receiver(post_delete, weak=False, sender=Donation,
dispatch_uid='donation_model')
def update_order_amount(sender, instance, **kwargs):
# If we're deleting order and donations do nothing.
# If we're just deleting a donation then we should update the order total.
# Import it here to avoid circular imports
from bluebottle.orders.models import Order
try:
instance.order.update_total()
except Order.DoesNotExist:
pass
@receiver(post_transition, sender=OrderPayment)
def _order_payment_status_changed(sender, instance, **kwargs):
"""
TODO: Here we need to get the status from the Order Payment and update the
associated Order.
"""
# Get the Order from the OrderPayment
order = instance.order
# Get the mapped status OrderPayment to Order
new_order_status = order.get_status_mapping(kwargs['target'])
order.transition_to(new_order_status)
@receiver(order_requested)
def _order_requested(sender, order, **kwargs):
# Check the status at PSP if status is still locked
if order.status == StatusDefinition.LOCKED:
order_payment = OrderPayment.get_latest_by_order(order)
service = PaymentService(order_payment)
service.check_payment_status()
|
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.dispatch.dispatcher import Signal
from django_fsm.signals import post_transition
from bluebottle.donations.models import Donation
from bluebottle.payments.models import OrderPayment
from bluebottle.payments.services import PaymentService
from bluebottle.utils.utils import StatusDefinition
order_requested = Signal(providing_args=["order"])
@receiver(post_save, weak=False, sender=Donation,
dispatch_uid='donation_model')
def update_order_amount_post_save(sender, instance, **kwargs):
instance.order.update_total()
@receiver(post_delete, weak=False, sender=Donation,
dispatch_uid='donation_model')
def update_order_amount(sender, instance, **kwargs):
# If we're deleting order and donations do nothing.
# If we're just deleting a donation then we should update the order total.
from bluebottle.orders.models import Order
try:
instance.order.update_total()
except Order.DoesNotExist:
pass
@receiver(post_transition, sender=OrderPayment)
def _order_payment_status_changed(sender, instance, **kwargs):
"""
TODO: Here we need to get the status from the Order Payment and update the
associated Order.
"""
# Get the Order from the OrderPayment
order = instance.order
# Get the mapped status OrderPayment to Order
new_order_status = order.get_status_mapping(kwargs['target'])
order.transition_to(new_order_status)
@receiver(order_requested)
def _order_requested(sender, order, **kwargs):
# Check the status at PSP if status is still locked
if order.status == StatusDefinition.LOCKED:
order_payment = OrderPayment.get_latest_by_order(order)
service = PaymentService(order_payment)
service.check_payment_status()
|
bsd-3-clause
|
Python
|
de6babf92252ea5828a9c17d76766357cff3e440
|
Extend _VALID_URL (Closes #10812)
|
steebchen/youtube-dl,erikdejonge/youtube-dl,hakatashi/youtube-dl,oskar456/youtube-dl,pim89/youtube-dl,jbuchbinder/youtube-dl,luceatnobis/youtube-dl,malept/youtube-dl,nickleefly/youtube-dl,dstftw/youtube-dl,remitamine/youtube-dl,longman694/youtube-dl,marcwebbie/youtube-dl,longman694/youtube-dl,yan12125/youtube-dl,malept/youtube-dl,aeph6Ee0/youtube-dl,steebchen/youtube-dl,Orochimarufan/youtube-dl,ping/youtube-dl,rg3/youtube-dl,Orochimarufan/youtube-dl,nyuszika7h/youtube-dl,marcwebbie/youtube-dl,hakatashi/youtube-dl,Tatsh/youtube-dl,gkoelln/youtube-dl,Tatsh/youtube-dl,dstftw/youtube-dl,stannynuytkens/youtube-dl,vinegret/youtube-dl,phihag/youtube-dl,vijayanandnandam/youtube-dl,epitron/youtube-dl,kidburglar/youtube-dl,remitamine/youtube-dl,vijayanandnandam/youtube-dl,unreal666/youtube-dl,yan12125/youtube-dl,spvkgn/youtube-dl,rrooij/youtube-dl,jbuchbinder/youtube-dl,luceatnobis/youtube-dl,vinegret/youtube-dl,ozburo/youtube-dl,fluxw42/youtube-dl,pim89/youtube-dl,codesparkle/youtube-dl,epitron/youtube-dl,phihag/youtube-dl,rg3/youtube-dl,TRox1972/youtube-dl,gnowxilef/youtube-dl,gkoelln/youtube-dl,Tithen-Firion/youtube-dl,coreynicholson/youtube-dl,nickleefly/youtube-dl,erikdejonge/youtube-dl,coreynicholson/youtube-dl,gnowxilef/youtube-dl,TRox1972/youtube-dl,aeph6Ee0/youtube-dl,unreal666/youtube-dl,oskar456/youtube-dl,codesparkle/youtube-dl,ping/youtube-dl,nyuszika7h/youtube-dl,rrooij/youtube-dl,Tithen-Firion/youtube-dl,stannynuytkens/youtube-dl,kidburglar/youtube-dl,ozburo/youtube-dl,fluxw42/youtube-dl,spvkgn/youtube-dl
|
youtube_dl/extractor/tvland.py
|
youtube_dl/extractor/tvland.py
|
# coding: utf-8
from __future__ import unicode_literals
from .mtv import MTVServicesInfoExtractor
class TVLandIE(MTVServicesInfoExtractor):
IE_NAME = 'tvland.com'
_VALID_URL = r'https?://(?:www\.)?tvland\.com/(?:video-clips|(?:full-)?episodes)/(?P<id>[^/?#.]+)'
_FEED_URL = 'http://www.tvland.com/feeds/mrss/'
_TESTS = [{
# Geo-restricted. Without a proxy metadata are still there. With a
# proxy it redirects to http://m.tvland.com/app/
'url': 'http://www.tvland.com/episodes/hqhps2/everybody-loves-raymond-the-invasion-ep-048',
'info_dict': {
'description': 'md5:80973e81b916a324e05c14a3fb506d29',
'title': 'The Invasion',
},
'playlist': [],
}, {
'url': 'http://www.tvland.com/video-clips/zea2ev/younger-younger--hilary-duff---little-lies',
'md5': 'e2c6389401cf485df26c79c247b08713',
'info_dict': {
'id': 'b8697515-4bbe-4e01-83d5-fa705ce5fa88',
'ext': 'mp4',
'title': 'Younger|December 28, 2015|2|NO-EPISODE#|Younger: Hilary Duff - Little Lies',
'description': 'md5:7d192f56ca8d958645c83f0de8ef0269',
'upload_date': '20151228',
'timestamp': 1451289600,
},
}, {
'url': 'http://www.tvland.com/full-episodes/iu0hz6/younger-a-kiss-is-just-a-kiss-season-3-ep-301',
'only_matching': True,
}]
|
# coding: utf-8
from __future__ import unicode_literals
from .mtv import MTVServicesInfoExtractor
class TVLandIE(MTVServicesInfoExtractor):
IE_NAME = 'tvland.com'
_VALID_URL = r'https?://(?:www\.)?tvland\.com/(?:video-clips|episodes)/(?P<id>[^/?#.]+)'
_FEED_URL = 'http://www.tvland.com/feeds/mrss/'
_TESTS = [{
# Geo-restricted. Without a proxy metadata are still there. With a
# proxy it redirects to http://m.tvland.com/app/
'url': 'http://www.tvland.com/episodes/hqhps2/everybody-loves-raymond-the-invasion-ep-048',
'info_dict': {
'description': 'md5:80973e81b916a324e05c14a3fb506d29',
'title': 'The Invasion',
},
'playlist': [],
}, {
'url': 'http://www.tvland.com/video-clips/zea2ev/younger-younger--hilary-duff---little-lies',
'md5': 'e2c6389401cf485df26c79c247b08713',
'info_dict': {
'id': 'b8697515-4bbe-4e01-83d5-fa705ce5fa88',
'ext': 'mp4',
'title': 'Younger|December 28, 2015|2|NO-EPISODE#|Younger: Hilary Duff - Little Lies',
'description': 'md5:7d192f56ca8d958645c83f0de8ef0269',
'upload_date': '20151228',
'timestamp': 1451289600,
},
}]
|
unlicense
|
Python
|
a34d08cec2cdcf259070ca51c69dcd425a04c5be
|
move use_container into execkwargs
|
dleehr/cwltool,common-workflow-language/cwltool,dleehr/cwltool,dleehr/cwltool,common-workflow-language/cwltool,common-workflow-language/cwltool,dleehr/cwltool
|
tests/util.py
|
tests/util.py
|
from __future__ import absolute_import
import os
import functools
from pkg_resources import (Requirement, ResolutionError, # type: ignore
resource_filename)
import distutils.spawn
import pytest
from cwltool.utils import onWindows, windows_default_container_id
from cwltool.factory import Factory
def get_windows_safe_factory(**execkwargs):
if onWindows():
makekwargs = {'find_default_container': functools.partial(
force_default_container, windows_default_container_id),
'use_container': True}
execkwargs['default_container': windows_default_container_id]
else:
opts = {}
return Factory(makekwargs=makekwargs, **execkwargs)
def force_default_container(default_container_id, builder):
return default_container_id
def get_data(filename):
filename = os.path.normpath(
filename) # normalizing path depending on OS or else it will cause problem when joining path
filepath = None
try:
filepath = resource_filename(
Requirement.parse("cwltool"), filename)
except ResolutionError:
pass
if not filepath or not os.path.isfile(filepath):
filepath = os.path.join(os.path.dirname(__file__), os.pardir, filename)
# warning, __file__ is all lowercase on Windows systems, this can
# sometimes conflict with docker toolkit. Workaround: pip install .
# and run the tests elsewhere via python -m pytest --pyarg cwltool
return filepath
needs_docker = pytest.mark.skipif(not bool(distutils.spawn.find_executable('docker')),
reason="Requires the docker executable on the "
"system path.")
|
from __future__ import absolute_import
import os
import functools
from pkg_resources import (Requirement, ResolutionError, # type: ignore
resource_filename)
import distutils.spawn
import pytest
from cwltool.utils import onWindows, windows_default_container_id
from cwltool.factory import Factory
def get_windows_safe_factory(**execkwargs):
if onWindows():
opts = {'find_default_container': functools.partial(
force_default_container, windows_default_container_id),
'use_container': True,
'default_container': windows_default_container_id}
else:
opts = {}
return Factory(makekwargs=opts, **execkwargs)
def force_default_container(default_container_id, builder):
return default_container_id
def get_data(filename):
filename = os.path.normpath(
filename) # normalizing path depending on OS or else it will cause problem when joining path
filepath = None
try:
filepath = resource_filename(
Requirement.parse("cwltool"), filename)
except ResolutionError:
pass
if not filepath or not os.path.isfile(filepath):
filepath = os.path.join(os.path.dirname(__file__), os.pardir, filename)
# warning, __file__ is all lowercase on Windows systems, this can
# sometimes conflict with docker toolkit. Workaround: pip install .
# and run the tests elsewhere via python -m pytest --pyarg cwltool
return filepath
needs_docker = pytest.mark.skipif(not bool(distutils.spawn.find_executable('docker')),
reason="Requires the docker executable on the "
"system path.")
|
apache-2.0
|
Python
|
b06863b3bd9b12c47380362b3d4182167a6d2eaa
|
Update openssl.py
|
vadimkantorov/wigwam
|
wigs/openssl.py
|
wigs/openssl.py
|
class openssl(Wig):
tarball_uri = 'https://github.com/openssl/openssl/archive/OpenSSL_$RELEASE_VERSION$.tar.gz'
git_uri = 'https://github.com/openssl/openssl'
last_release_version = 'v1_1_0e'
def setup(self):
self.configure_flags += [S.FPIC_FLAG]
def gen_configure_snippet(self):
return './config %s' % ' '.join(self.configure_flags)
|
class openssl(Wig):
tarball_uri = 'https://github.com/openssl/openssl/archive/OpenSSL_$RELEASE_VERSION$.tar.gz'
git_uri = 'https://github.com/openssl/openssl'
last_release_version = 'v1_0_2d'
def setup(self):
self.configure_flags += [S.FPIC_FLAG]
def gen_configure_snippet(self):
return './config %s' % ' '.join(self.configure_flags)
|
mit
|
Python
|
66c4b93ae78c98928946f0ceeee3a2c16be7655d
|
Add coding line
|
MichaelCurrin/twitterverse,MichaelCurrin/twitterverse
|
app/tests/integration/test_database.py
|
app/tests/integration/test_database.py
|
# -*- coding: utf-8 -*-
"""
Database tests.
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from unittest import TestCase
from lib import database
from models.trends import Trend
class TestDatabaseSetup(TestCase):
"""
Test the database library module.
"""
def tearDown(self):
database._dropTables(verbose=False)
def test_drop(self):
database._dropTables()
def test_create(self):
database._createTables()
def test_baseLabels(self):
database._createTables(verbose=False)
database._baseLabels()
def test_populate(self):
database._createTables(verbose=False)
limit = 1
database._populate(limit)
class TestModel(TestCase):
"""
Test ORM operations on the SQL database.
In particular, edgecases such as unicode character handling.
"""
def tearDown(self):
database._dropTables(verbose=False)
def test_insert(self):
database._dropTables(verbose=False)
database._createTables(verbose=False)
database._baseLabels()
t = Trend(topic="abc", volume=1)
self.assertEqual(t.topic, "abc")
self.assertEqual(t.volume, 1)
t = Trend(topic="a b Ç 😊", volume=1000)
self.assertEqual(t.topic, "a b Ç 😊")
database._dropTables(verbose=False)
|
"""
Database tests.
"""
from __future__ import unicode_literals
from __future__ import absolute_import
from unittest import TestCase
from lib import database
from models.trends import Trend
class TestDatabaseSetup(TestCase):
"""
Test the database library module.
"""
def tearDown(self):
database._dropTables(verbose=False)
def test_drop(self):
database._dropTables()
def test_create(self):
database._createTables()
def test_baseLabels(self):
database._createTables(verbose=False)
database._baseLabels()
def test_populate(self):
database._createTables(verbose=False)
limit = 1
database._populate(limit)
class TestModel(TestCase):
"""
Test ORM operations on the SQL database.
In particular, edgecases such as unicode character handling.
"""
def tearDown(self):
database._dropTables(verbose=False)
def test_insert(self):
database._dropTables(verbose=False)
database._createTables(verbose=False)
database._baseLabels()
t = Trend(topic="abc", volume=1)
self.assertEqual(t.topic, "abc")
self.assertEqual(t.volume, 1)
t = Trend(topic="a b Ç 😊", volume=1000)
self.assertEqual(t.topic, "a b Ç 😊")
database._dropTables(verbose=False)
|
mit
|
Python
|
005eea5e467c3a0aa6b942ce377a5c72b9177e21
|
Fix build_lines() - s/bw/image
|
ijks/textinator
|
textinator.py
|
textinator.py
|
import click
from PIL import Image
@click.command()
@click.argument('image', type=click.File('rb'))
@click.argument('out', type=click.File('wt'), default='-',
required=False)
@click.option('-p', '--palette', default='█▓▒░ ',
help="A custom palette for rendering images. Goes from dark to bright.")
@click.option('-w', '--width', type=click.INT,
help="Width of output. If height is not given, the image will be proportionally scaled.")
@click.option('-h', '--height', type=click.INT,
help="Height of output. If width is not given, the image will be proportionally scaled.")
@click.option('--correct/--no-correct', default=True,
help="Wether to account for the proportions of monospaced characters. On by default.")
@click.option('--resample', default='nearest',
type=click.Choice(['nearest', 'bilinear', 'bicubic', 'antialias']),
help="Filter to use for resampling. Default is nearest.")
@click.option('--newlines/--no-newlines', default=False,
help="Wether to add a newline after each row.")
def convert(image, out, width, height,
palette, resample, correct, newlines):
"""
Converts an input image to a text representation.
Writes to stdout by default. Optionally takes another file as a second output.
Supports most filetypes, except JPEG.
For that you need to install libjpeg.
For more info see:\n
http://pillow.readthedocs.org/installation.html#external-libraries
"""
if not width or height:
width, height = 80, 24
if width and not height:
height = width
if height and not width:
width = height
original = Image.open(image)
resized = original.copy()
resized.thumbnail((height, width))
bw = resized.convert(mode="L")
for line in build_lines(bw, newlines):
click.echo(line)
def build_lines(image, newlines=True):
width, height = image.size
for y in range(height):
line = ''
for x in range(width):
pixel = image.getpixel((x, y))
line += value_to_char(pixel, palette)
if newlines:
line += '\n'
yield line
def value_to_char(value, palette, value_range=(0, 256)):
palette_range = (0, len(palette))
mapped = int(scale(value, value_range, palette_range))
return palette[mapped]
def scale(val, src, dst):
"""
Scale the given value from the scale of src to the scale of dst.
"""
return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]
|
import click
from PIL import Image
@click.command()
@click.argument('image', type=click.File('rb'))
@click.argument('out', type=click.File('wt'), default='-',
required=False)
@click.option('-p', '--palette', default='█▓▒░ ',
help="A custom palette for rendering images. Goes from dark to bright.")
@click.option('-w', '--width', type=click.INT,
help="Width of output. If height is not given, the image will be proportionally scaled.")
@click.option('-h', '--height', type=click.INT,
help="Height of output. If width is not given, the image will be proportionally scaled.")
@click.option('--correct/--no-correct', default=True,
help="Wether to account for the proportions of monospaced characters. On by default.")
@click.option('--resample', default='nearest',
type=click.Choice(['nearest', 'bilinear', 'bicubic', 'antialias']),
help="Filter to use for resampling. Default is nearest.")
@click.option('--newlines/--no-newlines', default=False,
help="Wether to add a newline after each row.")
def convert(image, out, width, height,
palette, resample, correct, newlines):
"""
Converts an input image to a text representation.
Writes to stdout by default. Optionally takes another file as a second output.
Supports most filetypes, except JPEG.
For that you need to install libjpeg.
For more info see:\n
http://pillow.readthedocs.org/installation.html#external-libraries
"""
if not width or height:
width, height = 80, 24
if width and not height:
height = width
if height and not width:
width = height
original = Image.open(image)
resized = original.copy()
resized.thumbnail((height, width))
bw = resized.convert(mode="L")
for line in build_lines(bw, newlines):
click.echo(line)
def build_lines(image, newlines=True):
width, height = image.size
for y in range(height):
line = ''
for x in range(width):
pixel = bw.getpixel((x, y))
line += value_to_char(pixel, palette)
if newlines:
line += '\n'
yield line
def value_to_char(value, palette, value_range=(0, 256)):
palette_range = (0, len(palette))
mapped = int(scale(value, value_range, palette_range))
return palette[mapped]
def scale(val, src, dst):
"""
Scale the given value from the scale of src to the scale of dst.
"""
return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]
|
mit
|
Python
|
1f130a8577f16809008bd301ab8c47aab4677750
|
Add build_lines function, move image generation there.
|
ijks/textinator
|
textinator.py
|
textinator.py
|
import click
from PIL import Image
def scale(val, src, dst):
"""
Scale the given value from the scale of src to the scale of dst.
"""
return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]
def value_to_char(value, palette, value_range=(0, 256)):
palette_range = (0, len(palette))
mapped = int(scale(value, value_range, palette_range))
return palette[mapped]
@click.command()
@click.argument('image', type=click.File('rb'))
@click.argument('out', type=click.File('wt'), default='-',
required=False, writable=True)
@click.option('-p', '--palette', default='█▓▒░ ',
help="A custom palette for rendering images. Goes from dark to bright.")
@click.option('-w', '--width', type=click.INT,
help="Width of output. If height is not given, the image will be proportionally scaled.")
@click.option('-h', '--height', type=click.INT,
help="Height of output. If width is not given, the image will be proportionally scaled.")
@click.option('--correct/--no-correct', default=True,
help="Wether to account for the proportions of monospaced characters. On by default.")
@click.option('--resample', default='nearest',
type=click.Choice(['nearest', 'bilinear', 'bicubic', 'antialias']),
help="Filter to use for resampling. Default is nearest.")
@click.option('--newlines/--no-newlines', default=False,
help="Wether to add a newline after each row.")
def convert(image, out, width, height,
palette, resample, correct, newlines):
"""
Converts an input image to a text representation.
Writes to stdout by default. Optionally takes another file as a second output.
Supports most filetypes, except JPEG.
For that you need to install libjpeg.
For more info see:\n
http://pillow.readthedocs.org/installation.html#external-libraries
"""
if not width or height:
width, height = 80, 24
if width and not height:
height = width
if height and not width:
width = height
original = Image.open(image)
resized = original.copy()
resized.thumbnail((height, width))
bw = resized.convert(mode="L")
for line in build_lines(bw, newlines):
click.echo(line)
def build_lines(image, newlines=True):
width, height = image.size
for y in range(height):
line = ''
for x in range(width):
pixel = bw.getpixel((x, y))
line += value_to_char(pixel, palette)
if newlines:
line += '\n'
yield line
|
import click
from PIL import Image
def scale(val, src, dst):
"""
Scale the given value from the scale of src to the scale of dst.
"""
return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]
def value_to_char(value, palette, value_range=(0, 256)):
palette_range = (0, len(palette))
mapped = int(scale(value, value_range, palette_range))
return palette[mapped]
@click.command()
@click.argument('image', type=click.File('rb'))
@click.argument('out', type=click.File('wt'), default='-',
required=False, writable=True)
@click.option('-p', '--palette', default='█▓▒░ ',
help="A custom palette for rendering images. Goes from dark to bright.")
@click.option('-w', '--width', type=click.INT,
help="Width of output. If height is not given, the image will be proportionally scaled.")
@click.option('-h', '--height', type=click.INT,
help="Height of output. If width is not given, the image will be proportionally scaled.")
@click.option('--correct/--no-correct', default=True,
help="Wether to account for the proportions of monospaced characters. On by default.")
@click.option('--resample', default='nearest',
type=click.Choice(['nearest', 'bilinear', 'bicubic', 'antialias']),
help="Filter to use for resampling. Default is nearest.")
@click.option('--newlines/--no-newlines', default=False,
help="Wether to add a newline after each row.")
def convert(image, out, width, height,
palette, resample, correct, newlines):
"""
Converts an input image to a text representation.
Writes to stdout by default. Optionally takes another file as a second output.
Supports most filetypes, except JPEG.
For that you need to install libjpeg.
For more info see:\n
http://pillow.readthedocs.org/installation.html#external-libraries
"""
if not width or height:
width, height = 80, 24
if width and not height:
height = width
if height and not width:
width = height
original = Image.open(image)
resized = original.copy()
resized.thumbnail((height, width))
bw = resized.convert(mode="L")
o_width, o_height = bw.size
for y in range(o_height):
line = ''
for x in range(o_width):
pixel = bw.getpixel((x, y))
line += value_to_char(pixel, palette)
click.echo(line)
|
mit
|
Python
|
bf7fd4e606901fae6a434e4a375ac72bcbc66e00
|
Fix plugin
|
qiita-spots/qp-target-gene
|
tgp/plugin.py
|
tgp/plugin.py
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import traceback
import sys
from os.path import exists, join, dirname, abspath
from os import makedirs, environ
from future import standard_library
from qiita_client import QiitaClient
from tgp.split_libraries import split_libraries, split_libraries_fastq
from tgp.pick_otus import pick_closed_reference_otus
with standard_library.hooks():
from configparser import ConfigParser
TASK_DICT = {
'Split libraries FASTQ': split_libraries_fastq,
'Split libraries': split_libraries,
'Pick closed-reference OTUs': pick_closed_reference_otus
}
def execute_job(server_url, job_id, output_dir):
"""Starts the plugin and executes the assigned task
Parameters
----------
server_url : str
The url of the server
job_id : str
The job id
Raises
------
RuntimeError
If there is a problem gathering the job information
"""
# Set up the Qiita Client
dflt_conf_fp = join(dirname(abspath(__file__)), 'support_files',
'config_file.cfg')
conf_fp = environ.get('QP_TARGET_GENE_CONFIG_FP', dflt_conf_fp)
config = ConfigParser()
with open(conf_fp, 'U') as conf_file:
config.readfp(conf_file)
qclient = QiitaClient(server_url, config.get('main', 'CLIENT_ID'),
config.get('main', 'CLIENT_SECRET'),
server_cert=config.get('main', 'SERVER_CERT'))
# Request job information. If there is a problem retrieving the job
# information, the QiitaClient already raises an error
job_info = qclient.get_job_info(job_id)
# Starting the heartbeat
qclient.start_heartbeat(job_id)
# Execute the given task
task_name = job_info['command']
task = TASK_DICT[task_name]
if not exists(output_dir):
makedirs(output_dir)
try:
success, artifacts_info, error_msg = task(
qclient, job_id, job_info['parameters'], output_dir)
except Exception:
exc_str = repr(traceback.format_exception(*sys.exc_info()))
error_msg = ("Error executing %s:\n%s" % (task_name, exc_str))
success = False
# The job completed
qclient.complete_job(job_id, success, error_msg=error_msg,
artifacts_info=artifacts_info)
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import traceback
import sys
from os.path import exists, join, dirname, abspath
from os import makedirs, environ
from future import standard_library
from qiita_client import QiitaClient, format_payload
from tgp.split_libraries import split_libraries, split_libraries_fastq
from tgp.pick_otus import pick_closed_reference_otus
with standard_library.hooks():
from configparser import ConfigParser
TASK_DICT = {
'Split libraries FASTQ': split_libraries_fastq,
'Split libraries': split_libraries,
'Pick closed-reference OTUs': pick_closed_reference_otus
}
def execute_job(server_url, job_id, output_dir):
"""Starts the plugin and executes the assigned task
Parameters
----------
server_url : str
The url of the server
job_id : str
The job id
Raises
------
RuntimeError
If there is a problem gathering the job information
"""
# Set up the Qiita Client
try:
conf_fp = environ['QP_TARGET_GENE_CONFIG_FP']
except KeyError:
conf_fp = join(dirname(abspath(__file__)), 'support_files',
'config_file.cfg')
config = ConfigParser()
with open(conf_fp, 'U') as conf_file:
config.readfp(conf_file)
qclient = QiitaClient(server_url, config.get('main', 'CLIENT_ID'),
config.get('main', 'CLIENT_SECRET'),
server_cert=config.get('main', 'SERVER_CERT'))
# Request job information
job_info = qclient.get_job_info(job_id)
# Check if we have received the job information so we can start it
if job_info and job_info['success']:
# Starting the heartbeat
qclient.start_heartbeat(job_id)
# Execute the given task
task_name = job_info['command']
task = TASK_DICT[task_name]
if not exists(output_dir):
makedirs(output_dir)
try:
payload = task(qclient, job_id, job_info['parameters'],
output_dir)
except Exception:
exc_str = repr(traceback.format_exception(*sys.exc_info()))
error_msg = ("Error executing %s:\n%s" % (task_name, exc_str))
payload = format_payload(False, error_msg=error_msg)
# The job completed
qclient.complete_job(job_id, payload)
else:
raise RuntimeError("Can't get job (%s) information" % job_id)
|
bsd-3-clause
|
Python
|
fe81916434e6aa04d9672589cb75fde3c676e19f
|
Fix revision chain
|
VinnieJohns/ggrc-core,kr41/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,plamut/ggrc-core,NejcZupec/ggrc-core,jmakov/ggrc-core,jmakov/ggrc-core,AleksNeStu/ggrc-core,prasannav7/ggrc-core,prasannav7/ggrc-core,josthkko/ggrc-core,edofic/ggrc-core,kr41/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,prasannav7/ggrc-core,AleksNeStu/ggrc-core,prasannav7/ggrc-core,edofic/ggrc-core,j0gurt/ggrc-core,j0gurt/ggrc-core,jmakov/ggrc-core,jmakov/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,jmakov/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core
|
src/ggrc/migrations/versions/20151216132037_5410607088f9_delete_background_tasks.py
|
src/ggrc/migrations/versions/20151216132037_5410607088f9_delete_background_tasks.py
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Delete background tasks
Revision ID: 5410607088f9
Revises: 1ef8f4f504ae
Create Date: 2015-12-16 13:20:37.341342
"""
# pylint: disable=C0103,E1101
from alembic import op
# revision identifiers, used by Alembic.
revision = '5410607088f9'
down_revision = '1ef8f4f504ae'
def upgrade():
"""Remove all entries from background_tasks"""
op.execute("truncate background_tasks")
def downgrade():
"""Remove all entries from background_tasks"""
op.execute("truncate background_tasks")
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Delete background tasks
Revision ID: 5410607088f9
Revises: 504f541411a5
Create Date: 2015-12-16 13:20:37.341342
"""
# pylint: disable=C0103,E1101
from alembic import op
# revision identifiers, used by Alembic.
revision = '5410607088f9'
down_revision = '504f541411a5'
def upgrade():
"""Remove all entries from background_tasks"""
op.execute("truncate background_tasks")
def downgrade():
"""Remove all entries from background_tasks"""
op.execute("truncate background_tasks")
|
apache-2.0
|
Python
|
07bda8adbeb798dfd100b63a784e14a00cf33927
|
add new views to urls
|
samitnuk/urlsaver_django,samitnuk/urlsaver_django
|
urlsaver/urls.py
|
urlsaver/urls.py
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.main_view, name='main'),
url(r'^register/', views.register_view, name='register'),
url(r'^login/', views.login_view, name='login'),
url(r'^logout/', views.logout_view, name='logout'),
]
|
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.main_view, name='main'),
]
|
mit
|
Python
|
38d47061b6c1ea3250b99f7376d7479e970974a5
|
define cheakInradius
|
mrnonz/Toc-2
|
MonteCarlo.py
|
MonteCarlo.py
|
from math import *
def checkInradius(x, y):
z = x**2 + y**2
z = sqrt(z)
if z < 1.0:
return True
else:
return False
N = int(raw_input('Insert your N (random) :: '))
|
from math import *
N = int(raw_input('Insert your N (random) :: '))
print N
|
mit
|
Python
|
33c1db03e6b52d73ee6571f3f645f1b8d01e9a25
|
Comment to clarify the use of a custom field source
|
nimiq/test-django-rest
|
snippets/serializers.py
|
snippets/serializers.py
|
from django.forms import widgets
from rest_framework import serializers
from snippets.models import Snippet, LANGUAGE_CHOICES, STYLE_CHOICES
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
# Add a filed to diplsay a list of related snippets.
snippets = serializers.PrimaryKeyRelatedField(many=True)
class Meta:
model = User
fields = ('id', 'username', 'snippets')
#class SnippetSerializer(serializers.Serializer):
# pk = serializers.Field() # Note: `Field` is an untyped read-only field.
# title = serializers.CharField(required=False,
# max_length=100)
# code = serializers.CharField(widget=widgets.Textarea,
# max_length=100000)
# linenos = serializers.BooleanField(required=False)
# language = serializers.ChoiceField(choices=LANGUAGE_CHOICES,
# default='python')
# style = serializers.ChoiceField(choices=STYLE_CHOICES,
# default='friendly')
#
# def restore_object(self, attrs, instance=None):
# """
# Create or update a new snippet instance, given a dictionary
# of deserialized field values.
#
# Note that if we don't define this method, then deserializing
# data will simply return a dictionary of items.
# """
# if instance:
# # Update existing instance
# instance.title = attrs.get('title', instance.title)
# instance.code = attrs.get('code', instance.code)
# instance.linenos = attrs.get('linenos', instance.linenos)
# instance.language = attrs.get('language', instance.language)
# instance.style = attrs.get('style', instance.style)
# return instance
#
# # Create new instance
# return Snippet(**attrs)
class SnippetSerializer(serializers.ModelSerializer):
# To make it more user-friendly, let's use the username instead of the default pk. This is
# optional, obviously.
owner = serializers.Field(source='owner.username')
class Meta:
model = Snippet
fields = ('id', 'title', 'code', 'linenos', 'language', 'style', 'owner')
|
from django.forms import widgets
from rest_framework import serializers
from snippets.models import Snippet, LANGUAGE_CHOICES, STYLE_CHOICES
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
# Add a filed to diplsay a list of related snippets.
snippets = serializers.PrimaryKeyRelatedField(many=True)
class Meta:
model = User
fields = ('id', 'username', 'snippets')
#class SnippetSerializer(serializers.Serializer):
# pk = serializers.Field() # Note: `Field` is an untyped read-only field.
# title = serializers.CharField(required=False,
# max_length=100)
# code = serializers.CharField(widget=widgets.Textarea,
# max_length=100000)
# linenos = serializers.BooleanField(required=False)
# language = serializers.ChoiceField(choices=LANGUAGE_CHOICES,
# default='python')
# style = serializers.ChoiceField(choices=STYLE_CHOICES,
# default='friendly')
#
# def restore_object(self, attrs, instance=None):
# """
# Create or update a new snippet instance, given a dictionary
# of deserialized field values.
#
# Note that if we don't define this method, then deserializing
# data will simply return a dictionary of items.
# """
# if instance:
# # Update existing instance
# instance.title = attrs.get('title', instance.title)
# instance.code = attrs.get('code', instance.code)
# instance.linenos = attrs.get('linenos', instance.linenos)
# instance.language = attrs.get('language', instance.language)
# instance.style = attrs.get('style', instance.style)
# return instance
#
# # Create new instance
# return Snippet(**attrs)
class SnippetSerializer(serializers.ModelSerializer):
owner = serializers.Field(source='owner.username')
class Meta:
model = Snippet
fields = ('id', 'title', 'code', 'linenos', 'language', 'style', 'owner')
|
apache-2.0
|
Python
|
2b553e23791adaa9e333d6f8feded8e95fd348c9
|
Bump version to 0.2.0a0
|
sdispater/cachy
|
cachy/version.py
|
cachy/version.py
|
# -*- coding: utf-8 -*-
VERSION = '0.2.0a0'
|
# -*- coding: utf-8 -*-
VERSION = '0.1.1'
|
mit
|
Python
|
a9240cd8bcfced47b402fdbff0162ad939eaa631
|
Fix typo
|
choderalab/yank,choderalab/yank
|
Yank/multistate/__init__.py
|
Yank/multistate/__init__.py
|
#!/usr/local/bin/env python
# ==============================================================================
# MODULE DOCSTRING
# ==============================================================================
"""
MultiState
==========
Multistate Sampling simulation algorithms, specific variants, and analyzers
This module provides a general facility for running multiple thermodynamic state multistate simulations, both general
as well as derived classes for special cases such as parallel tempering (in which
the states differ only in temperature).
The classes also provide
Provided classes include:
- :class:`yank.multistate.MultiStateSampler`
Base class for general, multi-thermodynamic state parallel multistate
- :class:`yank.multistate.ReplicaExchangeSampler`
Derived class from MultiStateSampler which allows sampled thermodynamic states
to swap based on Hamiltonian Replica Exchange
- :class:`yank.multistate.ParallelTemperingSampler`
Convenience subclass of ReplicaExchange for parallel tempering simulations
(one System object, many temperatures).
- :class:`yank.multistate.SAMSSampler`
Single-replica sampler which samples through multiple thermodynamic states
on the fly.
- :class:`yank.multistate.MultiStateReporter`
Replica Exchange reporter class to store all variables and data
Analyzers
---------
The MultiState module also provides analysis modules to analyze simulations and compute observables from data generated
under any of the MultiStateSampler's
Extending and Subclassing
-------------------------
Subclassing a sampler and analyzer is done by importing and extending any of the following:
* The base ``MultiStateSampler`` from ``multistatesampler``
* The base ``MultiStateReporter`` from ``multistatereporter``
* The base ``MultiStateAnalyzer`` or ``PhaseAnalyzer`` and base `ObservablesRegistry`` from ``multistateanalyzer``
COPYRIGHT
---------
Current version by Andrea Rizzi <andrea.rizzi@choderalab.org>, Levi N. Naden <levi.naden@choderalab.org> and
John D. Chodera <john.chodera@choderalab.org> while at Memorial Sloan Kettering Cancer Center.
Original version by John D. Chodera <jchodera@gmail.com> while at the University of
California Berkeley.
LICENSE
-------
This code is licensed under the latest available version of the MIT License.
"""
import warnings
warnings.warn("The yank.multistate package is deprecated and it will be "
"available as openmmtools.multistate with openmmtools >= 0.18",
DeprecationWarning, stacklevel=2)
from .multistatesampler import MultiStateSampler
from .multistatereporter import MultiStateReporter
from .replicaexchange import ReplicaExchangeSampler, ReplicaExchangeAnalyzer
from .paralleltempering import ParallelTemperingSampler, ParallelTemperingAnalyzer
from .sams import SAMSSampler, SAMSAnalyzer
from .multistateanalyzer import *
from .utils import *
|
#!/usr/local/bin/env python
# ==============================================================================
# MODULE DOCSTRING
# ==============================================================================
"""
MultiState
==========
Multistate Sampling simulation algorithms, specific variants, and analyzers
This module provides a general facility for running multiple thermodynamic state multistate simulations, both general
as well as derived classes for special cases such as parallel tempering (in which
the states differ only in temperature).
The classes also provide
Provided classes include:
- :class:`yank.multistate.MultiStateSampler`
Base class for general, multi-thermodynamic state parallel multistate
- :class:`yank.multistate.ReplicaExchangeSampler`
Derived class from MultiStateSampler which allows sampled thermodynamic states
to swap based on Hamiltonian Replica Exchange
- :class:`yank.multistate.ParallelTemperingSampler`
Convenience subclass of ReplicaExchange for parallel tempering simulations
(one System object, many temperatures).
- :class:`yank.multistate.SAMSSampler`
Single-replica sampler which samples through multiple thermodynamic states
on the fly.
- :class:`yank.multistate.MultiStateReporter`
Replica Exchange reporter class to store all variables and data
Analyzers
---------
The MultiState module also provides analysis modules to analyze simulations and compute observables from data generated
under any of the MultiStateSampler's
Extending and Subclassing
-------------------------
Subclassing a sampler and analyzer is done by importing and extending any of the following:
* The base ``MultiStateSampler`` from ``multistatesampler``
* The base ``MultiStateReporter`` from ``multistatereporter``
* The base ``MultiStateAnalyzer`` or ``PhaseAnalyzer`` and base `ObservablesRegistry`` from ``multistateanalyzer``
COPYRIGHT
---------
Current version by Andrea Rizzi <andrea.rizzi@choderalab.org>, Levi N. Naden <levi.naden@choderalab.org> and
John D. Chodera <john.chodera@choderalab.org> while at Memorial Sloan Kettering Cancer Center.
Original version by John D. Chodera <jchodera@gmail.com> while at the University of
California Berkeley.
LICENSE
-------
This code is licensed under the latest available version of the MIT License.
"""
import warning
warnings.warn("The yank.multistate package is deprecated and it will be "
"available as openmmtools.multistate with openmmtools >= 0.18",
DeprecationWarning, stacklevel=2)
from .multistatesampler import MultiStateSampler
from .multistatereporter import MultiStateReporter
from .replicaexchange import ReplicaExchangeSampler, ReplicaExchangeAnalyzer
from .paralleltempering import ParallelTemperingSampler, ParallelTemperingAnalyzer
from .sams import SAMSSampler, SAMSAnalyzer
from .multistateanalyzer import *
from .utils import *
|
mit
|
Python
|
8f1d2f0e821724f010291d340f30d5842ad32c76
|
add word2vec yahoo for shoes
|
mathrho/word2vec,mathrho/word2vec,mathrho/word2vec,mathrho/word2vec
|
extractVecMat_shoes.py
|
extractVecMat_shoes.py
|
#/datastore/zhenyang/bin/python
import sys
import os
import gensim, logging
import numpy as np
import scipy.io as sio
def main():
##############
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
#pretrained_model = './vectors.bin'
#pretrained_model = '../freebase-vectors-skipgram1000-en.bin'
#pretrained_model = '../GoogleNews-vectors-negative300.bin'
#model = gensim.models.Word2Vec.load_word2vec_format(pretrained_model, binary=True)
#pretrained_model = './vectors.output'
pretrained_model = '../yahoo_100m_words_30d.output'
model = gensim.models.Word2Vec.load_word2vec_format(pretrained_model, binary=False)
##############
classnames = open('shoes_gclasses_vps.txt', 'r').read().splitlines()
cc = 0
clsid = 0
vec_size = 30
word2vec_mat = np.zeros((len(classnames), vec_size))
for classname in classnames:
idc = 1
for cls in classname.split(';'):
wordvec = np.zeros(1, vec_size))
for cls_word in cls.split(' '):
try:
wordvec = np.add(wordvec, model[cls_word])
idc = 0
except:
print cls_word
idc = 1
break
if idc == 0:
break
word2vec_mat[clsid, :] = wordvec
clsid = clsid + 1
cc = cc + idc
#np.savetxt('attr_word2vec_GoogleNews.txt', word2vec_mat)
#sio.savemat('attr_word2vec_GoogleNews.mat', {'word2vec':word2vec_mat})
np.savetxt('shoes_word2vec_yahoo_30d.txt', word2vec_mat)
sio.savemat('shoes_word2vec_yahoo_30d.mat', {'word2vec':word2vec_mat})
print cc
if __name__ == "__main__":
main()
|
#/datastore/zhenyang/bin/python
import sys
import os
import gensim, logging
import numpy as np
import scipy.io as sio
def main():
##############
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
#pretrained_model = './vectors.bin'
#pretrained_model = '../freebase-vectors-skipgram1000-en.bin'
#pretrained_model = '../GoogleNews-vectors-negative300.bin'
#model = gensim.models.Word2Vec.load_word2vec_format(pretrained_model, binary=True)
#pretrained_model = './vectors.output'
pretrained_model = '../yahoo_100m_words_30d.output'
model = gensim.models.Word2Vec.load_word2vec_format(pretrained_model, binary=False)
##############
classnames = open('shoes_gclasses_vps.txt', 'r').read().splitlines()
cc = 0
clsid = 0
vec_size = 30
word2vec_mat = np.zeros((len(classnames), vec_size))
for classname in classnames:
idc = 1
for cls in classname.split(';'):
wordvec = np.zeros(1, vec_size))
for cls_word in cls.split(' '):
try:
np.add(wordvec, model[cls_word])
idc = 0
except:
print cls_word
idc = 1
break
if idc == 0:
break
word2vec_mat[clsid, :] = wordvec
clsid = clsid + 1
cc = cc + idc
#np.savetxt('attr_word2vec_GoogleNews.txt', word2vec_mat)
#sio.savemat('attr_word2vec_GoogleNews.mat', {'word2vec':word2vec_mat})
np.savetxt('shoes_word2vec_yahoo_30d.txt', word2vec_mat)
sio.savemat('shoes_word2vec_yahoo_30d.mat', {'word2vec':word2vec_mat})
print cc
if __name__ == "__main__":
main()
|
apache-2.0
|
Python
|
d7e9264418cbe5574d7475094e2c06a878897c34
|
fix ALDC scraper
|
DemocracyClub/EveryElection,DemocracyClub/EveryElection,DemocracyClub/EveryElection
|
every_election/apps/election_snooper/snoopers/aldc.py
|
every_election/apps/election_snooper/snoopers/aldc.py
|
from datetime import datetime
from .base import BaseSnooper
from election_snooper.models import SnoopedElection
class ALDCScraper(BaseSnooper):
snooper_name = "ALDC"
base_url = "https://www.aldc.org/"
def get_all(self):
url = "{}category/forthcoming-by-elections/".format(self.base_url)
print(url)
soup = self.get_soup(url)
for tile in soup.find_all('article'):
title = tile.find('h2').a.text.strip()
detail_url = tile.find('h2').a['href'].strip()
date = tile.find('date').text.strip()
content = tile.find('div', {'class': 'c-editor'}).find_all('p')
if 'cause' in content[0].text.lower():
seat_control, cause = content[0].text.lower().split('cause')
cause = cause.split('\n')[0].strip(": .")
else:
cause = "unknown"
data = {
'title': title,
'source': url,
'cause': cause,
'detail': "\n".join([x.text for x in content]),
'snooper_name': self.snooper_name,
}
try:
data['date'] = datetime.strptime(date, "%B %d, %Y")
except ValueError:
pass
item, created = SnoopedElection.objects.update_or_create(
snooper_name=self.snooper_name,
detail_url=detail_url,
defaults=data
)
if created:
self.post_to_slack(item)
|
from datetime import datetime
from .base import BaseSnooper
from election_snooper.models import SnoopedElection
class ALDCScraper(BaseSnooper):
snooper_name = "ALDC"
base_url = "https://www.aldc.org/"
def get_all(self):
url = "{}category/forthcoming-by-elections/".format(self.base_url)
print(url)
soup = self.get_soup(url)
wrapper = soup.find('section', {'class': 'mod-tile-wrap'})
for tile in wrapper.find_all('div', {'class': 'tile'}):
title = tile.find(
'div', {'class': 'election-heading'}).text.strip()
detail_url = tile.find(
'div', {'class': 'election-heading'}).a['href'].strip()
content = tile.find(
'div', {'class': 'election-content'}).find_all('p')
if 'cause' in content[1].text.lower():
seat_control, cause = content[1].text.lower().split('cause')
cause = cause.split('\n')[0].strip(": .")
else:
cause = "unknown"
data = {
'title': title,
'source': url,
'cause': cause,
'detail': "\n".join([x.text for x in content]),
'snooper_name': self.snooper_name,
}
try:
data['date'] = datetime.strptime(content[0].strong.text, "%B %d, %Y")
except ValueError:
pass
item, created = SnoopedElection.objects.update_or_create(
snooper_name=self.snooper_name,
detail_url=detail_url,
defaults=data
)
if created:
self.post_to_slack(item)
|
bsd-3-clause
|
Python
|
021ca057be4333d209454b043c79f9d6d327c3e0
|
Return the response for the main page without jinja rendering as AngularJS is doing the rendering
|
dedalusj/PaperChase,dedalusj/PaperChase
|
webapp/keepupwithscience/frontend/main.py
|
webapp/keepupwithscience/frontend/main.py
|
from flask import Blueprint, render_template, make_response
bp = Blueprint('main', __name__)
@bp.route('/')
def index():
"""Returns the main interface."""
return make_response(open('keepupwithscience/frontend/templates/main.html').read())
# return render_template('main.html')
|
from flask import Blueprint, render_template
bp = Blueprint('main', __name__)
@bp.route('/')
def index():
"""Returns the main interface."""
return render_template('main.html')
|
mit
|
Python
|
392e34a70bd2bccba268ec9de1752afc50cd1b35
|
Add the httlib dir to the build
|
cberry777/dd-agent,pfmooney/dd-agent,cberry777/dd-agent,brettlangdon/dd-agent,mderomph-coolblue/dd-agent,relateiq/dd-agent,joelvanvelden/dd-agent,JohnLZeller/dd-agent,ess/dd-agent,joelvanvelden/dd-agent,relateiq/dd-agent,Wattpad/dd-agent,jyogi/purvar-agent,c960657/dd-agent,takus/dd-agent,PagerDuty/dd-agent,a20012251/dd-agent,jraede/dd-agent,pmav99/praktoras,mderomph-coolblue/dd-agent,mderomph-coolblue/dd-agent,yuecong/dd-agent,jamesandariese/dd-agent,Shopify/dd-agent,tebriel/dd-agent,huhongbo/dd-agent,AniruddhaSAtre/dd-agent,jraede/dd-agent,Wattpad/dd-agent,remh/dd-agent,tebriel/dd-agent,PagerDuty/dd-agent,jshum/dd-agent,gphat/dd-agent,Wattpad/dd-agent,pfmooney/dd-agent,manolama/dd-agent,PagerDuty/dd-agent,pfmooney/dd-agent,guruxu/dd-agent,huhongbo/dd-agent,darron/dd-agent,takus/dd-agent,jshum/dd-agent,eeroniemi/dd-agent,huhongbo/dd-agent,darron/dd-agent,gphat/dd-agent,truthbk/dd-agent,Shopify/dd-agent,packetloop/dd-agent,AntoCard/powerdns-recursor_check,joelvanvelden/dd-agent,truthbk/dd-agent,GabrielNicolasAvellaneda/dd-agent,AntoCard/powerdns-recursor_check,yuecong/dd-agent,packetloop/dd-agent,relateiq/dd-agent,c960657/dd-agent,eeroniemi/dd-agent,amalakar/dd-agent,jshum/dd-agent,polynomial/dd-agent,gphat/dd-agent,packetloop/dd-agent,pfmooney/dd-agent,PagerDuty/dd-agent,oneandoneis2/dd-agent,jamesandariese/dd-agent,remh/dd-agent,Mashape/dd-agent,jraede/dd-agent,amalakar/dd-agent,pmav99/praktoras,AniruddhaSAtre/dd-agent,jyogi/purvar-agent,darron/dd-agent,yuecong/dd-agent,benmccann/dd-agent,Shopify/dd-agent,ess/dd-agent,a20012251/dd-agent,cberry777/dd-agent,a20012251/dd-agent,gphat/dd-agent,benmccann/dd-agent,JohnLZeller/dd-agent,manolama/dd-agent,jshum/dd-agent,oneandoneis2/dd-agent,urosgruber/dd-agent,urosgruber/dd-agent,urosgruber/dd-agent,JohnLZeller/dd-agent,Shopify/dd-agent,AniruddhaSAtre/dd-agent,indeedops/dd-agent,Mashape/dd-agent,takus/dd-agent,citrusleaf/dd-agent,huhongbo/dd-agent,guruxu/dd-agent,GabrielNicolasAvellaneda/dd-agent,relateiq/dd-agent,manolama/
dd-agent,guruxu/dd-agent,indeedops/dd-agent,remh/dd-agent,tebriel/dd-agent,ess/dd-agent,packetloop/dd-agent,jvassev/dd-agent,ess/dd-agent,amalakar/dd-agent,brettlangdon/dd-agent,takus/dd-agent,pmav99/praktoras,lookout/dd-agent,jamesandariese/dd-agent,zendesk/dd-agent,GabrielNicolasAvellaneda/dd-agent,mderomph-coolblue/dd-agent,jraede/dd-agent,urosgruber/dd-agent,pfmooney/dd-agent,indeedops/dd-agent,truthbk/dd-agent,huhongbo/dd-agent,Shopify/dd-agent,a20012251/dd-agent,relateiq/dd-agent,zendesk/dd-agent,polynomial/dd-agent,lookout/dd-agent,jyogi/purvar-agent,urosgruber/dd-agent,brettlangdon/dd-agent,JohnLZeller/dd-agent,lookout/dd-agent,cberry777/dd-agent,jvassev/dd-agent,indeedops/dd-agent,jamesandariese/dd-agent,jamesandariese/dd-agent,lookout/dd-agent,JohnLZeller/dd-agent,Mashape/dd-agent,pmav99/praktoras,AniruddhaSAtre/dd-agent,yuecong/dd-agent,jraede/dd-agent,eeroniemi/dd-agent,truthbk/dd-agent,jshum/dd-agent,AniruddhaSAtre/dd-agent,lookout/dd-agent,c960657/dd-agent,pmav99/praktoras,zendesk/dd-agent,takus/dd-agent,AntoCard/powerdns-recursor_check,zendesk/dd-agent,oneandoneis2/dd-agent,AntoCard/powerdns-recursor_check,gphat/dd-agent,manolama/dd-agent,polynomial/dd-agent,brettlangdon/dd-agent,benmccann/dd-agent,AntoCard/powerdns-recursor_check,jyogi/purvar-agent,packetloop/dd-agent,jvassev/dd-agent,eeroniemi/dd-agent,GabrielNicolasAvellaneda/dd-agent,oneandoneis2/dd-agent,c960657/dd-agent,citrusleaf/dd-agent,polynomial/dd-agent,darron/dd-agent,PagerDuty/dd-agent,truthbk/dd-agent,tebriel/dd-agent,amalakar/dd-agent,Wattpad/dd-agent,Mashape/dd-agent,joelvanvelden/dd-agent,citrusleaf/dd-agent,tebriel/dd-agent,indeedops/dd-agent,jyogi/purvar-agent,eeroniemi/dd-agent,darron/dd-agent,oneandoneis2/dd-agent,amalakar/dd-agent,jvassev/dd-agent,guruxu/dd-agent,guruxu/dd-agent,c960657/dd-agent,zendesk/dd-agent,manolama/dd-agent,citrusleaf/dd-agent,Wattpad/dd-agent,Mashape/dd-agent,benmccann/dd-agent,joelvanvelden/dd-agent,cberry777/dd-agent,brettlangdon/dd-agent,ess/dd-agent,y
uecong/dd-agent,jvassev/dd-agent,benmccann/dd-agent,mderomph-coolblue/dd-agent,a20012251/dd-agent,polynomial/dd-agent,remh/dd-agent,citrusleaf/dd-agent,remh/dd-agent,GabrielNicolasAvellaneda/dd-agent
|
packaging/datadog-agent-lib/setup.py
|
packaging/datadog-agent-lib/setup.py
|
#!/usr/bin/env python
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
import os, sys
from distutils.command.install import INSTALL_SCHEMES
def getVersion():
try:
from config import get_version
except ImportError:
import sys
sys.path.append("../..")
from config import get_version
return get_version()
def printVersion():
print getVersion()
def getDataFiles():
''' Load the data files from checks.d '''
import glob
curpath = os.path.dirname(os.path.join(os.path.realpath(__file__)))
checksd_path = os.path.join(curpath, 'checks.d')
checksd_glob = os.path.join(checksd_path, '*.py')
# Find all py files in the checks.d directory
checks = []
for check in glob.glob(checksd_glob):
check = os.path.basename(check)
checks.append(check)
return [('share/datadog/agent/checks.d', ['checks.d/%s' % c for c in checks])]
if __name__ == "__main__":
setup(name='datadog-agent-lib',
version=getVersion(),
description='Datatadog monitoring agent check library',
author='Datadog',
author_email='info@datadoghq.com',
url='http://datadoghq.com/',
packages=['checks', 'checks/db', 'checks/system', 'dogstream','pup', 'yaml', 'checks/libs/httplib2'],
package_data={'checks': ['libs/*'], 'pup' : ['static/*', 'pup.html']},
data_files=getDataFiles()
)
|
#!/usr/bin/env python
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
import os, sys
from distutils.command.install import INSTALL_SCHEMES
def getVersion():
try:
from config import get_version
except ImportError:
import sys
sys.path.append("../..")
from config import get_version
return get_version()
def printVersion():
print getVersion()
def getDataFiles():
''' Load the data files from checks.d '''
import glob
curpath = os.path.dirname(os.path.join(os.path.realpath(__file__)))
checksd_path = os.path.join(curpath, 'checks.d')
checksd_glob = os.path.join(checksd_path, '*.py')
# Find all py files in the checks.d directory
checks = []
for check in glob.glob(checksd_glob):
check = os.path.basename(check)
checks.append(check)
return [('share/datadog/agent/checks.d', ['checks.d/%s' % c for c in checks])]
if __name__ == "__main__":
setup(name='datadog-agent-lib',
version=getVersion(),
description='Datatadog monitoring agent check library',
author='Datadog',
author_email='info@datadoghq.com',
url='http://datadoghq.com/',
packages=['checks', 'checks/db', 'checks/system', 'dogstream','pup', 'yaml'],
package_data={'checks': ['libs/*', 'libs/httplib2/*'], 'pup' : ['static/*', 'pup.html']},
data_files=getDataFiles()
)
|
bsd-3-clause
|
Python
|
a9ff99f94938c5e50038b9d98200c5247e651c35
|
Fix AttributeError: module 'config' has no attribute 'expires'
|
wolfy1339/Python-IRC-Bot
|
utils/ignores.py
|
utils/ignores.py
|
import random
import time
import config
import log as logging
def check_ignored(host, channel):
ignores = config.expires['global']
if channel in config.ignores['channel'].keys():
ignores.extend(config.expires['channel'][channel])
for i in ignores:
for (uhost, expires) in i:
# if duration is not None, check if it's in the past, else say True
is_past = time.time() > expires if expires is not None else True
if host == uhost and is_past:
return True
elif host == uhost and not is_past:
del config.ignores['channel'][channel][host]
break
return False
def add_ignore(irc, event, args):
host = args[0]
base_message = "Ignoring %s for %s seconds"
indefinite = "Ignoring %s indefinately"
if len(args) > 1:
if args[1] == 'random':
duration = random.randrange(100, 10000)
expires = duration + int(time.time())
else:
duration = int(args[1])
expires = duration + int(time.time())
else:
expires = None
channel = args[2] if len(args) > 2 else None
if channel is not None:
try:
i = config.ignores['channels'][channel]
except KeyError:
i = config.ignores['channels'][channel] = []
i.append([host, expires])
else:
i = config.ignores['global']
i.append([host, expires])
if expires is not None:
if channel is not None:
logging.info(base_message + " in %s", host, duration, channel)
else:
logging.info(base_message, host, duration)
else:
if channel is not None:
logging.info(indefinite + " in %s", host, channel)
else:
logging.info(indefinite, host)
|
import random
import time
import config
import log as logging
def check_ignored(host, channel):
ignores = config.expires['global']
if channel in config.expires['channel'].keys():
ignores.extend(config.expires['channel'][channel])
for i in ignores:
for (uhost, expires) in i:
# if duration is not None, check if it's in the past, else say True
is_past = time.time() > expires if expires is not None else True
if host == uhost and is_past:
return True
elif host == uhost and not is_past:
del config.ignores['channel'][channel][host]
break
return False
def add_ignore(irc, event, args):
host = args[0]
base_message = "Ignoring %s for %s seconds"
indefinite = "Ignoring %s indefinately"
if len(args) > 1:
if args[1] == 'random':
duration = random.randrange(100, 10000)
expires = duration + int(time.time())
else:
duration = int(args[1])
expires = duration + int(time.time())
else:
expires = None
channel = args[2] if len(args) > 2 else None
if channel is not None:
try:
i = config.ignores['channels'][channel]
except KeyError:
i = config.ignores['channels'][channel] = []
i.append([host, expires])
else:
i = config.ignores['global']
i.append([host, expires])
if expires is not None:
if channel is not None:
logging.info(base_message + " in %s", host, duration, channel)
else:
logging.info(base_message, host, duration)
else:
if channel is not None:
logging.info(indefinite + " in %s", host, channel)
else:
logging.info(indefinite, host)
|
mit
|
Python
|
7de5d99866164c0f17aa85f8cdd910132ac35667
|
use re.split instead of string.split
|
hammerlab/topiary,hammerlab/topiary
|
topiary/rna/common.py
|
topiary/rna/common.py
|
# Copyright (c) 2015. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
def infer_delimiter(filename, comment_char="#", n_lines=3):
"""
Given a file which contains data separated by one of the following:
- commas
- tabs
- spaces
Return the most likely separator by sniffing the first few lines
of the file's contents.
"""
lines = []
with open(filename, "r") as f:
for line in f:
if line.startswith(comment_char):
continue
if len(lines) < n_lines:
lines.append(line)
else:
break
if len(lines) < n_lines:
raise ValueError(
"Not enough lines in %s to infer delimiter" % filename)
candidate_delimiters = ["\t", ",", "\s+"]
for candidate_delimiter in candidate_delimiters:
counts = [len(re.split(candidate_delimiter, line)) for line in lines]
first_line_count = counts[0]
if all(c == first_line_count for c in counts) and first_line_count > 1:
return candidate_delimiter
raise ValueError("Could not determine delimiter for %s" % filename)
def check_required_columns(df, filename, required_columns):
"""
Ensure that all required columns are present in the given dataframe,
otherwise raise an exception.
"""
available_columns = set(df.columns)
for column_name in required_columns:
if column_name not in available_columns:
raise ValueError("FPKM tracking file %s missing column '%s'" % (
filename,
column_name))
|
# Copyright (c) 2015. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def infer_delimiter(filename, comment_char="#", n_lines=3):
"""
Given a file which contains data separated by one of the following:
- commas
- tabs
- spaces
Return the most likely separator by sniffing the first few lines
of the file's contents.
"""
lines = []
with open(filename, "r") as f:
for line in f:
if line.startswith(comment_char):
continue
if len(lines) < n_lines:
lines.append(line)
else:
break
if len(lines) < n_lines:
raise ValueError(
"Not enough lines in %s to infer delimiter" % filename)
# the split function defaults to splitting on multiple spaces,
# which here corresponds to a candidate value of None
candidate_delimiters = ["\t", ",", None]
for candidate_delimiter in candidate_delimiters:
counts = [len(line.split(candidate_delimiter)) for line in lines]
first_line_count = counts[0]
if all(c == first_line_count for c in counts) and first_line_count > 1:
if candidate_delimiter is None:
return "\s+"
else:
return candidate_delimiter
raise ValueError("Could not determine delimiter for %s" % filename)
def check_required_columns(df, filename, required_columns):
"""
Ensure that all required columns are present in the given dataframe,
otherwise raise an exception.
"""
available_columns = set(df.columns)
for column_name in required_columns:
if column_name not in available_columns:
raise ValueError("FPKM tracking file %s missing column '%s'" % (
filename,
column_name))
|
apache-2.0
|
Python
|
ab505466859a5d2e5b397d1fb1fc3271977a2024
|
modify register validation
|
BugisDev/OrangBulukumba,BugisDev/OrangBulukumba
|
app/user/forms.py
|
app/user/forms.py
|
from flask_wtf import Form
from wtforms import StringField, PasswordField, TextAreaField, SelectField, validators
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from .models import User
from app.post.models import Post_type
from flask.ext.bcrypt import check_password_hash
class LoginForm(Form):
username = StringField('Username', validators=[validators.Required('Username tidak boleh kosong')])
password = PasswordField('Password', validators=[validators.Required('Username tidak boleh kosong')])
#Add a validation when Logged In
def validate(self):
rv = Form.validate(self)
if not rv:
return False
user = User.query.filter_by(username=self.username.data).first()
if user is None or not user:
self.username.errors.append('Unknown username')
return False
if not check_password_hash(user.password, self.password.data):
self.password.errors.append('Invalid password')
return False
self.user = user
return True
class RegisterForm(Form):
full_name = StringField('Full Name', validators=[validators.Required('Nama tidak boleh kosong')])
username = StringField('Username', validators=[validators.Required('Username tidak boleh kosong')])
email = StringField('Email', validators=[validators.Required('Email tidak boleh kosong')])
password = PasswordField('Password', validators=[validators.Required('Password Tidak boleh kosong'),
validators.EqualTo('confirm', message='Password harus sama')])
confirm = PasswordField('Ulangi Password')
def validate(self):
rv = Form.validate(self)
if not rv:
return False
if User.query.filter_by(username=self.username.data).first():
self.username.errors.append("Username Telah digunakan")
return False
if User.query.filter_by(email=self.email.data).first():
self.email.errors.append("Email yang anda masukkan telah terdaftar")
return False
return True
class CreatePost(Form):
title = StringField('title', validators=[validators.Required('Judul tidak boleh kosong')])
content = TextAreaField('Content', validators=[validators.Required('Konten tidak boleh kosong'),
validators.Length(max=100, message="Konten maksimal 100 karakter")])
post_type = SelectField('Type', coerce=int)
|
from flask_wtf import Form
from wtforms import StringField, PasswordField, TextAreaField, SelectField, validators
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from .models import User
from app.post.models import Post_type
from flask.ext.bcrypt import check_password_hash
class LoginForm(Form):
username = StringField('Username', validators=[validators.Required('Username tidak boleh kosong')])
password = PasswordField('Password', validators=[validators.Required('Username tidak boleh kosong')])
#Add a validation when Logged In
def validate(self):
rv = Form.validate(self)
if not rv:
return False
user = User.query.filter_by(username=self.username.data).first()
if user is None or not user:
self.username.errors.append('Unknown username')
return False
if not check_password_hash(user.password, self.password.data):
self.password.errors.append('Invalid password')
return False
self.user = user
return True
class RegisterForm(Form):
    """Sign-up form; validates that username and email are not already taken."""
    full_name = StringField('Full Name', validators=[validators.Required('Nama tidak boleh kosong')])
    username = StringField('Username', validators=[validators.Required('Username tidak boleh kosong')])
    email = StringField('Email', validators=[validators.Required('Email tidak boleh kosong')])
    password = PasswordField('Password', validators=[validators.Required('Password Tidak boleh kosong'),
                        validators.EqualTo('confirm', message='Password harus sama')])
    confirm = PasswordField('Ulangi Password')

    def validate(self):
        """Run standard field validation, then enforce uniqueness checks."""
        if not Form.validate(self):
            return False
        if User.query.filter_by(username=self.username.data).first():
            self.username.errors.append("Username Telah digunakan")
            return False
        # BUG FIX: was `filte_by`, which raises AttributeError at runtime and
        # made the email-uniqueness check crash instead of reporting an error.
        if User.query.filter_by(email=self.email.data).first():
            self.email.errors.append("Email yang anda masukkan telah terdaftar")
            return False
        return True
class CreatePost(Form):
    """Form for creating a post: title, content (max 100 chars) and a type."""
    title = StringField('title', validators=[validators.Required('Judul tidak boleh kosong')])
    content = TextAreaField('Content', validators=[validators.Required('Konten tidak boleh kosong'),
                        validators.Length(max=100, message="Konten maksimal 100 karakter")])
    # Choices are presumably populated by the view before rendering — TODO confirm.
    post_type = SelectField('Type', coerce=int)
|
apache-2.0
|
Python
|
29e56ec30c13c5fbb562e77cdb2c660d5fc52842
|
remove debugging print
|
frePPLe/frePPLe,frePPLe/frePPLe,frePPLe/frePPLe,frePPLe/frePPLe,frePPLe/frePPLe,frePPLe/frePPLe
|
freppledb/common/management/commands/generatetoken.py
|
freppledb/common/management/commands/generatetoken.py
|
#
# Copyright (C) 2021 by frePPLe bv
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from ...auth import getWebserviceAuthorization
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from freppledb import __version__
class Command(BaseCommand):
    help = """
    This command generates an API authentication token for a user.
    """

    requires_system_checks = False

    def get_version(self):
        """Return the frePPLe release this command ships with."""
        return __version__

    def add_arguments(self, parser):
        """Register the positional user plus optional expiry/database flags."""
        parser.add_argument("user", help="User running the command")
        parser.add_argument(
            "--expiry", help="Validity in days of the token", type=int, default=5
        )
        parser.add_argument(
            "--database",
            action="store",
            dest="database",
            default=DEFAULT_DB_ALIAS,
            help="Specifies the database to use",
        ),

    def handle(self, **options):
        """Generate and return a webservice authorization token.

        The expiry option (days) is converted to seconds for the token.
        """
        token = getWebserviceAuthorization(
            database=options["database"],
            # secret=None means the configured webservice secret is used.
            secret=None,
            user=options["user"],
            exp=options["expiry"] * 86400,
        )
        if options["verbosity"]:
            print(
                "Access token for %s, valid for %s days:"
                % (options["user"], options["expiry"])
            )
        return token
|
#
# Copyright (C) 2021 by frePPLe bv
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from ...auth import getWebserviceAuthorization
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS
from freppledb import __version__
class Command(BaseCommand):
    help = """
    This command generates an API authentication token for a user.
    """

    requires_system_checks = False

    def get_version(self):
        """Return the frePPLe release this command ships with."""
        return __version__

    def add_arguments(self, parser):
        """Register the positional user plus optional expiry/database flags."""
        parser.add_argument("user", help="User running the command")
        parser.add_argument(
            "--expiry", help="Validity in days of the token", type=int, default=5
        )
        parser.add_argument(
            "--database",
            action="store",
            dest="database",
            default=DEFAULT_DB_ALIAS,
            help="Specifies the database to use",
        )

    def handle(self, **options):
        """Generate and return a webservice authorization token.

        The expiry option (days) is converted to seconds for the token.
        """
        token = getWebserviceAuthorization(
            database=options["database"],
            # BUG FIX: a hard-coded debugging secret ("perepe") was left in
            # here; pass None so the configured webservice secret is used.
            secret=None,
            user=options["user"],
            exp=options["expiry"] * 86400,
        )
        if options["verbosity"]:
            print(
                "Access token for %s, valid for %s days:"
                % (options["user"], options["expiry"])
            )
        return token
|
agpl-3.0
|
Python
|
9235d1aa35e6a597be3c497577de528425d6e046
|
comment cleanup
|
cubgs53/usaddress,frankleng/usaddress,yl2695/usaddress,markbaas/usaddress,ahlusar1989/probablepeople
|
training/parse_osm.py
|
training/parse_osm.py
|
from lxml import etree
import ast
import re
# parse xml data, return a list of dicts representing addresses
def xmlToAddrList(xml_file):
    """Parse an OSM XML file into a list of {osm-tag-key: value} dicts.

    One dict is produced per <node> or <way> element, built from the
    k/v attributes of its child <tag> elements.
    """
    tree = etree.parse(xml_file)
    root = tree.getroot()
    addr_list=[]
    for element in root:
        if element.tag == 'node' or element.tag =='way':
            address={}
            for x in element.iter('tag'):
                # attrib is dict-like; round-trip through repr/literal_eval
                # to obtain a plain {'k': ..., 'v': ...} dict.
                addr = ast.literal_eval(str(x.attrib))
                address[addr['k']]=addr['v']
            addr_list.append(address)
    return addr_list
# transform osm data into tagged training data
def osmToTraining(address_list):
    """Transform OSM address dicts into tagged training data.

    :param address_list: list of dicts mapping OSM tag names to values
    :return: one list per address of ``[token, label]`` pairs, covering only
             the OSM tags we know how to label (others are ignored)
    """
    # Only the OSM tags below end up in training data; others are ignored.
    osm_tags_to_addr_tags = {
        "addr:house:number": "AddressNumber",
        "addr:street:prefix": "StreetNamePreDirectional",
        "addr:street:name": "StreetName",
        "addr:street:type": "StreetNamePostType",
        "addr:city": "PlaceName",
        "addr:state": "StateName",
        "addr:postcode": "ZipCode"}
    train_data = []
    for address in address_list:
        # Removed unused addr_index/token_index counters; membership test is
        # `key in dict` rather than `key in dict.keys()`.
        addr_train = [[value, osm_tags_to_addr_tags[key]]
                      for key, value in address.items()
                      if key in osm_tags_to_addr_tags]
        train_data.append(addr_train)
    return train_data
|
from lxml import etree
import ast
import re
# parse xml data, return a list of dicts representing addresses
def xmlToAddrList(xml_file):
    """Parse an OSM XML file into a list of {osm-tag-key: value} dicts.

    One dict is produced per <node> or <way> element, built from the
    k/v attributes of its child <tag> elements.
    """
    tree = etree.parse(xml_file)
    root = tree.getroot()
    addr_list=[]
    for element in root:
        if element.tag == 'node' or element.tag =='way':
            address={}
            for x in element.iter('tag'):
                # attrib is dict-like; round-trip through repr/literal_eval
                # to obtain a plain {'k': ..., 'v': ...} dict.
                addr = ast.literal_eval(str(x.attrib))
                address[addr['k']]=addr['v']
            addr_list.append(address)
    return addr_list
# transform osm data into tagged training data
def osmToTraining(address_list):
    """Transform OSM address dicts into tagged training data.

    :param address_list: list of dicts mapping OSM tag names to values
    :return: one list per address of ``[token, label]`` pairs, covering only
             the OSM tags we know how to label (others are ignored)
    """
    # Only the OSM tags below end up in training data; others are ignored.
    osm_tags_to_addr_tags = {
        "addr:house:number": "AddressNumber",
        "addr:street:prefix": "StreetNamePreDirectional",
        "addr:street:name": "StreetName",
        "addr:street:type": "StreetNamePostType",
        "addr:city": "PlaceName",
        "addr:state": "StateName",
        "addr:postcode": "ZipCode"}
    train_data = []
    for address in address_list:
        # Removed unused addr_index/token_index counters; membership test is
        # `key in dict` rather than `key in dict.keys()`.
        addr_train = [[value, osm_tags_to_addr_tags[key]]
                      for key, value in address.items()
                      if key in osm_tags_to_addr_tags]
        train_data.append(addr_train)
    return train_data
|
mit
|
Python
|
d7bea2995fc54c15404b4b47cefae5fc7b0201de
|
FIX partner internal code compatibility with sign up
|
ingadhoc/partner
|
partner_internal_code/res_partner.py
|
partner_internal_code/res_partner.py
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api
class partner(models.Model):
    """Adds a unique, sequence-assigned internal code to res.partner."""
    _inherit = 'res.partner'

    # copy=False so duplicating a partner does not clone the unique code.
    internal_code = fields.Char(
        'Internal Code',
        copy=False,
    )

    # we let this to base nane search improoved
    # def name_search(self, cr, uid, name, args=None,
    #                 operator='ilike', context=None, limit=100):
    #     args = args or []
    #     res = []
    #     if name:
    #         recs = self.search(
    #             cr, uid, [('internal_code', operator, name)] + args,
    #             limit=limit, context=context)
    #         res = self.name_get(cr, uid, recs)
    #     res += super(partner, self).name_search(
    #         cr, uid,
    #         name=name, args=args, operator=operator, limit=limit)
    #     return res

    @api.model
    def create(self, vals):
        """Assign the next code from the partner.internal.code sequence when
        none is supplied ('/' as fallback when the sequence is missing)."""
        if not vals.get('internal_code', False):
            vals['internal_code'] = self.env[
                'ir.sequence'].next_by_code('partner.internal.code') or '/'
        return super(partner, self).create(vals)

    # NOTE(review): this is a set literal holding one 3-tuple; Odoo
    # conventionally uses a list of tuples for _sql_constraints — works here
    # because it is only iterated, but confirm.
    _sql_constraints = {
        ('internal_code_uniq', 'unique(internal_code)',
            'Internal Code mast be unique!')
    }
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api
class partner(models.Model):
    """Adds a unique, sequence-assigned internal code to res.partner."""
    _inherit = 'res.partner'

    # NOTE(review): without copy=False, duplicating a partner presumably
    # copies the code and trips the unique constraint below — confirm.
    internal_code = fields.Char(
        'Internal Code')

    # we let this to base nane search improoved
    # def name_search(self, cr, uid, name, args=None,
    #                 operator='ilike', context=None, limit=100):
    #     args = args or []
    #     res = []
    #     if name:
    #         recs = self.search(
    #             cr, uid, [('internal_code', operator, name)] + args,
    #             limit=limit, context=context)
    #         res = self.name_get(cr, uid, recs)
    #     res += super(partner, self).name_search(
    #         cr, uid,
    #         name=name, args=args, operator=operator, limit=limit)
    #     return res

    @api.model
    def create(self, vals):
        """Assign the next code from the partner.internal.code sequence when
        none is supplied ('/' as fallback when the sequence is missing)."""
        if not vals.get('internal_code', False):
            vals['internal_code'] = self.env[
                'ir.sequence'].next_by_code('partner.internal.code') or '/'
        return super(partner, self).create(vals)

    # NOTE(review): this is a set literal holding one 3-tuple; Odoo
    # conventionally uses a list of tuples for _sql_constraints — works here
    # because it is only iterated, but confirm.
    _sql_constraints = {
        ('internal_code_uniq', 'unique(internal_code)',
            'Internal Code mast be unique!')
    }
|
agpl-3.0
|
Python
|
da05fe2d41a077276946c5d6c86995c60315e093
|
Make sure we load pyvisa-py when enumerating instruments.
|
BBN-Q/Auspex,BBN-Q/Auspex,BBN-Q/Auspex,BBN-Q/Auspex
|
src/auspex/instruments/__init__.py
|
src/auspex/instruments/__init__.py
|
import pkgutil
import importlib
import pyvisa
# Registry of instrument class name -> class, built by scanning this package.
instrument_map = {}

for loader, name, is_pkg in pkgutil.iter_modules(__path__):
    module = importlib.import_module('auspex.instruments.' + name)
    if hasattr(module, "__all__"):
        # Re-export each submodule's public names at package level and record
        # them in instrument_map for lookup by name.
        globals().update((name, getattr(module, name)) for name in module.__all__)
        for name in module.__all__:
            instrument_map.update({name:getattr(module,name)})
def enumerate_visa_instruments():
    """Print the VISA resources visible through the pyvisa-py ("@py") backend."""
    rm = pyvisa.ResourceManager("@py")
    print(rm.list_resources())
def probe_instrument_ids():
    """Query '*IDN?' on every VISA resource (pyvisa-py backend) and print the reply."""
    rm = pyvisa.ResourceManager("@py")
    for instr_label in rm.list_resources():
        instr = rm.open_resource(instr_label)
        try:
            print(instr_label, instr.query('*IDN?'))
        except Exception:
            # FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit during a slow probe.
            print(instr_label, "Did not respond")
        finally:
            # Close even if printing/querying raised, so sessions aren't leaked.
            instr.close()
|
import pkgutil
import importlib
import pyvisa
# Registry of instrument class name -> class, built by scanning this package.
instrument_map = {}

for loader, name, is_pkg in pkgutil.iter_modules(__path__):
    module = importlib.import_module('auspex.instruments.' + name)
    if hasattr(module, "__all__"):
        # Re-export each submodule's public names at package level and record
        # them in instrument_map for lookup by name.
        globals().update((name, getattr(module, name)) for name in module.__all__)
        for name in module.__all__:
            instrument_map.update({name:getattr(module,name)})
def enumerate_visa_instruments():
    """Print the VISA resources visible through the default pyvisa backend."""
    rm = pyvisa.ResourceManager()
    print(rm.list_resources())
def probe_instrument_ids():
    """Query '*IDN?' on every VISA resource (default backend) and print the reply."""
    rm = pyvisa.ResourceManager()
    for instr_label in rm.list_resources():
        instr = rm.open_resource(instr_label)
        try:
            print(instr_label, instr.query('*IDN?'))
        except Exception:
            # FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit during a slow probe.
            print(instr_label, "Did not respond")
        finally:
            # Close even if printing/querying raised, so sessions aren't leaked.
            instr.close()
|
apache-2.0
|
Python
|
4eb4a2eaa42cd71bf4427bdaaa1e853975432691
|
Allow keyword arguments in GeneralStoreManager.create_item method
|
PHB-CS123/graphene,PHB-CS123/graphene,PHB-CS123/graphene
|
graphene/storage/intermediate/general_store_manager.py
|
graphene/storage/intermediate/general_store_manager.py
|
from graphene.storage.id_store import *
class GeneralStoreManager:
    """
    Handles the creation/deletion of nodes to the NodeStore with ID recycling
    """

    def __init__(self, store):
        """
        Creates an instance of the GeneralStoreManager

        :param store: Store to manage
        :return: General store manager to handle index recycling
        :rtype: GeneralStoreManager
        """
        self.store = store
        # IDs freed by delete_item are persisted beside the store file.
        self.idStore = IdStore(store.FILE_NAME + ".id")

    def create_item(self, **kwargs):
        """
        Creates an item with the type of the store being managed

        :param kwargs: keyword arguments forwarded to the storage-type
                       constructor after the chosen ID
        :return: New item with type STORE_TYPE
        """
        # Check for an available (recycled) ID from the IdStore
        available_id = self.idStore.get_id()
        # If no ID is available, get the last index of the file
        if available_id == IdStore.NO_ID:
            available_id = self.store.get_last_file_index()
        # Create a type based on the type our store stores
        return self.store.STORAGE_TYPE(available_id, **kwargs)

    def delete_item(self, item):
        """
        Deletes the given item from the store and adds the index to its IdStore
        to be recycled

        :param item: item to delete; must expose an ``index`` attribute
        :return: Nothing
        :rtype: None
        """
        # Get index of item to be deleted
        deleted_index = item.index
        # Delete the item from the store
        self.store.delete_item(item)
        # Add the index to the IdStore, so it can be recycled
        self.idStore.store_id(deleted_index)
|
from graphene.storage.id_store import *
class GeneralStoreManager:
    """
    Handles the creation/deletion of nodes to the NodeStore with ID recycling
    """

    def __init__(self, store):
        """
        Creates an instance of the GeneralStoreManager

        :param store: Store to manage
        :return: General store manager to handle index recycling
        :rtype: GeneralStoreManager
        """
        self.store = store
        # IDs freed by delete_item are persisted beside the store file.
        self.idStore = IdStore(store.FILE_NAME + ".id")

    def create_item(self, **kwargs):
        """
        Creates an item with the type of the store being managed

        :param kwargs: keyword arguments forwarded to the storage-type
                       constructor after the chosen ID (generalization:
                       calling with no arguments behaves exactly as before)
        :return: New item with type STORE_TYPE
        """
        # Check for an available (recycled) ID from the IdStore
        available_id = self.idStore.get_id()
        # If no ID is available, get the last index of the file
        if available_id == IdStore.NO_ID:
            available_id = self.store.get_last_file_index()
        # Create a type based on the type our store stores
        return self.store.STORAGE_TYPE(available_id, **kwargs)

    def delete_item(self, item):
        """
        Deletes the given item from the store and adds the index to its IdStore
        to be recycled

        :param item: item to delete; must expose an ``index`` attribute
        :return: Nothing
        :rtype: None
        """
        # Get index of item to be deleted
        deleted_index = item.index
        # Delete the item from the store
        self.store.delete_item(item)
        # Add the index to the IdStore, so it can be recycled
        self.idStore.store_id(deleted_index)
|
apache-2.0
|
Python
|
ad47fb85e5c2deb47cbe3fc3478e1ae2da93adfe
|
Update h-index.py
|
yiwen-luo/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,githubutilities/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,githubutilities/LeetCode,jaredkoontz/leetcode,yiwen-luo/LeetCode,githubutilities/LeetCode,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,jaredkoontz/leetcode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,yiwen-luo/LeetCode,jaredkoontz/leetcode,githubutilities/LeetCode,kamyu104/LeetCode,yiwen-luo/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,jaredkoontz/leetcode,githubutilities/LeetCode,yiwen-luo/LeetCode,kamyu104/LeetCode
|
Python/h-index.py
|
Python/h-index.py
|
# Time: O(n)
# Space: O(n)
# Given an array of citations (each citation is a non-negative integer)
# of a researcher, write a function to compute the researcher's h-index.
#
# According to the definition of h-index on Wikipedia:
# "A scientist has index h if h of his/her N papers have
# at least h citations each, and the other N − h papers have
# no more than h citations each."
#
# For example, given citations = [3, 0, 6, 1, 5],
# which means the researcher has 5 papers in total
# and each of them had received 3, 0, 6, 1, 5 citations respectively.
# Since the researcher has 3 papers with at least 3 citations each and
# the remaining two with no more than 3 citations each, his h-index is 3.
#
# Note: If there are several possible values for h, the maximum one is taken as the h-index.
#
class Solution(object):
    def hIndex(self, citations):
        """
        :type citations: List[int]
        :rtype: int

        Counting approach, O(n) time / O(n) space: bucket papers by citation
        count (capped at n, since h can never exceed the paper count), then
        scan buckets high-to-low until the cumulative paper count reaches the
        candidate index.
        """
        n = len(citations);
        count = [0] * (n + 1)
        for x in citations:
            # Any paper with >= n citations counts the same as exactly n.
            if x >= n:
                count[n] += 1
            else:
                count[x] += 1
        h = 0
        for i in reversed(xrange(0, n + 1)):
            h += count[i]
            # h papers have at least i citations each -> h-index is i.
            if h >= i:
                return i
        return h
# Time: O(nlogn)
# Space: O(1)
class Solution2(object):
    def hIndex(self, citations):
        """
        :type citations: List[int]
        :rtype: int

        O(n log n): sorts *citations* in place, descending, then advances h
        while each successive paper still supports a larger h-index.
        """
        citations.sort(reverse=True)
        h = 0
        while h < len(citations) and citations[h] >= h + 1:
            h += 1
        return h
# Time: O(nlogn)
# Space: O(n)
class Solution2(object):
    def hIndex(self, citations):
        """
        :type citations: List[int]
        :rtype: int

        O(n log n), non-mutating: rank papers by citations descending and
        count how many cover their own 1-based rank.
        """
        ranked = sorted(citations, reverse=True)
        return sum(1 for rank, cites in enumerate(ranked, 1) if cites >= rank)
|
# Time: O(nlogn)
# Space: O(1)
# Given an array of citations (each citation is a non-negative integer)
# of a researcher, write a function to compute the researcher's h-index.
#
# According to the definition of h-index on Wikipedia:
# "A scientist has index h if h of his/her N papers have
# at least h citations each, and the other N − h papers have
# no more than h citations each."
#
# For example, given citations = [3, 0, 6, 1, 5],
# which means the researcher has 5 papers in total
# and each of them had received 3, 0, 6, 1, 5 citations respectively.
# Since the researcher has 3 papers with at least 3 citations each and
# the remaining two with no more than 3 citations each, his h-index is 3.
#
# Note: If there are several possible values for h, the maximum one is taken as the h-index.
#
class Solution(object):
    def hIndex(self, citations):
        """
        :type citations: List[int]
        :rtype: int

        Sorts *citations* in place, descending; the answer is the first index
        whose citation count no longer covers index + 1, or the list length
        if every paper does.
        """
        citations.sort(reverse=True)
        for h, cites in enumerate(citations):
            if cites < h + 1:
                return h
        return len(citations)
# Time: O(nlogn)
# Space: O(n)
class Solution2(object):
    def hIndex(self, citations):
        """
        :type citations: List[int]
        :rtype: int

        Non-mutating O(n log n) variant: sort descending and count the
        positions whose citation count covers the 1-based rank.
        """
        total = 0
        for position, cites in enumerate(sorted(citations, reverse=True)):
            if cites >= position + 1:
                total += 1
        return total
|
mit
|
Python
|
4fd6d20be257cca38f98d20df78b35d7c7bc3911
|
Fix factory_jst
|
watchdogpolska/feder,watchdogpolska/feder,watchdogpolska/feder,watchdogpolska/feder
|
feder/teryt/factory.py
|
feder/teryt/factory.py
|
from random import randint
from .models import JednostkaAdministracyjna as JST
from .models import Category
def factory_jst():
    """Create and return a minimal active JednostkaAdministracyjna fixture.

    A fresh level-1 Category is created for it; the id is randomized,
    presumably to avoid collisions between repeated test runs — confirm.
    """
    category = Category.objects.create(name="X", level=1)
    return JST.objects.create(name="X", id=randint(0, 1000),
                              category=category,
                              updated_on='2015-05-12',
                              active=True)
|
from autofixture import AutoFixture
from .models import JednostkaAdministracyjna
def factory_jst():
    """Create and return a JednostkaAdministracyjna fixture via AutoFixture.

    Foreign keys are auto-generated; rght is zeroed before save — presumably
    to satisfy tree bookkeeping fields, TODO confirm.
    """
    jst = AutoFixture(JednostkaAdministracyjna,
                      field_values={'updated_on': '2015-02-12'},
                      generate_fk=True).create_one(commit=False)
    jst.rght = 0
    jst.save()
    return jst
|
mit
|
Python
|
2eefaca1d7d27ebe2e9a489ab2c1dc2927e49b55
|
Bump version
|
thombashi/sqliteschema
|
sqliteschema/__version__.py
|
sqliteschema/__version__.py
|
# Package metadata.
__author__ = "Tsuyoshi Hombashi"
__copyright__ = "Copyright 2016, {}".format(__author__)
__license__ = "MIT License"
__version__ = "1.0.2"
__maintainer__ = __author__
__email__ = "tsuyoshi.hombashi@gmail.com"
|
# Package metadata.
__author__ = "Tsuyoshi Hombashi"
__copyright__ = "Copyright 2016, {}".format(__author__)
__license__ = "MIT License"
__version__ = "1.0.1"
__maintainer__ = __author__
__email__ = "tsuyoshi.hombashi@gmail.com"
|
mit
|
Python
|
bc904f3ab7cc9d697dc56058ac9cb578055c401f
|
raise exception rather than logging and returning
|
AniruddhaSAtre/dd-agent,ess/dd-agent,oneandoneis2/dd-agent,benmccann/dd-agent,Shopify/dd-agent,jamesandariese/dd-agent,jshum/dd-agent,eeroniemi/dd-agent,Wattpad/dd-agent,gphat/dd-agent,tebriel/dd-agent,jvassev/dd-agent,joelvanvelden/dd-agent,cberry777/dd-agent,jraede/dd-agent,oneandoneis2/dd-agent,takus/dd-agent,AniruddhaSAtre/dd-agent,yuecong/dd-agent,jshum/dd-agent,AntoCard/powerdns-recursor_check,oneandoneis2/dd-agent,pfmooney/dd-agent,a20012251/dd-agent,a20012251/dd-agent,lookout/dd-agent,AntoCard/powerdns-recursor_check,polynomial/dd-agent,remh/dd-agent,mderomph-coolblue/dd-agent,packetloop/dd-agent,oneandoneis2/dd-agent,lookout/dd-agent,JohnLZeller/dd-agent,manolama/dd-agent,mderomph-coolblue/dd-agent,jyogi/purvar-agent,citrusleaf/dd-agent,gphat/dd-agent,c960657/dd-agent,ess/dd-agent,jyogi/purvar-agent,indeedops/dd-agent,huhongbo/dd-agent,zendesk/dd-agent,polynomial/dd-agent,AntoCard/powerdns-recursor_check,Shopify/dd-agent,PagerDuty/dd-agent,takus/dd-agent,c960657/dd-agent,PagerDuty/dd-agent,pmav99/praktoras,lookout/dd-agent,tebriel/dd-agent,Mashape/dd-agent,PagerDuty/dd-agent,yuecong/dd-agent,GabrielNicolasAvellaneda/dd-agent,jyogi/purvar-agent,brettlangdon/dd-agent,urosgruber/dd-agent,indeedops/dd-agent,cberry777/dd-agent,darron/dd-agent,AntoCard/powerdns-recursor_check,brettlangdon/dd-agent,packetloop/dd-agent,packetloop/dd-agent,guruxu/dd-agent,guruxu/dd-agent,lookout/dd-agent,jamesandariese/dd-agent,jamesandariese/dd-agent,Wattpad/dd-agent,JohnLZeller/dd-agent,gphat/dd-agent,brettlangdon/dd-agent,Wattpad/dd-agent,relateiq/dd-agent,huhongbo/dd-agent,Wattpad/dd-agent,mderomph-coolblue/dd-agent,a20012251/dd-agent,remh/dd-agent,indeedops/dd-agent,remh/dd-agent,brettlangdon/dd-agent,eeroniemi/dd-agent,jshum/dd-agent,zendesk/dd-agent,manolama/dd-agent,polynomial/dd-agent,Shopify/dd-agent,jyogi/purvar-agent,c960657/dd-agent,relateiq/dd-agent,c960657/dd-agent,amalakar/dd-agent,PagerDuty/dd-agent,huhongbo/dd-agent,eeroniemi/dd-agent,Mashape/dd-agent,amalakar/dd-a
gent,pmav99/praktoras,AniruddhaSAtre/dd-agent,Shopify/dd-agent,takus/dd-agent,gphat/dd-agent,GabrielNicolasAvellaneda/dd-agent,pfmooney/dd-agent,benmccann/dd-agent,oneandoneis2/dd-agent,darron/dd-agent,relateiq/dd-agent,ess/dd-agent,manolama/dd-agent,darron/dd-agent,truthbk/dd-agent,manolama/dd-agent,urosgruber/dd-agent,ess/dd-agent,AntoCard/powerdns-recursor_check,Wattpad/dd-agent,lookout/dd-agent,brettlangdon/dd-agent,a20012251/dd-agent,pmav99/praktoras,GabrielNicolasAvellaneda/dd-agent,polynomial/dd-agent,yuecong/dd-agent,amalakar/dd-agent,eeroniemi/dd-agent,jraede/dd-agent,remh/dd-agent,pmav99/praktoras,yuecong/dd-agent,cberry777/dd-agent,cberry777/dd-agent,jamesandariese/dd-agent,c960657/dd-agent,guruxu/dd-agent,jamesandariese/dd-agent,cberry777/dd-agent,polynomial/dd-agent,jvassev/dd-agent,JohnLZeller/dd-agent,PagerDuty/dd-agent,truthbk/dd-agent,zendesk/dd-agent,citrusleaf/dd-agent,mderomph-coolblue/dd-agent,truthbk/dd-agent,joelvanvelden/dd-agent,manolama/dd-agent,jyogi/purvar-agent,pmav99/praktoras,joelvanvelden/dd-agent,citrusleaf/dd-agent,gphat/dd-agent,guruxu/dd-agent,takus/dd-agent,takus/dd-agent,GabrielNicolasAvellaneda/dd-agent,jshum/dd-agent,jvassev/dd-agent,jraede/dd-agent,guruxu/dd-agent,benmccann/dd-agent,jvassev/dd-agent,benmccann/dd-agent,AniruddhaSAtre/dd-agent,amalakar/dd-agent,jraede/dd-agent,indeedops/dd-agent,tebriel/dd-agent,eeroniemi/dd-agent,darron/dd-agent,huhongbo/dd-agent,urosgruber/dd-agent,urosgruber/dd-agent,yuecong/dd-agent,joelvanvelden/dd-agent,pfmooney/dd-agent,urosgruber/dd-agent,mderomph-coolblue/dd-agent,truthbk/dd-agent,packetloop/dd-agent,ess/dd-agent,Mashape/dd-agent,remh/dd-agent,amalakar/dd-agent,zendesk/dd-agent,jshum/dd-agent,jvassev/dd-agent,citrusleaf/dd-agent,indeedops/dd-agent,zendesk/dd-agent,AniruddhaSAtre/dd-agent,relateiq/dd-agent,benmccann/dd-agent,darron/dd-agent,Mashape/dd-agent,tebriel/dd-agent,pfmooney/dd-agent,Mashape/dd-agent,a20012251/dd-agent,citrusleaf/dd-agent,JohnLZeller/dd-agent,GabrielNicolasAvell
aneda/dd-agent,Shopify/dd-agent,joelvanvelden/dd-agent,relateiq/dd-agent,tebriel/dd-agent,JohnLZeller/dd-agent,truthbk/dd-agent,packetloop/dd-agent,pfmooney/dd-agent,huhongbo/dd-agent,jraede/dd-agent
|
checks.d/hdfs.py
|
checks.d/hdfs.py
|
from checks import AgentCheck
class HDFSCheck(AgentCheck):
    """Report on free space and space used in HDFS.
    """

    def check(self, instance):
        # snakebite is imported lazily so the agent still loads without it.
        try:
            import snakebite.client
        except ImportError:
            raise ImportError('HDFSCheck requires the snakebite module')

        # 'namenode' is mandatory; a missing key is a configuration error.
        if 'namenode' not in instance:
            raise ValueError('Missing key \'namenode\' in HDFSCheck config')

        # Accept "host" or "host:port"; default to the 8020 IPC port.
        hostport = instance['namenode']
        if ':' in hostport:
            host, _, port = hostport.partition(':')
            port = int(port)
        else:
            host = hostport
            port = 8020

        hdfs = snakebite.client.Client(host, port)
        stats = hdfs.df()
        # {'used': 2190859321781L,
        # 'capacity': 76890897326080L,
        # 'under_replicated': 0L,
        # 'missing_blocks': 0L,
        # 'filesystem': 'hdfs://hostname:port',
        # 'remaining': 71186818453504L,
        # 'corrupt_blocks': 0L}
        self.gauge('hdfs.used', stats['used'])
        self.gauge('hdfs.free', stats['remaining'])
        self.gauge('hdfs.capacity', stats['capacity'])
        self.gauge('hdfs.in_use', float(stats['used']) / float(stats['capacity']))
        self.gauge('hdfs.under_replicated', stats['under_replicated'])
        self.gauge('hdfs.missing_blocks', stats['missing_blocks'])
        self.gauge('hdfs.corrupt_blocks', stats['corrupt_blocks'])
if __name__ == '__main__':
    # Ad-hoc manual test: run the check against ./hdfs.yaml and dump results.
    check, instances = HDFSCheck.from_yaml('./hdfs.yaml')
    for instance in instances:
        check.check(instance)
        print "Events: %r" % check.get_events()
        print "Metrics: %r" % check.get_metrics()
|
from checks import AgentCheck
class HDFSCheck(AgentCheck):
    """Report on free space and space used in HDFS.
    """

    def check(self, instance):
        # snakebite is imported lazily so the agent still loads without it.
        try:
            import snakebite.client
        except ImportError:
            raise ImportError('HDFSCheck requires the snakebite module')

        # FIX: a missing mandatory key is a configuration error; raise so the
        # agent surfaces it instead of logging and silently skipping the check.
        if 'namenode' not in instance:
            raise ValueError('Missing key \'namenode\' in HDFSCheck config')

        # Accept "host" or "host:port"; default to the 8020 IPC port.
        hostport = instance['namenode']
        if ':' in hostport:
            host, _, port = hostport.partition(':')
            port = int(port)
        else:
            host = hostport
            port = 8020

        hdfs = snakebite.client.Client(host, port)
        stats = hdfs.df()
        # {'used': 2190859321781L,
        # 'capacity': 76890897326080L,
        # 'under_replicated': 0L,
        # 'missing_blocks': 0L,
        # 'filesystem': 'hdfs://hostname:port',
        # 'remaining': 71186818453504L,
        # 'corrupt_blocks': 0L}
        self.gauge('hdfs.used', stats['used'])
        self.gauge('hdfs.free', stats['remaining'])
        self.gauge('hdfs.capacity', stats['capacity'])
        self.gauge('hdfs.in_use', float(stats['used']) / float(stats['capacity']))
        self.gauge('hdfs.under_replicated', stats['under_replicated'])
        self.gauge('hdfs.missing_blocks', stats['missing_blocks'])
        self.gauge('hdfs.corrupt_blocks', stats['corrupt_blocks'])
if __name__ == '__main__':
    # Ad-hoc manual test: run the check against ./hdfs.yaml and dump results.
    check, instances = HDFSCheck.from_yaml('./hdfs.yaml')
    for instance in instances:
        check.check(instance)
        print "Events: %r" % check.get_events()
        print "Metrics: %r" % check.get_metrics()
|
bsd-3-clause
|
Python
|
6a8c8bc0e407327e5c0e4cae3d4d6ace179a6940
|
Add team eligibility to API
|
siggame/webserver,siggame/webserver,siggame/webserver
|
webserver/codemanagement/serializers.py
|
webserver/codemanagement/serializers.py
|
from rest_framework import serializers
from greta.models import Repository
from competition.models import Team
from .models import TeamClient, TeamSubmission
class TeamSerializer(serializers.ModelSerializer):
    """Serializes a competition Team, including win eligibility."""
    class Meta:
        model = Team
        fields = ('id', 'name', 'slug', 'eligible_to_win')
class RepoSerializer(serializers.ModelSerializer):
    """Serializes a greta Repository with computed path/readiness fields."""
    class Meta:
        model = Repository
        fields = ('name', 'description', 'forked_from',
                  'path', 'is_ready')

    forked_from = serializers.RelatedField()
    path = serializers.SerializerMethodField('get_path')
    is_ready = serializers.SerializerMethodField('get_is_ready')

    def get_path(self, repo):
        """Return the repository's path property."""
        return repo.path

    def get_is_ready(self, repo):
        """Return repo.is_ready() — whether the repository is usable."""
        return repo.is_ready()
class TeamSubmissionSerializer(serializers.ModelSerializer):
    """Serializes a submission's tag name and the commit it references."""
    class Meta:
        model = TeamSubmission
        fields = ('name', 'commit')
class TeamClientSerializer(serializers.ModelSerializer):
    """Serializes a TeamClient with nested team, repo, latest tag, language."""
    class Meta:
        model = TeamClient
        fields = ('team', 'repository', 'tag', 'language')

    team = TeamSerializer()
    repository = RepoSerializer()
    tag = serializers.SerializerMethodField('get_tag')
    language = serializers.SerializerMethodField('get_language')

    def get_tag(self, teamclient):
        """Return the latest submission's serialized data, or None if none."""
        try:
            latest_sub= teamclient.submissions.latest()
            return TeamSubmissionSerializer(latest_sub).data
        except TeamSubmission.DoesNotExist:
            return None

    def get_language(self, teamclient):
        """Return the language of the client's base repository."""
        return teamclient.base.language
|
from rest_framework import serializers
from greta.models import Repository
from competition.models import Team
from .models import TeamClient, TeamSubmission
class TeamSerializer(serializers.ModelSerializer):
    """Serializes a competition Team's identifying fields."""
    class Meta:
        model = Team
        fields = ('id', 'name', 'slug')
class RepoSerializer(serializers.ModelSerializer):
    """Serializes a greta Repository with computed path/readiness fields."""
    class Meta:
        model = Repository
        fields = ('name', 'description', 'forked_from',
                  'path', 'is_ready')

    forked_from = serializers.RelatedField()
    path = serializers.SerializerMethodField('get_path')
    is_ready = serializers.SerializerMethodField('get_is_ready')

    def get_path(self, repo):
        """Return the repository's path property."""
        return repo.path

    def get_is_ready(self, repo):
        """Return repo.is_ready() — whether the repository is usable."""
        return repo.is_ready()
class TeamSubmissionSerializer(serializers.ModelSerializer):
    """Serializes a submission's tag name and the commit it references."""
    class Meta:
        model = TeamSubmission
        fields = ('name', 'commit')
class TeamClientSerializer(serializers.ModelSerializer):
    """Serializes a TeamClient with nested team, repo, latest tag, language."""
    class Meta:
        model = TeamClient
        fields = ('team', 'repository', 'tag', 'language')

    team = TeamSerializer()
    repository = RepoSerializer()
    tag = serializers.SerializerMethodField('get_tag')
    language = serializers.SerializerMethodField('get_language')

    def get_tag(self, teamclient):
        """Return the latest submission's serialized data, or None if none."""
        try:
            latest_sub= teamclient.submissions.latest()
            return TeamSubmissionSerializer(latest_sub).data
        except TeamSubmission.DoesNotExist:
            return None

    def get_language(self, teamclient):
        """Return the language of the client's base repository."""
        return teamclient.base.language
|
bsd-3-clause
|
Python
|
d50daddde2186d54659a4f8dbf63622311ed6d22
|
remove service class
|
aacanakin/glim
|
glim/services.py
|
glim/services.py
|
from glim.core import Service
class Config(Service):
    """Service facade for application configuration."""
    pass
class Session(Service):
    """Service facade for the session backend."""
    pass
class Router(Service):
    """Service facade for the router."""
    pass
|
# metaclass for Service class
class DeflectToInstance(type):
    """Metaclass that forwards class-attribute lookups that miss on the class
    to the class's ``instance`` attribute (Python 2 ``__metaclass__`` style)."""
    def __getattr__(selfcls, a): # selfcls in order to make clear it is a class object (as we are a metaclass)
        try:
            # first, inquiry the class itself
            return super(DeflectToInstance, selfcls).__getattr__(a)
        except AttributeError:
            # Not found, so try to inquiry the instance attribute:
            return getattr(selfcls.instance, a)
# facade that is used for saving complex
class Service:
    """Singleton-style facade: boot() stores one backing object per subclass,
    and attribute access falls through to it via DeflectToInstance."""
    __metaclass__ = DeflectToInstance

    # Class-level slot holding the booted backing object (None until boot()).
    instance = None

    @classmethod
    def boot(cls, object, configuration = {}):
        """Instantiate the backing object once; later calls are no-ops.

        NOTE(review): mutable default ``configuration={}`` is shared across
        calls — safe only as long as it is never mutated.
        """
        if cls.instance is None:
            cls.instance = object(configuration)
class Config(Service):
    """Service facade for application configuration."""
    pass
class Session(Service):
    """Service facade for the session backend."""
    pass
class Router(Service):
    """Service facade for the router."""
    pass
|
mit
|
Python
|
72902ebcada7bdc7a889f8766b63afff82110182
|
Comment about recursion limit in categories.
|
dokterbob/django-shopkit,dokterbob/django-shopkit
|
webshop/extensions/category/__init__.py
|
webshop/extensions/category/__init__.py
|
# Copyright (C) 2010-2011 Mathijs de Bruin <mathijs@mathijsfietst.nl>
#
# This file is part of django-webshop.
#
# django-webshop is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Django-webshop, by default, contains base classes for two kinds of categories:
* Simple categories, which define a base class for products that belong to
exactly one category.
* Advanced categories, that belong to zero or more categories.
Furthermore, generic abstract base models are defined for 'normal' categories
and for nested categories, allowing for the hierarchical categorization of
products.
TODO: We want a setting allowing us to limit the nestedness of categories.
For 'navigational' reasons, a number of 3 should be a reasonable default.
"""
|
# Copyright (C) 2010-2011 Mathijs de Bruin <mathijs@mathijsfietst.nl>
#
# This file is part of django-webshop.
#
# django-webshop is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Django-webshop, by default, contains base classes for two kinds of categories:
* Simple categories, which define a base class for products that belong to
exactly one category.
* Advanced categories, that belong to zero or more categories.
Furthermore, generic abstract base models are defined for 'normal' categories
and for nested categories, allowing for the hierarchical categorization of
products.
"""
|
agpl-3.0
|
Python
|
f5fad49e0b20e54e01fe4d9ae69be0694d7878f9
|
add docstring to test setup, and move to the top
|
xpansa/sale-workflow,kittiu/sale-workflow,thomaspaulb/sale-workflow,diagramsoftware/sale-workflow,adhoc-dev/sale-workflow,BT-cserra/sale-workflow,grap/sale-workflow,brain-tec/sale-workflow,BT-fgarbely/sale-workflow,Endika/sale-workflow,jabibi/sale-workflow,acsone/sale-workflow,kittiu/sale-workflow,brain-tec/sale-workflow,damdam-s/sale-workflow,akretion/sale-workflow,VitalPet/sale-workflow,Antiun/sale-workflow,Rona111/sale-workflow,anas-taji/sale-workflow,richard-willowit/sale-workflow,clubit/sale-workflow,luistorresm/sale-workflow,numerigraphe/sale-workflow,anybox/sale-workflow,BT-ojossen/sale-workflow,guewen/sale-workflow,open-synergy/sale-workflow,Eficent/sale-workflow,factorlibre/sale-workflow,BT-jmichaud/sale-workflow,numerigraphe/sale-workflow,ddico/sale-workflow,acsone/sale-workflow,alexsandrohaag/sale-workflow,jjscarafia/sale-workflow,credativUK/sale-workflow,akretion/sale-workflow,fevxie/sale-workflow
|
sale_exception_nostock/tests/test_dropshipping_skip_check.py
|
sale_exception_nostock/tests/test_dropshipping_skip_check.py
|
# Author: Leonardo Pistone
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openerp.tests.common import TransactionCase
class TestDropshippingSkipCheck(TransactionCase):
def setUp(self):
"""Set up an dropshipping sale order line.
To do that, mock the computed source location to be a supplier.
"""
super(TestDropshippingSkipCheck, self).setUp()
source_loc = self.env['stock.location'].new({'usage': 'supplier'})
self.order_line = self.env['sale.order.line'].new()
self.order_line._get_line_location = lambda: source_loc
def test_dropshipping_sale_can_always_be_delivered(self):
self.assertIs(True, self.order_line.can_command_at_delivery_date())
def test_dropshipping_sale_does_not_affect_future_orders(self):
self.assertIs(False, self.order_line.future_orders_are_affected())
|
# Author: Leonardo Pistone
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openerp.tests.common import TransactionCase
class TestDropshippingSkipCheck(TransactionCase):
def test_dropshipping_sale_can_always_be_delivered(self):
self.assertIs(True, self.order_line.can_command_at_delivery_date())
def test_dropshipping_sale_does_not_affect_future_orders(self):
self.assertIs(False, self.order_line.future_orders_are_affected())
def setUp(self):
super(TestDropshippingSkipCheck, self).setUp()
source_loc = self.env['stock.location'].new({'usage': 'supplier'})
self.order_line = self.env['sale.order.line'].new()
self.order_line._get_line_location = lambda: source_loc
|
agpl-3.0
|
Python
|
8fb97bc0b3a22b912958974636051447170a0b02
|
Add user_account to the user profile admin as a read-only field.
|
praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go
|
go/base/admin.py
|
go/base/admin.py
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import ugettext_lazy as _
from go.base.models import GoUser, UserProfile, UserOrganisation
from go.base.forms import GoUserCreationForm, GoUserChangeForm
class UserProfileInline(admin.StackedInline):
model = UserProfile
fields = ('organisation', 'is_admin', 'user_account')
readonly_fields = ('user_account',)
can_delete = False
class GoUserAdmin(UserAdmin):
# The forms to add and change user instances
inlines = (UserProfileInline,)
# The fields to be used in displaying the User model.
# These override the definitions on the base UserAdmin
# that reference the removed 'username' field
fieldsets = (
(None, {'fields': ('email', 'password')}),
(_('Personal info'), {'fields': ('first_name', 'last_name')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2')}
),
)
form = GoUserChangeForm
add_form = GoUserCreationForm
list_display = ('email', 'first_name', 'last_name', 'is_superuser',
'is_staff', 'is_active')
search_fields = ('email', 'first_name', 'last_name')
ordering = ('email',)
class UserProfileAdmin(admin.ModelAdmin):
list_display = ('user', 'organisation', 'is_admin')
admin.site.register(GoUser, GoUserAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(UserOrganisation)
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import ugettext_lazy as _
from go.base.models import GoUser, UserProfile, UserOrganisation
from go.base.forms import GoUserCreationForm, GoUserChangeForm
class UserProfileInline(admin.StackedInline):
model = UserProfile
fields = ('organisation', 'is_admin')
can_delete = False
class GoUserAdmin(UserAdmin):
# The forms to add and change user instances
inlines = (UserProfileInline,)
# The fields to be used in displaying the User model.
# These override the definitions on the base UserAdmin
# that reference the removed 'username' field
fieldsets = (
(None, {'fields': ('email', 'password')}),
(_('Personal info'), {'fields': ('first_name', 'last_name')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2')}
),
)
form = GoUserChangeForm
add_form = GoUserCreationForm
list_display = ('email', 'first_name', 'last_name', 'is_superuser',
'is_staff', 'is_active')
search_fields = ('email', 'first_name', 'last_name')
ordering = ('email',)
class UserProfileAdmin(admin.ModelAdmin):
list_display = ('user', 'organisation', 'is_admin')
admin.site.register(GoUser, GoUserAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(UserOrganisation)
|
bsd-3-clause
|
Python
|
56d3db6aae71c88ff8b55bb1d173abc025be7e8c
|
Add test of a write command
|
prophile/jacquard,prophile/jacquard
|
jacquard/tests/test_cli.py
|
jacquard/tests/test_cli.py
|
import io
import unittest.mock
import contextlib
import textwrap
from jacquard.cli import main
from jacquard.storage.dummy import DummyStore
def test_smoke_cli_help():
try:
output = io.StringIO()
with contextlib.redirect_stdout(output):
main(['--help'])
except SystemExit:
pass
assert output.getvalue().startswith("usage: ")
def test_help_message_when_given_no_subcommand():
try:
output = io.StringIO()
with contextlib.redirect_stdout(output):
main([])
except SystemExit:
pass
assert output.getvalue().startswith("usage: ")
def test_run_basic_command():
config = unittest.mock.Mock()
config.storage = DummyStore('', data={
'foo': 'bar',
})
output = io.StringIO()
with contextlib.redirect_stdout(output):
main(['storage-dump'], config=config)
assert output.getvalue().strip() == textwrap.dedent("""
foo
===
'bar'
"""
).strip()
def test_run_write_command():
config = unittest.mock.Mock()
config.storage = DummyStore('', data={})
output = io.StringIO()
with contextlib.redirect_stdout(output):
main(['set-default', 'foo', '"bar"'], config=config)
assert output.getvalue() == ''
assert config.storage.data == {'defaults': '{"foo": "bar"}'}
|
import io
import unittest.mock
import contextlib
import textwrap
from jacquard.cli import main
from jacquard.storage.dummy import DummyStore
def test_smoke_cli_help():
try:
output = io.StringIO()
with contextlib.redirect_stdout(output):
main(['--help'])
except SystemExit:
pass
assert output.getvalue().startswith("usage: ")
def test_help_message_when_given_no_subcommand():
try:
output = io.StringIO()
with contextlib.redirect_stdout(output):
main([])
except SystemExit:
pass
assert output.getvalue().startswith("usage: ")
def test_run_basic_command():
config = unittest.mock.Mock()
config.storage = DummyStore('', data={
'foo': 'bar',
})
output = io.StringIO()
with contextlib.redirect_stdout(output):
main(['storage-dump'], config=config)
assert output.getvalue().strip() == textwrap.dedent("""
foo
===
'bar'
"""
).strip()
|
mit
|
Python
|
1d31282a9781e8eef4aafc0549c01056d4fc03d0
|
Bump version.
|
armet/python-armet
|
armet/_version.py
|
armet/_version.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import, division
__version_info__ = (0, 4, 22)
__version__ = '.'.join(map(str, __version_info__))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import, division
__version_info__ = (0, 4, 21)
__version__ = '.'.join(map(str, __version_info__))
|
mit
|
Python
|
cec594e525fb889029b85b1f92f89170ca330332
|
Remove unnecessary "is not supported" verbiage.
|
rht/zulip,punchagan/zulip,showell/zulip,zulip/zulip,punchagan/zulip,punchagan/zulip,showell/zulip,kou/zulip,kou/zulip,punchagan/zulip,eeshangarg/zulip,zulip/zulip,kou/zulip,eeshangarg/zulip,hackerkid/zulip,eeshangarg/zulip,andersk/zulip,andersk/zulip,kou/zulip,showell/zulip,zulip/zulip,andersk/zulip,showell/zulip,zulip/zulip,kou/zulip,hackerkid/zulip,andersk/zulip,eeshangarg/zulip,hackerkid/zulip,rht/zulip,zulip/zulip,rht/zulip,hackerkid/zulip,kou/zulip,eeshangarg/zulip,kou/zulip,rht/zulip,showell/zulip,andersk/zulip,rht/zulip,punchagan/zulip,eeshangarg/zulip,hackerkid/zulip,punchagan/zulip,andersk/zulip,hackerkid/zulip,eeshangarg/zulip,zulip/zulip,punchagan/zulip,showell/zulip,rht/zulip,hackerkid/zulip,showell/zulip,andersk/zulip,zulip/zulip,rht/zulip
|
zerver/webhooks/trello/view/__init__.py
|
zerver/webhooks/trello/view/__init__.py
|
# Webhooks for external integrations.
from typing import Any, Mapping, Optional, Tuple
import orjson
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view, return_success_on_head_request
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import UnexpectedWebhookEventType, check_send_webhook_message
from zerver.models import UserProfile
from .board_actions import SUPPORTED_BOARD_ACTIONS, process_board_action
from .card_actions import IGNORED_CARD_ACTIONS, SUPPORTED_CARD_ACTIONS, process_card_action
@api_key_only_webhook_view('Trello')
@return_success_on_head_request
@has_request_variables
def api_trello_webhook(request: HttpRequest,
user_profile: UserProfile,
payload: Mapping[str, Any]=REQ(argument_type='body')) -> HttpResponse:
payload = orjson.loads(request.body)
action_type = payload['action'].get('type')
message = get_subject_and_body(payload, action_type)
if message is None:
return json_success()
else:
subject, body = message
check_send_webhook_message(request, user_profile, subject, body)
return json_success()
def get_subject_and_body(payload: Mapping[str, Any], action_type: str) -> Optional[Tuple[str, str]]:
if action_type in SUPPORTED_CARD_ACTIONS:
return process_card_action(payload, action_type)
if action_type in IGNORED_CARD_ACTIONS:
return None
if action_type in SUPPORTED_BOARD_ACTIONS:
return process_board_action(payload, action_type)
raise UnexpectedWebhookEventType("Trello", action_type)
|
# Webhooks for external integrations.
from typing import Any, Mapping, Optional, Tuple
import orjson
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view, return_success_on_head_request
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import UnexpectedWebhookEventType, check_send_webhook_message
from zerver.models import UserProfile
from .board_actions import SUPPORTED_BOARD_ACTIONS, process_board_action
from .card_actions import IGNORED_CARD_ACTIONS, SUPPORTED_CARD_ACTIONS, process_card_action
@api_key_only_webhook_view('Trello')
@return_success_on_head_request
@has_request_variables
def api_trello_webhook(request: HttpRequest,
user_profile: UserProfile,
payload: Mapping[str, Any]=REQ(argument_type='body')) -> HttpResponse:
payload = orjson.loads(request.body)
action_type = payload['action'].get('type')
message = get_subject_and_body(payload, action_type)
if message is None:
return json_success()
else:
subject, body = message
check_send_webhook_message(request, user_profile, subject, body)
return json_success()
def get_subject_and_body(payload: Mapping[str, Any], action_type: str) -> Optional[Tuple[str, str]]:
if action_type in SUPPORTED_CARD_ACTIONS:
return process_card_action(payload, action_type)
if action_type in IGNORED_CARD_ACTIONS:
return None
if action_type in SUPPORTED_BOARD_ACTIONS:
return process_board_action(payload, action_type)
raise UnexpectedWebhookEventType("Trello", f'{action_type} is not supported')
|
apache-2.0
|
Python
|
67c40fff7813b91b874c5fada042bfc0c6990d52
|
Bump version
|
thombashi/typepy
|
typepy/__version__.py
|
typepy/__version__.py
|
# encoding: utf-8
from datetime import datetime
__author__ = "Tsuyoshi Hombashi"
__copyright__ = "Copyright 2017-{}, {}".format(datetime.now().year, __author__)
__license__ = "MIT License"
__version__ = "0.3.2"
__maintainer__ = __author__
__email__ = "tsuyoshi.hombashi@gmail.com"
|
# encoding: utf-8
from datetime import datetime
__author__ = "Tsuyoshi Hombashi"
__copyright__ = "Copyright 2017-{}, {}".format(datetime.now().year, __author__)
__license__ = "MIT License"
__version__ = "0.3.1"
__maintainer__ = __author__
__email__ = "tsuyoshi.hombashi@gmail.com"
|
mit
|
Python
|
7f774d08ecf9d64a732a8471dd6f36b9d0e2826a
|
Update entity_main.py
|
madhurilalitha/Python-Projects
|
EntityExtractor/src/entity_main.py
|
EntityExtractor/src/entity_main.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created by Lalitha Madhuri Putchala on Dec 10 2017
"""
import spacy
import docx2txt
import re
from docx import Document
from utils import get_sentence_tokens, tag_text, highlight_text
class Entity:
def __init__(self):
self.raw_data = docx2txt.process('Contract_Input.docx')
self.doc = Document('Contract_Input.docx')
def highlight_address_fields(self):
# extract street address/zipcodes/ proper format address and improper format address using python regex
street_address_exp = re.compile(
u'\d{1,4} [\w\s]{1,20}(?:street|st|avenue|ave|road|rd|highway|hwy|square|sq|trail|trl|drive|dr|court|ct|park|parkway|pkwy|circle|cir|boulevard|blvd)\W?(?=\D|$)',
re.IGNORECASE)
street_addresses = re.findall(street_address_exp, self.raw_data)
zip_code_exp = re.compile(r'\b\d{5}(?:[-\s]\d{4})?\b')
zip_codes = re.findall(zip_code_exp, self.raw_data)
proper_address_exp = "[0-9]{1,5} .+, .+, [A-Z]{2} [0-9]{5}"
proper_addresses = re.findall(proper_address_exp, self.raw_data)
# logic to handle the improper format address instead of using regex functions
sentence_tokens = get_sentence_tokens(self.raw_data)
for i in range(len(sentence_tokens)):
if sentence_tokens[i][0] == 'Address:':
improper_format = sentence_tokens[i + 1][0]
address_details = list()
address_details.extend(street_addresses)
address_details.extend(zip_codes)
address_details.extend(proper_addresses)
address_details.append(improper_format)
# highlight address fields
for each in address_details:
highlight_text(self.doc, each.strip())
def highlight_contact_details(self):
contact_details = list()
# Get emails and phone numbers
emails = re.findall(r'[\w\.-]+@[\w\.-]+', self.raw_data)
phonenumbers = re.findall(
r'\d{3}[-\.\s]??\d{3}[-\.\s]??\d{4}|\(\d{3}\)\s*\d{3}[-\.\s]??\d{4}|\d{3}[-\.\s]??\d{4}', self.raw_data)
contact_details.extend(emails)
contact_details.extend(phonenumbers)
for contact in contact_details:
highlight_text(self.doc, contact)
def highlight_dates(self):
# Get dates
match = re.search(r'(\d+/\d+/\d+)', self.raw_data)
highlight_text(self.doc, match.group(1))
def tag_person_entities(self):
# use pre-trained spacy models to get the 'PERSON' entities
model = spacy.load('en_core_web_sm')
mydata = model(self.raw_data)
person_labels = list()
for each in mydata.ents:
if each.label_ == 'PERSON':
person_labels.append(each.text)
unique_person_labels = set(person_labels)
for label in unique_person_labels:
tag_text(self.doc, label)
print (person_labels)
return len(person_labels)
def save_document(self):
self.doc.save('Contract_Output.docx')
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created by Lalitha Madhuri Putchala on Dec 10 2017
"""
import unittest
from entity_main import Entity
from docx import Document
import docx2txt
class TestEntity(unittest.TestCase):
'''The below function verifies the basic sanity functionality of the program
by validating the word count in the document before and after the highlighting
of the text'''
def test_sanity(self):
et = Entity()
et.highlight_address_fields()
et.highlight_contact_details()
et.highlight_dates()
person_count = et.tag_person_entities()
et.save_document()
# load the new document with highlighted text
new_raw_data = docx2txt.process('Contract_Output.docx')
new_cnt = 0
word_tokens = new_raw_data.split(' ')
for each_token in word_tokens:
if '[PERSON]' in each_token:
new_cnt +=1
self.assertEqual(person_count, new_cnt)
if __name__ == '__main__':
unittest.main()
|
mit
|
Python
|
4b14f12fa8bb6dca7ad91f187e7765bef27c0d65
|
Add some options
|
thrive-refugee/thrive-refugee,thrive-refugee/thrive-refugee,thrive-refugee/thrive-refugee
|
classes/admin.py
|
classes/admin.py
|
from django.contrib import admin
from classes.models import Attendee
from classes.models import Attendance
from classes.models import Session
from classes.models import WalkinClass
class AttendanceInline(admin.TabularInline):
model = Attendance
extra = 1
verbose_name = 'Attendee'
verbose_name_plural = 'Attendees'
fields = ('attendee', 'start_date_time', "stop_date_time", 'notes')
search_fields = 'name', 'phone'
class SessionInline(admin.TabularInline):
model = Session
extra = 1
fields = ('start_date_time', 'stop_date_time', 'teacher')
class AttendeeAdmin(admin.ModelAdmin):
pass
class SessionAdmin(admin.ModelAdmin):
inlines = [
AttendanceInline,
]
fields = ('walk_in_class','teacher', 'start_date_time', "stop_date_time", )
list_display= ('walk_in_class', 'start_date_time',)
date_hierarchy = 'start_date_time'
list_filter = ['walk_in_class', 'start_date_time', 'teacher']
ordering = ['-start_date_time']
class WalkinClassAdmin(admin.ModelAdmin):
inlines = [
SessionInline,
]
admin.site.register(Attendee, AttendeeAdmin)
admin.site.register(Session, SessionAdmin)
admin.site.register(WalkinClass, WalkinClassAdmin)
|
from django.contrib import admin
from classes.models import Attendee
from classes.models import Attendance
from classes.models import Session
from classes.models import WalkinClass
class AttendanceInline(admin.TabularInline):
model = Attendance
extra = 1
verbose_name = 'Attendee'
verbose_name_plural = 'Attendees'
fields = ('attendee', 'start_date_time', "stop_date_time", 'notes')
class SessionInline(admin.TabularInline):
model = Session
extra = 1
fields = ('start_date_time', 'stop_date_time', 'teacher')
class AttendeeAdmin(admin.ModelAdmin):
pass
class SessionAdmin(admin.ModelAdmin):
inlines = [
AttendanceInline,
]
fields = ('walk_in_class','teacher', 'start_date_time', "stop_date_time", )
list_display= ('walk_in_class', 'start_date_time',)
class WalkinClassAdmin(admin.ModelAdmin):
inlines = [
SessionInline,
]
admin.site.register(Attendee, AttendeeAdmin)
admin.site.register(Session, SessionAdmin)
admin.site.register(WalkinClass, WalkinClassAdmin)
|
mit
|
Python
|
aa681b4a36ce36c53933f3834eec9c721d6029cf
|
Update docker images utils
|
polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon
|
polyaxon/docker_images/image_info.py
|
polyaxon/docker_images/image_info.py
|
import logging
from typing import Any, Tuple
import conf
from constants.images_tags import LATEST_IMAGE_TAG
_logger = logging.getLogger('polyaxon.dockerizer.images')
def get_experiment_image_info(experiment: 'Experiment') -> Tuple[str, str]:
"""Return the image name and image tag for an experiment"""
project_name = experiment.project.name
repo_name = project_name
image_name = '{}/{}'.format(conf.get('REGISTRY_HOST'), repo_name)
image_tag = experiment.code_reference.commit
return image_name, image_tag
def get_job_image_info(project: 'Project', job: Any) -> Tuple[str, str]:
"""Return the image name and image tag for a job"""
project_name = project.name
repo_name = project_name
image_name = '{}/{}'.format(conf.get('REGISTRY_HOST'), repo_name)
try:
last_commit = project.repo.last_commit
except ValueError:
raise ValueError('Repo was not found for project `{}`.'.format(project))
return image_name, last_commit[0]
def get_notebook_image_info(project: 'Project', job: Any) -> Tuple[str, str]:
"""Return the image name and image tag for a job"""
image_name, _ = get_job_image_info(project, job)
return image_name, LATEST_IMAGE_TAG
def get_project_image_name(project_name: str, project_id: int) -> str:
return '{}/{}_{}'.format(conf.get('REGISTRY_HOST'),
project_name.lower(),
project_id)
def get_project_image_info(project_name: str, project_id: int, image_tag: str) -> Tuple[str, str]:
return get_project_image_name(project_name=project_name, project_id=project_id), image_tag
def get_project_tagged_image(project_name: str, project_id: int, image_tag: str) -> str:
image_name, image_tag = get_project_image_info(project_name=project_name,
project_id=project_id,
image_tag=image_tag)
return '{}:{}'.format(image_name, image_tag)
def get_image_name(build_job: 'BuildJob') -> str:
return get_project_image_name(project_name=build_job.project.name,
project_id=build_job.project.id)
def get_image_info(build_job: 'BuildJob') -> Tuple[str, str]:
return get_project_image_info(project_name=build_job.project.name,
project_id=build_job.project.id,
image_tag=build_job.uuid.hex)
def get_tagged_image(build_job: 'BuildJob') -> str:
return get_project_tagged_image(project_name=build_job.project.name,
project_id=build_job.project.id,
image_tag=build_job.uuid.hex)
|
import logging
from typing import Any, Tuple
import conf
from constants.images_tags import LATEST_IMAGE_TAG
_logger = logging.getLogger('polyaxon.dockerizer.images')
def get_experiment_image_info(experiment: 'Experiment') -> Tuple[str, str]:
"""Return the image name and image tag for an experiment"""
project_name = experiment.project.name
repo_name = project_name
image_name = '{}/{}'.format(conf.get('REGISTRY_HOST'), repo_name)
image_tag = experiment.code_reference.commit
return image_name, image_tag
def get_job_image_info(project: 'Project', job: Any)-> Tuple[str, str]:
"""Return the image name and image tag for a job"""
project_name = project.name
repo_name = project_name
image_name = '{}/{}'.format(conf.get('REGISTRY_HOST'), repo_name)
try:
last_commit = project.repo.last_commit
except ValueError:
raise ValueError('Repo was not found for project `{}`.'.format(project))
return image_name, last_commit[0]
def get_notebook_image_info(project: 'Project', job: Any) -> Tuple[str, str]:
"""Return the image name and image tag for a job"""
image_name, _ = get_job_image_info(project, job)
return image_name, LATEST_IMAGE_TAG
def get_image_name(build_job: 'BuildJob') -> str:
return '{}/{}_{}'.format(conf.get('REGISTRY_HOST'),
build_job.project.name.lower(),
build_job.project.id)
def get_image_info(build_job: 'BuildJob') -> Tuple[str, str]:
return get_image_name(build_job=build_job), build_job.uuid.hex
def get_tagged_image(build_job: 'BuildJob') -> str:
image_name, image_tag = get_image_info(build_job)
return '{}:{}'.format(image_name, image_tag)
|
apache-2.0
|
Python
|
7307a4b19b09f4408f569c580955f5c7d2af5f73
|
Update version number
|
auth0/auth0-python,auth0/auth0-python
|
auth0/__init__.py
|
auth0/__init__.py
|
__version__ = '2.0.0b4'
|
__version__ = '2.0.0b3'
|
mit
|
Python
|
443d56fdd2e588c11c2a1e3a685912b712e37d44
|
Make sure all fields are grabbed.
|
e-koch/canfar_scripts,e-koch/canfar_scripts
|
split/casanfar_split.py
|
split/casanfar_split.py
|
import os
import numpy as np
import sys
SDM_name = str(sys.argv[4])
print "Inputted MS: "+SDM_name
# SDM_name = '14B-088.sb30023144.eb30070731.57002.919034293984'
# Set up some useful variables (these will be altered later on)
msfile = SDM_name + '.ms'
hisplitms = SDM_name + '.hi.ms'
splitms = SDM_name + '.hi.src.split.ms'
pathname = os.environ.get('CASAPATH').split()[0]
pipepath = '/home/ekoch/pipe_scripts/'
source = 'M33*'
# VOS stuff
vos_dir = '../vos/'
vos_proc = './'
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%%&%&%&%&%&%&%%&%
# Find the 21cm spw and check if the obs
# is single pointing or mosaic
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%%&%&%&%&%&%&%%&%
print "Find HI spw..."
# But first find the spw corresponding to it
tb.open(vos_dir+msfile+'/SPECTRAL_WINDOW')
freqs = tb.getcol('REF_FREQUENCY')
nchans = tb.getcol('NUM_CHAN')
tb.close()
spws = range(0, len(freqs))
# Select the 21cm
sel = np.where((freqs > 1.40*10**9) & (freqs < 1.43*10**9))
hispw = str(spws[sel[0][0]])
freq = freqs[sel[0][0]]
nchan = nchans[sel[0][0]]
print "Selected spw "+str(hispw)
print "with frequency "+str(freq)
print "and "+str(nchan)+" channels"
print "Starting split the HI line"
# Mosaic or single pointing?
tb.open(vos_dir+msfile+'/FIELD')
names = tb.getcol('NAME')
tb.close()
moscount = 0
for name in names:
chsrc = name.find(source)
if chsrc != -1:
moscount = moscount+1
if moscount > 1:
imagermode = "mosaic"
else:
imagermode = "csclean"
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
# Split the corrected source data from the rest
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
print "Starting source split..."
#os.system('md5sum $(find '+vos_dir+hisplitms+') > '+vos_proc+hisplitms+'.md5')
# os.system('rm -rf '+vos_proc+splitms)
default('split')
vis = vos_dir+msfile
outputvis = vos_proc+hisplitms
field = source
spw = hispw
datacolumn = 'corrected'
keepflags = False
print vis
print outputvis
print field
print spw
split()
print "Created splitted-source .ms "+hisplitms
|
import os
import numpy as np
import sys
SDM_name = str(sys.argv[4])
print "Inputted MS: "+SDM_name
# SDM_name = '14B-088.sb30023144.eb30070731.57002.919034293984'
# Set up some useful variables (these will be altered later on)
msfile = SDM_name + '.ms'
hisplitms = SDM_name + '.hi.ms'
splitms = SDM_name + '.hi.src.split.ms'
pathname = os.environ.get('CASAPATH').split()[0]
pipepath = '/home/ekoch/pipe_scripts/'
source = 'M33'
# VOS stuff
vos_dir = '../vos/'
vos_proc = './'
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%%&%&%&%&%&%&%%&%
# Find the 21cm spw and check if the obs
# is single pointing or mosaic
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%%&%&%&%&%&%&%%&%
print "Find HI spw..."
# But first find the spw corresponding to it
tb.open(vos_dir+msfile+'/SPECTRAL_WINDOW')
freqs = tb.getcol('REF_FREQUENCY')
nchans = tb.getcol('NUM_CHAN')
tb.close()
spws = range(0, len(freqs))
# Select the 21cm
sel = np.where((freqs > 1.40*10**9) & (freqs < 1.43*10**9))
hispw = str(spws[sel[0][0]])
freq = freqs[sel[0][0]]
nchan = nchans[sel[0][0]]
print "Selected spw "+str(hispw)
print "with frequency "+str(freq)
print "and "+str(nchan)+" channels"
print "Starting split the HI line"
# Mosaic or single pointing?
tb.open(vos_dir+msfile+'/FIELD')
names = tb.getcol('NAME')
tb.close()
moscount = 0
for name in names:
chsrc = name.find(source)
if chsrc != -1:
moscount = moscount+1
if moscount > 1:
imagermode = "mosaic"
else:
imagermode = "csclean"
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
# Split the corrected source data from the rest
# %&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%&%
print "Starting source split..."
#os.system('md5sum $(find '+vos_dir+hisplitms+') > '+vos_proc+hisplitms+'.md5')
# os.system('rm -rf '+vos_proc+splitms)
default('split')
vis = vos_dir+msfile
outputvis = vos_proc+hisplitms
field = source
spw = hispw
datacolumn = 'corrected'
keepflags = False
print vis
print outputvis
print field
print spw
split()
print "Created splitted-source .ms "+hisplitms
|
mit
|
Python
|
4e0d90fc157760606ae8503762f10bdef30bff8c
|
Remove trailing slashes
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
bluebottle/impact/urls/api.py
|
bluebottle/impact/urls/api.py
|
from django.conf.urls import url
from bluebottle.impact.views import (
ImpactTypeList,
ImpactGoalList,
ImpactGoalDetail
)
urlpatterns = [
url(r'^types$', ImpactTypeList.as_view(), name='impact-type-list'),
url(r'^goals$', ImpactGoalList.as_view(), name='impact-goal-list'),
url(
r'^goals/(?P<pk>\d+)$',
ImpactGoalDetail.as_view(),
name='impact-goal-details'
)
]
|
from django.conf.urls import url
from bluebottle.impact.views import (
ImpactTypeList,
ImpactGoalList,
ImpactGoalDetail
)
urlpatterns = [
url(r'^types/$', ImpactTypeList.as_view(), name='impact-type-list'),
url(r'^goals/$', ImpactGoalList.as_view(), name='impact-goal-list'),
url(
r'^goals/(?P<pk>\d+)/$',
ImpactGoalDetail.as_view(),
name='impact-goal-details'
)
]
|
bsd-3-clause
|
Python
|
0dd21b0f13aa7bf4cc3061dca216c65cf73975e5
|
Make registration reports filterable and harmonize URL
|
knowledgecommonsdc/kcdc3,knowledgecommonsdc/kcdc3,knowledgecommonsdc/kcdc3,knowledgecommonsdc/kcdc3,knowledgecommonsdc/kcdc3,knowledgecommonsdc/kcdc3
|
kcdc3/apps/classes/urls.py
|
kcdc3/apps/classes/urls.py
|
from django.conf.urls import patterns, include, url
from models import Event, Registration
from views import EventListView, EventDetailView, ResponseTemplateView, EventArchiveView, SessionView, RegistrationListView, TeacherAdminListView, FilteredTeacherAdminListView
urlpatterns = patterns('kcdc3.apps.classes.views',
url(r'^$', EventListView.as_view()),
url(r'^staff/teachers/$', TeacherAdminListView.as_view()),
url(r'^staff/teachers/session/(?P<slug>[A-Za-z0-9_-]+)/$', FilteredTeacherAdminListView.as_view()),
url(r'^staff/registrations/session/(?P<slug>[A-Za-z0-9_-]+)/$', RegistrationListView.as_view()),
url(r'^(?P<slug>[0-9_-]+)/$', EventArchiveView.as_view()),
url(r'^(?P<slug>[0-9_-]+)/background/$', SessionView.as_view()),
url(r'^response/(?P<slug>[A-Za-z0-9_-]+)$', ResponseTemplateView.as_view()),
url(r'^(?P<slug>[A-Za-z0-9_-]+)/$', EventDetailView.as_view(model=Event,)),
url(r'^(?P<slug>[A-Za-z0-9_-]+)/register$', 'register'),
url(r'^(?P<slug>[A-Za-z0-9_-]+)/cancel$', 'cancel'),
url(r'^(?P<slug>[A-Za-z0-9_-]+)/facilitator$', 'facilitator'),
)
|
from django.conf.urls import patterns, include, url
from models import Event, Registration
from views import EventListView, EventDetailView, ResponseTemplateView, EventArchiveView, SessionView, RegistrationListView, TeacherAdminListView, FilteredTeacherAdminListView
urlpatterns = patterns('kcdc3.apps.classes.views',
url(r'^$', EventListView.as_view()),
url(r'^staff/teachers/$', TeacherAdminListView.as_view()),
url(r'^staff/teachers/session/(?P<slug>[A-Za-z0-9_-]+)$', FilteredTeacherAdminListView.as_view()),
url(r'^dashboard/registrations/(?P<slug>[A-Za-z0-9_-]+)$', RegistrationListView.as_view()),
url(r'^(?P<slug>[0-9_-]+)/$', EventArchiveView.as_view()),
url(r'^(?P<slug>[0-9_-]+)/background/$', SessionView.as_view()),
url(r'^response/(?P<slug>[A-Za-z0-9_-]+)$', ResponseTemplateView.as_view()),
url(r'^(?P<slug>[A-Za-z0-9_-]+)/$', EventDetailView.as_view(model=Event,)),
url(r'^(?P<slug>[A-Za-z0-9_-]+)/register$', 'register'),
url(r'^(?P<slug>[A-Za-z0-9_-]+)/cancel$', 'cancel'),
url(r'^(?P<slug>[A-Za-z0-9_-]+)/facilitator$', 'facilitator'),
)
|
mit
|
Python
|
80afbf3b5be1716553b93ee6ba57404d40e43a94
|
Remove multiple workers
|
haematologic/cellcounter,cellcounter/cellcounter,cellcounter/cellcounter,cellcounter/cellcounter,cellcounter/cellcounter,haematologic/cellcountr,haematologic/cellcounter,oghm2/hackdayoxford,haematologic/cellcounter,haematologic/cellcountr,oghm2/hackdayoxford
|
gunicorn_conf.py
|
gunicorn_conf.py
|
accesslog = '-'
access_log_format = '%({Host}i)s %(h)s %(l)s "%({X-Remote-User-Id}o)s: %({X-Remote-User-Name}o)s" %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s'
|
accesslog = '-'
access_log_format = '%({Host}i)s %(h)s %(l)s "%({X-Remote-User-Id}o)s: %({X-Remote-User-Name}o)s" %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s'
workers = 3
|
mit
|
Python
|
67346a13eb40d605da498b0bdba25ca661f08dd1
|
Remove unused imports
|
makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek
|
geotrek/feedback/templatetags/feedback_tags.py
|
geotrek/feedback/templatetags/feedback_tags.py
|
import json
from geotrek.feedback.models import PredefinedEmail, ReportStatus
from django import template
from django.conf import settings
register = template.Library()
@register.simple_tag
def suricate_management_enabled():
return settings.SURICATE_MANAGEMENT_ENABLED
@register.simple_tag
def suricate_workflow_enabled():
return settings.SURICATE_WORKFLOW_ENABLED
@register.simple_tag
def enable_report_colors_per_status():
return settings.ENABLE_REPORT_COLORS_PER_STATUS
@register.simple_tag
def status_ids_and_colors():
status_ids_and_colors = {
status.pk: {
"id": str(status.identifier),
"color": str(status.color)
}
for status in ReportStatus.objects.all()
}
return json.dumps(status_ids_and_colors)
@register.simple_tag
def predefined_emails():
predefined_emails = {
email.pk: {
"label": str(email.label),
"text": str(email.text)
}
for email in PredefinedEmail.objects.all()
}
return json.dumps(predefined_emails)
@register.simple_tag
def resolved_intervention_info(report):
if report:
username = "'?'"
intervention = report.report_interventions().first()
authors = intervention.authors
if authors:
user = authors.last() # oldest author is the one that created the intervention
if user.profile and user.profile.extended_username:
username = user.profile.extended_username
else:
username = user.username
resolved_intervention_info = {
"date": report.interventions.first().date.strftime("%d/%m/%Y") if report.interventions else None,
"username": username
}
return json.dumps(resolved_intervention_info)
return json.dumps({})
|
import json
from geotrek.feedback.models import PredefinedEmail, ReportStatus
from mapentity.models import LogEntry
from django import template
from django.conf import settings
register = template.Library()
@register.simple_tag
def suricate_management_enabled():
return settings.SURICATE_MANAGEMENT_ENABLED
@register.simple_tag
def suricate_workflow_enabled():
return settings.SURICATE_WORKFLOW_ENABLED
@register.simple_tag
def enable_report_colors_per_status():
return settings.ENABLE_REPORT_COLORS_PER_STATUS
@register.simple_tag
def status_ids_and_colors():
status_ids_and_colors = {
status.pk: {
"id": str(status.identifier),
"color": str(status.color)
}
for status in ReportStatus.objects.all()
}
return json.dumps(status_ids_and_colors)
@register.simple_tag
def predefined_emails():
predefined_emails = {
email.pk: {
"label": str(email.label),
"text": str(email.text)
}
for email in PredefinedEmail.objects.all()
}
return json.dumps(predefined_emails)
@register.simple_tag
def resolved_intervention_info(report):
if report:
username = "'?'"
intervention = report.report_interventions().first()
authors = intervention.authors
if authors:
user = authors.last() # oldest author is the one that created the intervention
if user.profile and user.profile.extended_username:
username = user.profile.extended_username
else:
username = user.username
resolved_intervention_info = {
"date": report.interventions.first().date.strftime("%d/%m/%Y") if report.interventions else None,
"username": username
}
return json.dumps(resolved_intervention_info)
return json.dumps({})
|
bsd-2-clause
|
Python
|
eb712d30a6231b416e33d02a125daddf5322d51e
|
Add API docs for the Exscript.util.syslog module.
|
maximumG/exscript,knipknap/exscript,maximumG/exscript,knipknap/exscript
|
src/Exscript/util/syslog.py
|
src/Exscript/util/syslog.py
|
# Copyright (C) 2007-2010 Samuel Abels.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
Send messages to a syslog server.
"""
import imp
import socket
# This way of loading a module prevents Python from looking in the
# current directory. (We need to avoid it due to the syslog module
# name collision.)
syslog = imp.load_module('syslog', *imp.find_module('syslog'))
def netlog(message,
source = None,
host = 'localhost',
port = 514,
priority = syslog.LOG_DEBUG,
facility = syslog.LOG_USER):
"""
Python's built in syslog module does not support networking, so
this is the alternative.
The source argument specifies the message source that is
documented on the receiving server. It defaults to "scriptname[pid]",
where "scriptname" is sys.argv[0], and pid is the current process id.
The priority and facility arguments are equivalent to those of
Python's built in syslog module.
@type source: str
@param source: The source address.
@type host: str
@param host: The IP address or hostname of the receiving server.
@type port: str
@param port: The TCP port number of the receiving server.
@type priority: int
@param priority: The message priority.
@type facility: int
@param facility: The message facility.
"""
if not source:
source = '%s[%s]' + (sys.argv[0], os.getpid())
data = '<%d>%s: %s' % (priority + facility, source, message)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(data, (host, port))
sock.close()
|
import imp, socket
# This way of loading a module prevents Python from looking in the
# current directory. (We need to avoid it due to the syslog module
# name collision.)
syslog = imp.load_module('syslog', *imp.find_module('syslog'))
def netlog(message,
source = None,
host = 'localhost',
port = 514,
priority = syslog.LOG_DEBUG,
facility = syslog.LOG_USER):
"""
Python's built in syslog module does not support networking, so
this is the alternative.
The source argument specifies the message source that is
documented on the receiving server. It defaults to "scriptname[pid]",
where "scriptname" is sys.argv[0], and pid is the current process id.
The priority and facility arguments are equivalent to those of
Python's built in syslog module.
@type source: str
@param source: The source address.
@type host: str
@param host: The IP address or hostname of the receiving server.
@type port: str
@param port: The TCP port number of the receiving server.
@type priority: int
@param priority: The message priority.
@type facility: int
@param facility: The message facility.
"""
if not source:
source = '%s[%s]' + (sys.argv[0], os.getpid())
data = '<%d>%s: %s' % (priority + facility, source, message)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(data, (host, port))
sock.close()
|
mit
|
Python
|
e05243983cb9167303a19e85a3c88f74da8e2612
|
Convert ipLocation function name to all lowercase
|
gdestuynder/MozDef,jeffbryner/MozDef,jeffbryner/MozDef,mpurzynski/MozDef,mpurzynski/MozDef,gdestuynder/MozDef,Phrozyn/MozDef,jeffbryner/MozDef,mozilla/MozDef,gdestuynder/MozDef,mpurzynski/MozDef,mozilla/MozDef,mpurzynski/MozDef,Phrozyn/MozDef,Phrozyn/MozDef,mozilla/MozDef,jeffbryner/MozDef,mozilla/MozDef,gdestuynder/MozDef,Phrozyn/MozDef
|
bot/slack/commands/ip_info.py
|
bot/slack/commands/ip_info.py
|
import netaddr
import os
from mozdef_util.geo_ip import GeoIP
def is_ip(ip):
try:
netaddr.IPNetwork(ip)
return True
except Exception:
return False
def ip_location(ip):
location = ""
try:
geoip_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../data/GeoLite2-City.mmdb")
geoip = GeoIP(geoip_data_dir)
geoDict = geoip.lookup_ip(ip)
if geoDict is not None:
if 'error' in geoDict:
return geoDict['error']
location = geoDict['country_name']
if geoDict['country_code'] in ('US'):
if geoDict['metro_code']:
location = location + '/{0}'.format(geoDict['metro_code'])
except Exception:
location = ""
return location
class command():
def __init__(self):
self.command_name = '!ipinfo'
self.help_text = 'Perform a geoip lookup on an ip address'
def handle_command(self, parameters):
response = ""
for ip_token in parameters:
if is_ip(ip_token):
ip = netaddr.IPNetwork(ip_token)[0]
if (not ip.is_loopback() and not ip.is_private() and not ip.is_reserved()):
response += "{0} location: {1}\n".format(ip_token, ip_location(ip_token))
else:
response += "{0}: hrm...loopback? private ip?\n".format(ip_token)
else:
response = "{0} is not an IP address".format(ip_token)
return response
|
import netaddr
import os
from mozdef_util.geo_ip import GeoIP
def is_ip(ip):
try:
netaddr.IPNetwork(ip)
return True
except Exception:
return False
def ipLocation(ip):
location = ""
try:
geoip_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../data/GeoLite2-City.mmdb")
geoip = GeoIP(geoip_data_dir)
geoDict = geoip.lookup_ip(ip)
if geoDict is not None:
if 'error' in geoDict:
return geoDict['error']
location = geoDict['country_name']
if geoDict['country_code'] in ('US'):
if geoDict['metro_code']:
location = location + '/{0}'.format(geoDict['metro_code'])
except Exception:
location = ""
return location
class command():
def __init__(self):
self.command_name = '!ipinfo'
self.help_text = 'Perform a geoip lookup on an ip address'
def handle_command(self, parameters):
response = ""
for ip_token in parameters:
if is_ip(ip_token):
ip = netaddr.IPNetwork(ip_token)[0]
if (not ip.is_loopback() and not ip.is_private() and not ip.is_reserved()):
response += "{0} location: {1}\n".format(ip_token, ipLocation(ip_token))
else:
response += "{0}: hrm...loopback? private ip?\n".format(ip_token)
else:
response = "{0} is not an IP address".format(ip_token)
return response
|
mpl-2.0
|
Python
|
859d5cd5ac60785f64a87353ae8f9170f5e29100
|
Make uri absolute, add get_release_data api
|
rocketDuck/folivora,rocketDuck/folivora,rocketDuck/folivora
|
folivora/utils/pypi.py
|
folivora/utils/pypi.py
|
#-*- coding: utf-8 -*-
"""
folivora.utils.pypi
~~~~~~~~~~~~~~~~~~~
Utilities to access pypi compatible servers.
"""
import time
import xmlrpclib
def get_seconds(hours):
"""Get number of seconds since epoch from now minus `hours`"""
return int(time.time() - (60 * 60) * hours)
DEFAULT_SERVER = 'http://pypi.python.org/pypi/'
class CheeseShop(object):
def __init__(self, server=DEFAULT_SERVER):
self.xmlrpc = xmlrpclib.Server(server)
def get_package_versions(self, package_name):
"""Fetch list of available versions for a package.
:param package_name: Name of the package to query.
"""
return self.xmlrpc.package_releases(package_name)
def get_package_list(self):
"""Fetch the master list of package names."""
return self.xmlrpc.list_packages()
def search(self, spec, operator):
"""Query using search spec."""
return self.xmlrpc.search(spec, operator.lower())
def get_changelog(self, hours):
"""Query the changelog.
:param hours: Hours from now to specify the changelog size.
"""
return self.xmlrpc.changelog(get_seconds(hours))
def get_updated_releases(self, hours):
"""Query all updated releases within `hours`.
:param hours: Specify the number of hours to find updated releases.
"""
return self.xmlrpc.updated_releases(get_seconds(hours))
def get_release_urls(self, package_name, version):
"""Query for all available release urls of `package_name`.
:param package_name: Name of the package.
:param version: Version of the package.
"""
return self.xmlrpc.release_urls(package_name, version)
def get_release_data(self, package_name, version=None):
"""Query for specific release data.
:param package_name: Name of the package.
:param version: Version to query the data. If `None`, it's latest
version will be used.
"""
if version is None:
version = self.get_package_versions(package_name)[-1]
return self.xmlrpc.release_data(package_name, version)
|
#-*- coding: utf-8 -*-
"""
folivora.utils.pypi
~~~~~~~~~~~~~~~~~~~
Utilities to access pypi compatible servers.
"""
import time
import xmlrpclib
def get_seconds(hours):
"""Get number of seconds since epoch from now minus `hours`"""
return int(time.time() - (60 * 60) * hours)
XML_RPC_SERVER = 'http://pypi.python.org/pypi'
class CheeseShop(object):
def __init__(self, server=XML_RPC_SERVER):
self.xmlrpc = xmlrpclib.Server(server)
def get_package_versions(self, package_name):
"""Fetch list of available versions for a package.
:param package_name: Name of the package to query.
"""
return self.xmlrpc.package_releases(package_name)
def get_package_list(self):
"""Fetch the master list of package names."""
return self.xmlrpc.list_packages()
def search(self, spec, operator):
"""Query using search spec."""
return self.xmlrpc.search(spec, operator.lower())
def get_changelog(self, hours):
"""Query the changelog.
:param hours: Hours from now to specify the changelog size.
"""
return self.xmlrpc.changelog(get_seconds(hours))
def get_updated_releases(self, hours):
"""Query all updated releases within `hours`.
:param hours: Specify the number of hours to find updated releases.
"""
return self.xmlrpc.updated_releases(get_seconds(hours))
def get_release_urls(self, package_name, version):
"""Query for all available release urls of `package_name`.
:param package_name: Name of the package.
:param version: Version of the package.
"""
return self.xmlrpc.release_urls(package_name, version)
|
isc
|
Python
|
30b991e78158f8dee25a34565493b1ca582d51c5
|
Simplify attribute check (menu items)
|
bittner/cmsplugin-zinnia,django-blog-zinnia/cmsplugin-zinnia,bittner/cmsplugin-zinnia,django-blog-zinnia/cmsplugin-zinnia,bittner/cmsplugin-zinnia,django-blog-zinnia/cmsplugin-zinnia
|
cmsplugin_zinnia/cms_toolbar.py
|
cmsplugin_zinnia/cms_toolbar.py
|
"""Toolbar extensions for CMS"""
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
class ZinniaToolbar(CMSToolbar):
def populate(self):
user = self.request.user
zinnia_menu = self.toolbar.get_or_create_menu(
'zinnia-menu', _('Zinnia'))
url = reverse('admin:zinnia_entry_add')
zinnia_menu.add_sideframe_item(
_('New entry'), url=url,
disabled=not user.has_perm('zinnia.add_entry'))
url = reverse('admin:zinnia_category_add')
zinnia_menu.add_sideframe_item(
_('New category'), url=url,
disabled=not user.has_perm('zinnia.add_category'))
zinnia_menu.add_break()
url = reverse('admin:zinnia_entry_changelist')
zinnia_menu.add_sideframe_item(
_('Entries list'), url=url,
disabled=not user.has_perm('zinnia.change_entry'))
url = reverse('admin:zinnia_category_changelist')
zinnia_menu.add_sideframe_item(
_('Categories list'), url=url,
disabled=not user.has_perm('zinnia.change_category'))
url = reverse('admin:tagging_tag_changelist')
zinnia_menu.add_sideframe_item(
_('Tags list'), url=url,
disabled=not user.has_perm('tagging.change_tag'))
# remove complete menu if all items are disabled
for item in zinnia_menu.get_items():
if not getattr(item, 'disabled', True):
return
self.toolbar.remove_item(zinnia_menu)
toolbar_pool.register(ZinniaToolbar)
|
"""Toolbar extensions for CMS"""
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
class ZinniaToolbar(CMSToolbar):
def populate(self):
user = self.request.user
zinnia_menu = self.toolbar.get_or_create_menu(
'zinnia-menu', _('Zinnia'))
url = reverse('admin:zinnia_entry_add')
zinnia_menu.add_sideframe_item(
_('New entry'), url=url,
disabled=not user.has_perm('zinnia.add_entry'))
url = reverse('admin:zinnia_category_add')
zinnia_menu.add_sideframe_item(
_('New category'), url=url,
disabled=not user.has_perm('zinnia.add_category'))
zinnia_menu.add_break()
url = reverse('admin:zinnia_entry_changelist')
zinnia_menu.add_sideframe_item(
_('Entries list'), url=url,
disabled=not user.has_perm('zinnia.change_entry'))
url = reverse('admin:zinnia_category_changelist')
zinnia_menu.add_sideframe_item(
_('Categories list'), url=url,
disabled=not user.has_perm('zinnia.change_category'))
url = reverse('admin:tagging_tag_changelist')
zinnia_menu.add_sideframe_item(
_('Tags list'), url=url,
disabled=not user.has_perm('tagging.change_tag'))
# remove complete menu if all items are disabled
for item in zinnia_menu.get_items():
if hasattr(item, 'disabled') and not item.disabled:
return
self.toolbar.remove_item(zinnia_menu)
toolbar_pool.register(ZinniaToolbar)
|
bsd-3-clause
|
Python
|
b3f926e013e81bb88e6634d453b31c5c30aac997
|
Add constant to distance scoring functions
|
JungeAlexander/cocoscore
|
cocoscore/ml/distance_scores.py
|
cocoscore/ml/distance_scores.py
|
from math import exp
def _distance_scorer(data_df, score_function):
distance_column = 'distance'
if distance_column not in data_df.columns:
raise ValueError(f'The given data_df does not have a {distance_column} column.')
distances = data_df.loc[:, distance_column]
return distances.apply(score_function)
def reciprocal_distance(data_df, *_):
"""
Computes reciprocal distance scores for a given DataFrame of co-mentions.
The reciprocal distance score is defined as 1/x where x is the the distance of the closest matches of an
entity pair of interest.
:param data_df: pandas DataFrame, the data set loaded using
tools.data_tools.load_data_frame(..., match_distance=True)
:returns a pandas Series of distance scores
"""
return polynomial_decay_distance(data_df, 1, 0)
def constant_distance(data_df, *_):
"""
Returns a constant distance score of 1 for a given DataFrame of co-mentions.
:param data_df: pandas DataFrame, the data set loaded using
tools.data_tools.load_data_frame(..., match_distance=True)
:returns a pandas Series of distance scores
"""
return _distance_scorer(data_df, score_function=lambda x: 1.0)
def exponential_decay_distance(data_df, k, c):
"""
Computes exponentially decaying distance scores for a given DataFrame of co-mentions.
The exponentially decaying distance score is defined as exp(-k*x) + c where
x is the the distance of the closest matches of an
entity pair of interest and k is a positive constant.
:param data_df: pandas DataFrame, the data set loaded using
tools.data_tools.load_data_frame(..., match_distance=True)
:param k: float, a positive constant
:param c: float, a positive constant
:returns a pandas Series of distance scores
"""
return _distance_scorer(data_df, lambda x: exp(-k * x) + c)
def polynomial_decay_distance(data_df, k, c):
"""
Computes polynomially decaying distance scores for a given DataFrame of co-mentions.
The polynomially decaying distance score is defined as x^(-k) + c where
x is the the distance of the closest matches of an
entity pair of interest and k is a positive constant.
:param data_df: pandas DataFrame, the data set loaded using
tools.data_tools.load_data_frame(..., match_distance=True)
:param k: float, a positive constant
:param c: float, a positive constant
:returns a pandas Series of distance scores
"""
return _distance_scorer(data_df, lambda x: x ** (-k) + c)
|
from math import exp
def _distance_scorer(data_df, score_function):
distance_column = 'distance'
if distance_column not in data_df.columns:
raise ValueError(f'The given data_df does not have a {distance_column} column.')
distances = data_df.loc[:, distance_column]
return distances.apply(score_function)
def reciprocal_distance(data_df, *_):
"""
Computes reciprocal distance scores for a given DataFrame of co-mentions.
The reciprocal distance score is defined as 1/x where x is the the distance of the closest matches of an
entity pair of interest.
:param data_df: pandas DataFrame, the data set loaded using
tools.data_tools.load_data_frame(..., match_distance=True)
:returns a pandas Series of distance scores
"""
return polynomial_decay_distance(data_df, 1)
def constant_distance(data_df, *_):
"""
Returns a constant distance score of 1 for a given DataFrame of co-mentions.
:param data_df: pandas DataFrame, the data set loaded using
tools.data_tools.load_data_frame(..., match_distance=True)
:returns a pandas Series of distance scores
"""
return _distance_scorer(data_df, score_function=lambda x: 1.0)
def exponential_decay_distance(data_df, k):
"""
Computes exponentially decaying distance scores for a given DataFrame of co-mentions.
The exponentially decaying distance score is defined as exp(-k*x) where
x is the the distance of the closest matches of an
entity pair of interest and k is a positive constant.
:param data_df: pandas DataFrame, the data set loaded using
tools.data_tools.load_data_frame(..., match_distance=True)
:param k: float, a positive constant
:returns a pandas Series of distance scores
"""
return _distance_scorer(data_df, lambda x: exp(-k * x))
def polynomial_decay_distance(data_df, k):
"""
Computes polynomially decaying distance scores for a given DataFrame of co-mentions.
The polynomially decaying distance score is defined as x^(-k) where
x is the the distance of the closest matches of an
entity pair of interest and k is a positive constant.
:param data_df: pandas DataFrame, the data set loaded using
tools.data_tools.load_data_frame(..., match_distance=True)
:param k: float, a positive constant
:returns a pandas Series of distance scores
"""
return _distance_scorer(data_df, lambda x: x ** (-k))
|
mit
|
Python
|
33dd1a78a5bfdf0eca593816b15b34b86860c36f
|
install pip to bypass rally installation problem
|
CiscoSystems/os-sqe,CiscoSystems/os-sqe,CiscoSystems/os-sqe
|
lab/runners/RunnerRally.py
|
lab/runners/RunnerRally.py
|
from lab.runners import Runner
class RunnerRally(Runner):
def sample_config(self):
return {'cloud': 'cloud name', 'task-yaml': 'path to the valid task yaml file'}
def __init__(self, config):
from lab.WithConfig import read_config_from_file
super(RunnerRally, self).__init__(config=config)
self.cloud_name = config['cloud']
self.task_yaml_path = config['task-yaml']
self.task_body = read_config_from_file(yaml_path=self.task_yaml_path, is_as_string=True)
def execute(self, clouds, servers):
cloud = clouds[0]
server = servers[0]
open_rc_path = '{0}.openrc'.format(self.cloud_name)
results_path = 'rally-results.html'
task_path = 'rally-task.yaml'
venv_path = '~/venv_rally'
open_rc_body = cloud.create_open_rc()
server.create_user(new_username='rally')
server.put(string_to_put=open_rc_body, file_name=open_rc_path)
server.put(string_to_put=self.task_body, file_name=task_path)
repo_dir = server.clone_repo(repo_url='https://git.openstack.org/openstack/rally.git')
server.check_or_install_packages(package_names='libffi-devel gmp-devel postgresql-devel wget python-virtualenv')
server.run(command='sudo easy_install pip')
server.run(command='./install_rally.sh -y -d {0}'.format(venv_path), in_directory=repo_dir)
server.run(command='source {0} && {1}/bin/rally deployment create --fromenv --name {2}'.format(open_rc_path, venv_path, self.cloud_name))
server.run(command='{0}/bin/rally task start {1}'.format(venv_path, task_path))
server.run(command='{0}/bin/rally task report --out {1}'.format(venv_path, results_path))
server.get(remote_path=results_path, local_path=results_path)
self.get_artefacts(server=server)
|
from lab.runners import Runner
class RunnerRally(Runner):
def sample_config(self):
return {'cloud': 'cloud name', 'task-yaml': 'path to the valid task yaml file'}
def __init__(self, config):
from lab.WithConfig import read_config_from_file
super(RunnerRally, self).__init__(config=config)
self.cloud_name = config['cloud']
self.task_yaml_path = config['task-yaml']
self.task_body = read_config_from_file(yaml_path=self.task_yaml_path, is_as_string=True)
def execute(self, clouds, servers):
cloud = clouds[0]
server = servers[0]
open_rc_path = '{0}.openrc'.format(self.cloud_name)
results_path = 'rally-results.html'
task_path = 'rally-task.yaml'
venv_path = '~/venv_rally'
open_rc_body = cloud.create_open_rc()
server.create_user(new_username='rally')
server.put(string_to_put=open_rc_body, file_name=open_rc_path)
server.put(string_to_put=self.task_body, file_name=task_path)
repo_dir = server.clone_repo(repo_url='https://git.openstack.org/openstack/rally.git')
server.check_or_install_packages(package_names='libffi-devel gmp-devel postgresql-devel wget python-virtualenv')
server.run(command='./install_rally.sh -y -d {0}'.format(venv_path), in_directory=repo_dir)
server.run(command='source {0} && {1}/bin/rally deployment create --fromenv --name {2}'.format(open_rc_path, venv_path, self.cloud_name))
server.run(command='{0}/bin/rally task start {1}'.format(venv_path, task_path))
server.run(command='{0}/bin/rally task report --out {1}'.format(venv_path, results_path))
server.get(remote_path=results_path, local_path=results_path)
self.get_artefacts(server=server)
|
apache-2.0
|
Python
|
a390d2551a5e39ad35888c4b326f50212b60cabf
|
add description of exception
|
xsank/Pyeventbus,xsank/Pyeventbus
|
eventbus/exception.py
|
eventbus/exception.py
|
__author__ = 'Xsank'
class EventTypeError(Exception):
'''Event type is invalid!'''
def __str__(self):
return self.__doc__
class UnregisterError(Exception):
'''No listener to unregister!'''
def __str__(self):
return self.__doc__
|
__author__ = 'Xsank'
class EventTypeError(Exception):
'''Event type is invalid!'''
class UnregisterError(Exception):
'''No listener to unregister!'''
|
mit
|
Python
|
0f1fdb93c8005a26fcea10f708252a9e5f358270
|
add compare string in JRC
|
SubhasisDutta/JRC-Name-Parser,SubhasisDutta/CAMEO-JRC-Database,SubhasisDutta/CAMEO-JRC-Database,SubhasisDutta/CAMEO-JRC-Database,SubhasisDutta/CAMEO-JRC-Database,SubhasisDutta/CAMEO-JRC-Database
|
src/JRCFileParserService.py
|
src/JRCFileParserService.py
|
'''
Created on Jan 30, 2017
@author: Subhasis
'''
import csv
from MongoManager import MongoManager
class JRCFileParserService(object):
'''
This class takes care of reading the input file parsing the text line by line and pushing it into MongoDB.
'''
def __init__(self, file_path, db_config, schema, table, batch_size):
self.file_path = file_path
self.manager = MongoManager(schema, table, batch_size, db_config)
def process(self):
print "Reading File ", self.file_path
count_record = 0
entity_count = 0
similar_record = []
previous_record_id = '0'
with open(self.file_path, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter='\t')
for row in reader:
if previous_record_id != row[0]:
self.manager.pushRecords(self.getInsertObject(similar_record))
entity_count += 1
similar_record = []
similar_record.append(row)
previous_record_id = row[0]
count_record += 1
self.manager.pushRecords(self.getInsertObject(similar_record))
print "Records Processed ", count_record
print "Entity Processed ", entity_count
return self.manager.flushBatch()
def getInsertObject(self, data_list):
d = {}
d['id'] = int(data_list[0][0])
d['type'] = 'UNKNOWN'
if data_list[0][1] == 'P':
d['type'] = 'PERSON'
if data_list[0][1] == 'O':
d['type'] = 'ORGANIZATION'
variations = []
compare_strings = []
for r in data_list:
v = {}
v['lang'] = r[2]
v['name'] = r[3]
variations.append(v)
compare_strings.append(r[3].lower())
d['variations'] = variations
d['compare_strings'] = compare_strings
return d
|
'''
Created on Jan 30, 2017
@author: Subhasis
'''
import csv
from MongoManager import MongoManager
class JRCFileParserService(object):
'''
This class takes care of reading the input file parsing the text line by line and pushing it into MongoDB.
'''
def __init__(self, file_path, db_config, schema, table, batch_size):
self.file_path = file_path
self.manager = MongoManager(schema, table, batch_size, db_config)
def process(self):
print "Reading File ", self.file_path
count_record = 0
entity_count = 0
similar_record = []
previous_record_id = '0'
with open(self.file_path, 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter='\t')
for row in reader:
if previous_record_id != row[0]:
self.manager.pushRecords(self.getInsertObject(similar_record))
entity_count += 1
similar_record = []
similar_record.append(row)
previous_record_id = row[0]
count_record += 1
self.manager.pushRecords(self.getInsertObject(similar_record))
print "Records Processed ", count_record
print "Entity Processed ", entity_count
return self.manager.flushBatch()
def getInsertObject(self, data_list):
d = {}
d['id'] = int(data_list[0][0])
d['type'] = 'UNKNOWN'
if data_list[0][1] == 'P':
d['type'] = 'PERSON'
if data_list[0][1] == 'O':
d['type'] = 'ORGANIZATION'
variations = []
for r in data_list:
v = {}
v['lang'] = r[2]
v['name'] = r[3]
variations.append(v)
d['variations'] = variations
return d
|
apache-2.0
|
Python
|
0ff9ccacf20d2896353df906426db06ce8c24605
|
Update ASIC count from 7 to 10
|
archangdcc/avalon-extras,qinfengling/Avalon-extras,qinfengling/Avalon-extras,Canaan-Creative/Avalon-extras,qinfengling/Avalon-extras,archangdcc/avalon-extras,archangdcc/avalon-extras,Canaan-Creative/Avalon-extras,qinfengling/Avalon-extras,archangdcc/avalon-extras,Canaan-Creative/Avalon-extras,Canaan-Creative/Avalon-extras,archangdcc/avalon-extras,Canaan-Creative/Avalon-extras,qinfengling/Avalon-extras,archangdcc/avalon-extras,Canaan-Creative/Avalon-extras,qinfengling/Avalon-extras
|
scripts/avalon3-a3233-modular-test.py
|
scripts/avalon3-a3233-modular-test.py
|
#!/usr/bin/env python2.7
# This simple script was for test A3255 modular. there are 128 cores in one A3255 chip.
# If all cores are working the number should be 0.
# If some of them not working the number is the broken cores count.
from serial import Serial
from optparse import OptionParser
import binascii
import sys
parser = OptionParser()
parser.add_option("-s", "--serial", dest="serial_port", default="/dev/ttyUSB0", help="Serial port")
(options, args) = parser.parse_args()
ser = Serial(options.serial_port, 115200, 8, timeout=8)
cmd="415614010100000000000000000000000000000000000000000000000000000000000000000000"
#cmd="415614010100000000000000000000000000000000000000000000000000000000000000011021"
#cmd="415614010100000000000000000000000000000000000000000000000000000000000000022042"
while (1):
print ("Reading result ...")
ser.write(cmd.decode('hex'))
count = 0
while (1):
res_s = ser.read(39)
if not res_s:
print(str(count) + ": Something is wrong or modular id not correct")
else :
result = binascii.hexlify(res_s)
for i in range(0, 11):
number = '{:03}'.format(int(result[10 + i * 2:12 + i * 2], 16))
if (i == 0):
sys.stdout.write(number + ":\t")
else :
sys.stdout.write(number + "\t")
sys.stdout.flush()
print("")
count = count + 1
if (count == 5):
raw_input('Press enter to continue:')
break
|
#!/usr/bin/env python2.7
# This simple script was for test A3255 modular. there are 128 cores in one A3255 chip.
# If all cores are working the number should be 0.
# If some of them not working the number is the broken cores count.
from serial import Serial
from optparse import OptionParser
import binascii
import sys
parser = OptionParser()
parser.add_option("-s", "--serial", dest="serial_port", default="/dev/ttyUSB0", help="Serial port")
(options, args) = parser.parse_args()
ser = Serial(options.serial_port, 115200, 8, timeout=8)
cmd="415614010100000000000000000000000000000000000000000000000000000000000000000000"
#cmd="415614010100000000000000000000000000000000000000000000000000000000000000011021"
#cmd="415614010100000000000000000000000000000000000000000000000000000000000000022042"
while (1):
print ("Reading result ...")
ser.write(cmd.decode('hex'))
count = 0
while (1):
res_s = ser.read(39)
if not res_s:
print(str(count) + ": Something is wrong or modular id not correct")
else :
result = binascii.hexlify(res_s)
for i in range(0, 8):
number = '{:03}'.format(int(result[10 + i * 2:12 + i * 2], 16))
if (i == 0):
sys.stdout.write(number + ":\t")
else :
sys.stdout.write(number + "\t")
sys.stdout.flush()
print("")
count = count + 1
if (count == 5):
raw_input('Press enter to continue:')
break
|
unlicense
|
Python
|
1b704c24eaeb412e0636e5a0111ce2ac990998fd
|
remove confirm_text option in example
|
shymonk/django-datatable,arambadk/django-datatable,shymonk/django-datatable,arambadk/django-datatable,shymonk/django-datatable,arambadk/django-datatable
|
example/app/tables.py
|
example/app/tables.py
|
#!/usr/bin/env python
# coding: utf-8
from table.columns import Column, LinkColumn, Link
from table.utils import A
from table import Table
from models import Person
class PersonTable(Table):
id = Column(field='id', header=u'序号', header_attrs={'width': '50%'})
name = Column(field='name', header=u'姓名', header_attrs={'width': '50%'})
class Meta:
model = Person
ext_button_link = "http://www.baidu.com"
class LinkColumnTable(Table):
id = Column(field='id', header=u'序号', header_attrs={'width': '33%'})
name = Column(field='name', header=u'姓名', header_attrs={'width': '33%'})
action = LinkColumn(header=u'操作', header_attrs={'width': '33%'}, links=[
Link(text=u'编辑', viewname='app.views.edit', args=('id',), confirm=u"确定吗?")])
class Meta:
model = Person
|
#!/usr/bin/env python
# coding: utf-8
from table.columns import Column, LinkColumn, Link
from table.utils import A
from table import Table
from models import Person
class PersonTable(Table):
id = Column(field='id', header=u'序号', header_attrs={'width': '50%'})
name = Column(field='name', header=u'姓名', header_attrs={'width': '50%'})
class Meta:
model = Person
ext_button_link = "http://www.baidu.com"
class LinkColumnTable(Table):
id = Column(field='id', header=u'序号', header_attrs={'width': '33%'})
name = Column(field='name', header=u'姓名', header_attrs={'width': '33%'})
action = LinkColumn(header=u'操作', header_attrs={'width': '33%'}, links=[
Link(text=u'编辑', viewname='app.views.edit', args=('id',), confirm=True, confirm_text=u"确定吗?")])
class Meta:
model = Person
|
mit
|
Python
|
cb34162de51f36e2d6b846cfbb6e9d6fe8801e48
|
implement send message
|
mlsteele/one-time-chat,mlsteele/one-time-chat,mlsteele/one-time-chat
|
client/client.py
|
client/client.py
|
class OTC_Client(object):
def __init__(self):
raise NotImplementedError("TODO: write a client")
def send(message):
payload = {'message':message}
r = requets.post(self.server_address,data=payload)
raise NotImplementedError("TODO: write send method")
def recieve():
raise NotImplementedError("TODO: write recieve")
def decrypt(message, pad_index=current_index):
raise NotImplementedError("TODO: clients need to decrypt messages")
def encrypt(encrypt):
raise NotImplementedError("TODO: clients need to encrypt messages")
def connect(server_address):
self.server_address = server_address
raise NotImplementedError("TODO:clients need to be able to connect to server")
if __name__ == "__main__":
client = OTC_Client()
|
class OTC_Client(object):
def __init__(self):
raise NotImplementedError("TODO: write a client")
def send(message):
raise NotImplementedError("TODO: write send method")
def recieve():
raise NotImplementedError("TODO: write recieve")
def decrypt(message, pad_index=current_index):
raise NotImplementedError("TODO: clients need to decrypt messages")
def encrypt(encrypt):
raise NotImplementedError("TODO: clients need to encrypt messages")
def connect():
raise NotImplementedError("TODO:clients need to be able to connect to server")
if __name__ == "__main__":
client = OTC_Client()
|
mit
|
Python
|
f1ed9cf573ec8aaa61e9aefb124b453a5a353db4
|
fix pointe-claire
|
opencivicdata/scrapers-ca,opencivicdata/scrapers-ca
|
ca_qc_pointe_claire/people.py
|
ca_qc_pointe_claire/people.py
|
from pupa.scrape import Scraper
from utils import lxmlize, CanadianLegislator as Legislator
import re
COUNCIL_PAGE = 'http://www.ville.pointe-claire.qc.ca/en/city-hall-administration/your-council/municipal-council.html'
class PointeClairePersonScraper(Scraper):
def get_people(self):
page = lxmlize(COUNCIL_PAGE)
mayor = page.xpath('.//div[@class="item-page clearfix"]//table[1]//p')[1]
name = mayor.xpath('.//strong/text()')[0]
p = Legislator(name=name, post_id='Pointe-Claire', role='Maire')
p.add_source(COUNCIL_PAGE)
phone = re.findall(r'[0-9]{3}[ -][0-9]{3}-[0-9]{4}', mayor.text_content())[0].replace(' ', '-')
p.add_contact('voice', phone, 'legislature')
yield p
rows = page.xpath('//tr')
for i, row in enumerate(rows):
if i % 2 == 0:
continue
councillors = row.xpath('./td')
for j, councillor in enumerate(councillors):
name = councillor.text_content()
# rows[i + 1].xpath('.//td//a[contains(@href, "maps")]/text()')[j] # district number
district = rows[i + 1].xpath('.//td/p[1]/text()')[j].replace(' / ', '/')
p = Legislator(name=name, post_id=district, role='Conseiller')
p.add_source(COUNCIL_PAGE)
p.image = councillor.xpath('.//img/@src')[0]
phone = re.findall(r'[0-9]{3}[ -][0-9]{3}-[0-9]{4}', rows[i + 1].xpath('.//td')[j].text_content())[0].replace(' ', '-')
p.add_contact('voice', phone, 'legislature')
yield p
|
from pupa.scrape import Scraper
from utils import lxmlize, CanadianLegislator as Legislator
import re
COUNCIL_PAGE = 'http://www.ville.pointe-claire.qc.ca/en/city-hall-administration/your-council/municipal-council.html'
class PointeClairePersonScraper(Scraper):
def get_people(self):
page = lxmlize(COUNCIL_PAGE)
mayor = page.xpath('.//div[@class="item-page clearfix"]//table[1]//p')[1]
name = mayor.xpath('.//strong/text()')[0]
p = Legislator(name=name, post_id='Pointe-Claire', role='Maire')
p.add_source(COUNCIL_PAGE)
phone = re.findall(r'[0-9]{3} [0-9]{3}-[0-9]{4}', mayor.text_content())[0].replace(' ', '-')
p.add_contact('voice', phone, 'legislature')
yield p
rows = page.xpath('//tr')
for i, row in enumerate(rows):
if i % 2 == 0:
continue
councillors = row.xpath('./td')
for j, councillor in enumerate(councillors):
name = councillor.text_content()
# rows[i + 1].xpath('.//td//a[contains(@href, "maps")]/text()')[j] # district number
district = rows[i + 1].xpath('.//td/p[1]/text()')[j].replace(' / ', '/')
p = Legislator(name=name, post_id=district, role='Conseiller')
p.add_source(COUNCIL_PAGE)
p.image = councillor.xpath('.//img/@src')[0]
phone = re.findall(r'[0-9]{3} [0-9]{3}-[0-9]{4}', rows[i + 1].xpath('.//td')[j].text_content())[0].replace(' ', '-')
p.add_contact('voice', phone, 'legislature')
yield p
|
mit
|
Python
|
9a3081c58818ad28e216e9a14fc573c4a392f55f
|
Add method for getting csv Hours Worked reports for Jobs Board and Payment Plan
|
pkimber/invoice,pkimber/invoice,pkimber/invoice
|
invoice/management/commands/ticket_time_csv.py
|
invoice/management/commands/ticket_time_csv.py
|
# -*- encoding: utf-8 -*-
import csv
import os
from django.core.management.base import BaseCommand
from invoice.models import TimeRecord
class Command(BaseCommand):
help = "Export ticket time to a CSV file"
def _jobs_board_tickets(self):
return (
732,
746,
747,
748,
749,
750,
751,
752,
753,
754,
755,
756,
757,
758,
759,
906,
976,
)
def _payment_plan_tickets(self):
return (
644,
)
def handle(self, *args, **options):
"""Export ticket time to a CSV file.
Columns:
- ticket number
- user name
- billable - True or False
- date started
- minutes
"""
tickets = self._jobs_board_tickets()
# tickets = self._payment_plan_tickets()
tickets = list(tickets)
tickets.sort()
file_name = '{}_ticket_time.csv'.format(
'_'.join([str(i) for i in tickets])
)
if os.path.exists(file_name):
raise Exception(
"Export file, '{}', already exists. "
"Cannot export time.".format(file_name)
)
with open(file_name, 'w', newline='') as csv_file:
csv_writer = csv.writer(csv_file, dialect='excel-tab')
for tr in TimeRecord.objects.filter(ticket__pk__in=tickets):
csv_writer.writerow([
tr.ticket.pk,
tr.user.username,
tr.billable,
tr.has_invoice_line,
tr.date_started,
tr._timedelta_minutes(),
])
print("Exported time to {}".format(file_name))
|
# -*- encoding: utf-8 -*-
import csv
import os
from django.core.management.base import BaseCommand
from invoice.models import TimeRecord
class Command(BaseCommand):
help = "Export ticket time to a CSV file"
def handle(self, *args, **options):
"""Export ticket time to a CSV file.
Columns:
- ticket number
- user name
- billable - True or False
- date started
- minutes
"""
tickets = (
732,
746,
747,
748,
749,
750,
751,
752,
753,
754,
755,
756,
757,
758,
759,
906
)
tickets = list(tickets)
tickets.sort()
file_name = '{}_ticket_time.csv'.format(
'_'.join([str(i) for i in tickets])
)
if os.path.exists(file_name):
raise Exception(
"Export file, '{}', already exists. "
"Cannot export time.".format(file_name)
)
with open(file_name, 'w', newline='') as csv_file:
csv_writer = csv.writer(csv_file, dialect='excel-tab')
for tr in TimeRecord.objects.filter(ticket__pk__in=tickets):
csv_writer.writerow([
tr.ticket.pk,
tr.user.username,
tr.billable,
tr.date_started,
tr._timedelta_minutes(),
])
print("Exported time to {}".format(file_name))
|
apache-2.0
|
Python
|
8fa0ca6a307f7b23545d297d17f8eb05f037978f
|
fix one e2e test problem (#459)
|
kubeflow/kfserving-lts,kubeflow/kfserving-lts,kubeflow/kfserving-lts,kubeflow/kfserving-lts,kubeflow/kfserving-lts,kubeflow/kfserving-lts
|
test/e2e/utils.py
|
test/e2e/utils.py
|
# Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from kfserving import KFServingClient
KFServing = KFServingClient(config_file="~/.kube/config")
def wait_for_kfservice_ready(name, namespace='kfserving-ci-e2e-test', Timeout_seconds=600):
for _ in range(round(Timeout_seconds/10)):
time.sleep(10)
kfsvc_status = KFServing.get(name, namespace=namespace)
status = 'Unknown'
for condition in kfsvc_status['status'].get('conditions', {}):
if condition.get('type', '') == 'Ready':
status = condition.get('status', 'Unknown')
if status == 'True':
return
raise RuntimeError("Timeout to start the KFService.")
|
# Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from kfserving import KFServingClient
KFServing = KFServingClient(config_file="~/.kube/config")
def wait_for_kfservice_ready(name, namespace='kfserving-ci-e2e-test', Timeout_seconds=600):
for _ in range(round(Timeout_seconds/10)):
time.sleep(10)
kfsvc_status = KFServing.get(name, namespace=namespace)
for condition in kfsvc_status['status'].get('conditions', {}):
if condition.get('type', '') == 'Ready':
status = condition.get('status', 'Unknown')
if status == 'True':
return
raise RuntimeError("Timeout to start the KFService.")
|
apache-2.0
|
Python
|
49be60d27b5d5ce40c20847f79a8dd09f580a830
|
Update _var_dump.py
|
sha256/python-var-dump
|
var_dump/_var_dump.py
|
var_dump/_var_dump.py
|
from __future__ import print_function
import sys
try:
from types import NoneType
except:
NoneType = type(None)
if sys.version_info > (3,):
long = int
unicode = str
__author__ = "Shamim Hasnath"
__copyright__ = "Copyright 2013, Shamim Hasnath"
__license__ = "BSD License"
__version__ = "1.0.1"
TAB_SIZE = 4
def display(o, space, num, key, typ, proret):
st = ""
l = []
if key:
if typ is dict:
st += " " * space + "['%s'] => "
else:
st += " " * space + "%s => "
l.append(key)
elif space > 0:
st += " " * space + "[%d] => "
l.append(num)
else: # at the very start
st += "#%d "
l.append(num)
if type(o) in (tuple, list, dict, int, str, float, long, bool, NoneType, unicode):
st += "%s(%s) "
l.append(type(o).__name__)
if type(o) in (int, float, long, bool, NoneType):
l.append(o)
else:
l.append(len(o))
if type(o) in (str, unicode):
st += '"%s"'
l.append(o)
elif isinstance(o, object):
st += "object(%s) (%d)"
l.append(o.__class__.__name__)
l.append(len(o.__dict__))
if proret:
print(st % tuple(l))
return st % tuple(l)
def dump(o, space, num, key, typ, proret):
r = '';
if type(o) in (str, int, float, long, bool, NoneType, unicode):
r += display(o, space, num, key, typ, proret)
elif isinstance(o, object):
r += display(o, space, num, key, typ, proret)
num = 0
if type(o) in (tuple, list, dict):
typ = type(o) # type of the container of str, int, long, float etc
elif isinstance(o, object):
o = o.__dict__
typ = object
for i in o:
space += TAB_SIZE
if type(o) is dict:
r += dump(o[i], space, num, i, typ, proret)
else:
r += dump(i, space, num, '', typ, proret)
num += 1
space -= TAB_SIZE
return r
def var_dump(*obs):
"""
shows structured information of a object, list, tuple etc
"""
i = 0
for x in obs:
dump(x, 0, i, '', object, True)
i += 1
def var_export(*obs):
"""
returns output as as string
"""
r = ''
i = 0
for x in obs:
r += dump(x, 0, i, '', object, False)
i += 1
return r
|
from __future__ import print_function
import sys
try:
from types import NoneType
except:
NoneType = type(None)
if sys.version_info > (3,):
long = int
unicode = str
__author__ = "Shamim Hasnath"
__copyright__ = "Copyright 2013, Shamim Hasnath"
__license__ = "BSD License"
__version__ = "1.0.1"
TAB_SIZE = 4
def display(o, space, num, key, typ, display):
st = ""
l = []
if key:
if typ is dict:
st += " " * space + "['%s'] => "
else:
st += " " * space + "%s => "
l.append(key)
elif space > 0:
st += " " * space + "[%d] => "
l.append(num)
else: # at the very start
st += "#%d "
l.append(num)
if type(o) in (tuple, list, dict, int, str, float, long, bool, NoneType, unicode):
st += "%s(%s) "
l.append(type(o).__name__)
if type(o) in (int, float, long, bool, NoneType):
l.append(o)
else:
l.append(len(o))
if type(o) in (str, unicode):
st += '"%s"'
l.append(o)
elif isinstance(o, object):
st += "object(%s) (%d)"
l.append(o.__class__.__name__)
l.append(len(o.__dict__))
if display:
print(st % tuple(l))
else:
return st % tuple(l)
def dump(o, space, num, key, typ, proret):
if type(o) in (str, int, float, long, bool, NoneType, unicode):
display(o, space, num, key, typ, proret)
elif isinstance(o, object):
display(o, space, num, key, typ, proret)
num = 0
if type(o) in (tuple, list, dict):
typ = type(o) # type of the container of str, int, long, float etc
elif isinstance(o, object):
o = o.__dict__
typ = object
for i in o:
space += TAB_SIZE
if type(o) is dict:
dump(o[i], space, num, i, typ, proret)
else:
dump(i, space, num, '', typ, proret)
num += 1
space -= TAB_SIZE
def var_dump(*obs):
"""
shows structured information of a object, list, tuple etc
"""
i = 0
for x in obs:
dump(x, 0, i, '', object, True)
i += 1
def var_export(*obs):
"""
returns output as as string
"""
r = ''
i = 0
for x in obs:
r += dump(x, 0, i, '', object, False)
i += 1
return r
|
bsd-3-clause
|
Python
|
206696e82f3e5be4a64e60abdb59ca51d2b1461e
|
Add a test for rgb+mp to verify that it continues to work.
|
deepmind/pysc2
|
pysc2/tests/multi_player_env_test.py
|
pysc2/tests/multi_player_env_test.py
|
#!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that the multiplayer environment works."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from future.builtins import range # pylint: disable=redefined-builtin
from pysc2.agents import random_agent
from pysc2.env import run_loop
from pysc2.env import sc2_env
from pysc2.tests import utils
class TestMultiplayerEnv(utils.TestCase):
def test_multi_player_env_features(self):
steps = 100
step_mul = 16
players = 2
with sc2_env.SC2Env(
map_name="Simple64",
players=[sc2_env.Agent(sc2_env.Race.zerg),
sc2_env.Agent(sc2_env.Race.terran)],
feature_screen_size=84,
feature_minimap_size=64,
step_mul=step_mul,
game_steps_per_episode=steps * step_mul // 2) as env:
agents = [random_agent.RandomAgent() for _ in range(players)]
run_loop.run_loop(agents, env, steps)
def test_multi_player_env_rgb(self):
steps = 100
step_mul = 16
players = 2
with sc2_env.SC2Env(
map_name="Simple64",
players=[sc2_env.Agent(sc2_env.Race.zerg),
sc2_env.Agent(sc2_env.Race.terran)],
rgb_screen_size=84,
rgb_minimap_size=64,
step_mul=step_mul,
game_steps_per_episode=steps * step_mul // 2) as env:
agents = [random_agent.RandomAgent() for _ in range(players)]
run_loop.run_loop(agents, env, steps)
if __name__ == "__main__":
absltest.main()
|
#!/usr/bin/python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that the multiplayer environment works."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from future.builtins import range # pylint: disable=redefined-builtin
from pysc2.agents import random_agent
from pysc2.env import run_loop
from pysc2.env import sc2_env
from pysc2.tests import utils
class TestMultiplayerEnv(utils.TestCase):
def test_multi_player_env(self):
steps = 100
step_mul = 16
players = 2
with sc2_env.SC2Env(
map_name="Simple64",
players=[sc2_env.Agent(sc2_env.Race.zerg),
sc2_env.Agent(sc2_env.Race.terran)],
feature_screen_size=84,
feature_minimap_size=64,
step_mul=step_mul,
game_steps_per_episode=steps * step_mul // 2) as env:
agents = [random_agent.RandomAgent() for _ in range(players)]
run_loop.run_loop(agents, env, steps)
if __name__ == "__main__":
absltest.main()
|
apache-2.0
|
Python
|
48e3dab4ce044554b0ff606dea340ff8b6e5d928
|
Update __init__.py
|
EldarAliiev/python-edbo-connector,ZimGreen/edbo-connector-py
|
edbo_connector/__init__.py
|
edbo_connector/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
edbo_connector
Author: Eldar Aliiev
Email: e.aliiev@vnmu.edu.ua
"""
from .client import EDBOWebApiClient
__name__ = 'python-edbo-connector'
__author__ = 'Eldar Aliiev'
__copyright__ = 'Copyright 2018, National Pirogov Memorial Medical University, Vinnytsya'
__credits__ = ['Eldar Aliiev']
__license__ = 'MIT'
__version__ = '1.0.4-13'
__maintainer__ = 'Eldar Aliiev'
__email__ = 'e.aliiev@vnmu.edu.ua'
__status__ = 'Production'
__all__ = ['EDBOWebApiClient']
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
edbo_connector
Author: Eldar Aliiev
Email: e.aliiev@vnmu.edu.ua
"""
from .client import EDBOWebApiClient
__name__ = 'python-edbo-connector'
__author__ = 'Eldar Aliiev'
__copyright__ = 'Copyright 2018, National Pirogov Memorial Medical University, Vinnytsya'
__credits__ = ['Eldar Aliiev']
__license__ = 'MIT'
__version__ = '1.0.4-12'
__maintainer__ = 'Eldar Aliiev'
__email__ = 'e.aliiev@vnmu.edu.ua'
__status__ = 'Production'
__all__ = ['EDBOWebApiClient']
|
mit
|
Python
|
c95c222384c2c0d887d435017196c9af4137d1b2
|
set numba parallel option
|
IntelLabs/hpat,IntelLabs/hpat,IntelLabs/hpat,IntelLabs/hpat
|
hpat/__init__.py
|
hpat/__init__.py
|
from __future__ import print_function, division, absolute_import
import numba
from numba import *
from .compiler import add_hpat_stages
set_user_pipeline_func(add_hpat_stages)
del add_hpat_stages
def jit(signature_or_function=None, **options):
# set nopython by default
if 'nopython' not in options:
options['nopython'] = True
options['parallel'] = True
return numba.jit(signature_or_function, **options)
|
from __future__ import print_function, division, absolute_import
import numba
from numba import *
from .compiler import add_hpat_stages
set_user_pipeline_func(add_hpat_stages)
del add_hpat_stages
def jit(signature_or_function=None, **options):
# set nopython by default
if 'nopython' not in options:
options['nopython'] = True
return numba.jit(signature_or_function, **options)
|
bsd-2-clause
|
Python
|
77651861fc5a27d1d62293e3bc66d62ae193221d
|
add tolerance option to F.sqrt
|
niboshi/chainer,chainer/chainer,ronekko/chainer,niboshi/chainer,niboshi/chainer,hvy/chainer,okuta/chainer,niboshi/chainer,wkentaro/chainer,hvy/chainer,jnishi/chainer,wkentaro/chainer,hvy/chainer,ktnyt/chainer,ktnyt/chainer,jnishi/chainer,hvy/chainer,chainer/chainer,keisuke-umezawa/chainer,chainer/chainer,aonotas/chainer,wkentaro/chainer,keisuke-umezawa/chainer,rezoo/chainer,jnishi/chainer,ktnyt/chainer,jnishi/chainer,chainer/chainer,okuta/chainer,anaruse/chainer,okuta/chainer,wkentaro/chainer,keisuke-umezawa/chainer,keisuke-umezawa/chainer,pfnet/chainer,tkerola/chainer,okuta/chainer,ktnyt/chainer
|
tests/chainer_tests/functions_tests/math_tests/test_sqrt.py
|
tests/chainer_tests/functions_tests/math_tests/test_sqrt.py
|
import unittest
import numpy
import chainer.functions as F
from chainer import testing
# sqrt
def make_data(shape, dtype):
x = numpy.random.uniform(0.1, 5, shape).astype(dtype)
gy = numpy.random.uniform(-1, 1, shape).astype(dtype)
ggx = numpy.random.uniform(-1, 1, shape).astype(dtype)
return x, gy, ggx
@testing.unary_math_function_unittest(
F.sqrt,
make_data=make_data,
backward_options={'atol': 1e-3, 'rtol': 1e-3},
)
class TestSqrt(unittest.TestCase):
pass
# rsqrt
def rsqrt(x):
return numpy.reciprocal(numpy.sqrt(x))
class TestRsqrt(unittest.TestCase):
def test_rsqrt(self):
x = numpy.random.uniform(0.1, 5, (3, 2)).astype(numpy.float32)
testing.assert_allclose(F.rsqrt(x).data, rsqrt(x))
testing.run_module(__name__, __file__)
|
import unittest
import numpy
import chainer.functions as F
from chainer import testing
# sqrt
def make_data(shape, dtype):
x = numpy.random.uniform(0.1, 5, shape).astype(dtype)
gy = numpy.random.uniform(-1, 1, shape).astype(dtype)
ggx = numpy.random.uniform(-1, 1, shape).astype(dtype)
return x, gy, ggx
@testing.unary_math_function_unittest(F.sqrt, make_data=make_data)
class TestSqrt(unittest.TestCase):
pass
# rsqrt
def rsqrt(x):
return numpy.reciprocal(numpy.sqrt(x))
class TestRsqrt(unittest.TestCase):
def test_rsqrt(self):
x = numpy.random.uniform(0.1, 5, (3, 2)).astype(numpy.float32)
testing.assert_allclose(F.rsqrt(x).data, rsqrt(x))
testing.run_module(__name__, __file__)
|
mit
|
Python
|
cc17390eada091da34fed92ee7e2090adc1fa87e
|
Fix for `plot_field` function failing on non-square grids #666
|
opesci/devito,opesci/devito
|
examples/cfd/tools.py
|
examples/cfd/tools.py
|
from mpl_toolkits.mplot3d import Axes3D # noqa
import numpy as np
from matplotlib import pyplot, cm
def plot_field(field, xmax=2., ymax=2., zmax=None, view=None, linewidth=0):
"""Utility plotting routine for 2D data
:param field: Numpy array with field data to plot
:param xmax: (Optional) Length of the x-axis
:param ymax: (Optional) Length of the y-axis
:param view: (Optional) View point to intialise
"""
x_coord = np.linspace(0, xmax, field.shape[0])
y_coord = np.linspace(0, ymax, field.shape[1])
fig = pyplot.figure(figsize=(11, 7), dpi=100)
ax = fig.gca(projection='3d')
X, Y = np.meshgrid(x_coord, y_coord, indexing='ij')
ax.plot_surface(X, Y, field[:], cmap=cm.viridis, rstride=1, cstride=1,
linewidth=linewidth, antialiased=False)
# Enforce axis measures and set view if given
ax.set_xlim(0., xmax)
ax.set_ylim(0., ymax)
if zmax is not None:
ax.set_zlim(1., zmax)
if view is not None:
ax.view_init(*view)
# Label axis
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
pyplot.show()
def init_hat(field, dx, dy, value=2., bgvalue=1.):
"""Set "hat function" initial condition on an array:
u(.5<=x<=1 && .5<=y<=1 ) is 2
:param field: Numpy array with field data to plot
:param dx: Spacing in the x-dimension
:param dy: Spacing in the y-dimension
:param value: Value of the top part of the function, default=2.
:param bgvalue: Background value for the bottom of the function, default=1.
"""
field[:] = bgvalue
field[int(.5 / dx):int(1 / dx + 1), int(.5 / dy):int(1 / dy + 1)] = value
def gaussian(x, mu, sig):
return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
def fin_bump(x):
if x <= 0 or x >= 1:
return 0
else:
return 100*np.exp(-1./(x-np.power(x, 2.)))
def init_smooth(field, dx, dy):
nx, ny = field.shape
for ix in range(nx):
for iy in range(ny):
x = ix * dx
y = iy * dy
field[ix, iy] = fin_bump(x/1.5) * fin_bump(y/1.5) + 1.
|
from mpl_toolkits.mplot3d import Axes3D # noqa
import numpy as np
from matplotlib import pyplot, cm
def plot_field(field, xmax=2., ymax=2., zmax=None, view=None, linewidth=0):
"""Utility plotting routine for 2D data
:param field: Numpy array with field data to plot
:param xmax: (Optional) Length of the x-axis
:param ymax: (Optional) Length of the y-axis
:param view: (Optional) View point to intialise
"""
x_coord = np.linspace(0, xmax, field.shape[0])
y_coord = np.linspace(0, ymax, field.shape[1])
fig = pyplot.figure(figsize=(11, 7), dpi=100)
ax = fig.gca(projection='3d')
X, Y = np.meshgrid(x_coord, y_coord)
ax.plot_surface(X, Y, field[:], cmap=cm.viridis, rstride=1, cstride=1,
linewidth=linewidth, antialiased=False)
# Enforce axis measures and set view if given
ax.set_xlim(0., xmax)
ax.set_ylim(0., ymax)
if zmax is not None:
ax.set_zlim(1., zmax)
if view is not None:
ax.view_init(*view)
# Label axis
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
pyplot.show()
def init_hat(field, dx, dy, value=2., bgvalue=1.):
"""Set "hat function" initial condition on an array:
u(.5<=x<=1 && .5<=y<=1 ) is 2
:param field: Numpy array with field data to plot
:param dx: Spacing in the x-dimension
:param dy: Spacing in the y-dimension
:param value: Value of the top part of the function, default=2.
:param bgvalue: Background value for the bottom of the function, default=1.
"""
field[:] = bgvalue
field[int(.5 / dx):int(1 / dx + 1), int(.5 / dy):int(1 / dy + 1)] = value
def gaussian(x, mu, sig):
return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
def fin_bump(x):
if x <= 0 or x >= 1:
return 0
else:
return 100*np.exp(-1./(x-np.power(x, 2.)))
def init_smooth(field, dx, dy):
nx, ny = field.shape
for ix in range(nx):
for iy in range(ny):
x = ix * dx
y = iy * dy
field[ix, iy] = fin_bump(x/1.5) * fin_bump(y/1.5) + 1.
|
mit
|
Python
|
173317003a59afb639e6f4f5d5eca41a1f390979
|
Revise q05 to successfully use partial derivatives of u and v for gradient descent of E (error).
|
JMill/edX-Learning-From-Data-Explanations
|
hw05/hw05ex05.py
|
hw05/hw05ex05.py
|
# dE/du (u e^v - 2v e^(-u))^2 = 2 (u e^v - 2v e^(-u))(e^v + 2v e^(-u))
#from decimal import Decimal
from math import sqrt, exp, fabs #natural exponent, e**x. and absolute value
def calcEwrtu(u,v):
'''
Given u and v, the hypothesis and the target function,
return the partial deriv w.r.t. u for gradient descent of the error.
'''
return 2 * ( u*exp(v) - 2*v*exp(-u) ) * ( exp(v) + 2*v*exp(-u) )
def calcEwrtv(u,v):
'''
Given u and v, the hypothesis and the target function,
return the partial deriv w.r.t. v for gradient descent of the error.
'''
return 2 * ( u * exp(v) - 2*v*exp(-u) ) * ( u*exp(v) - 2*exp(-u))
def calcE(u,v):
return ( u*exp(v) - 2.*v*exp(-u) )**2
def q05():
i = 0
eta = 0.1 # u"\u03B7"
u = 1.
v = 1.
#E = float(10^(-14))
#E = 10^(-14)
#E = Decimal(0.0000000000001)
#E = 0.0000000000001
E_threshold = 10e-14
E = 99999.
while True:
if E < E_threshold:
print E, '<', E_threshold, ' in', i, 'iterations'
break
else:
dE_du = calcEwrtu(u,v)
dE_dv = calcEwrtv(u,v)
u = u - eta * dE_du
v = v - eta * dE_dv
E = calcE(u,v)
#print 'E:', E, 'u:', u, 'v:', v, 'iter:', i
i+=1
return u, v, E, i
print q05()
|
# dE/du (u e^v - 2v e^(-u))^2 = 2 (u e^v - 2v e^(-u))(e^v + 2v e^(-u))
#from decimal import Decimal
from math import exp #natural exponent, e**x
def calcE(u,v):
return 2 * ( u*exp(v) - 2*v*exp(-u) ) * ( exp(v) + 2*v*exp(-u) )
i = 0
eta = 0.1 # u"\u03B7"
#E = float(10^(-14))
#E = 10^(-14)
#E = Decimal(0.0000000000001)
#E = 0.0000000000001
E_threshold = 10e-14
print calcE(1,1)
'''
while True:
if E < E_threshold:
print E, '<', E_threshold, ' in', i, 'iterations'
break
else:
'''
|
apache-2.0
|
Python
|
c0eedfeca0e19a65e4484e63790319cf18433343
|
change optimizer in example
|
keras-team/keras,nebw/keras,kemaswill/keras,relh/keras,kuza55/keras,dolaameng/keras,DeepGnosis/keras,keras-team/keras
|
examples/mnist_mlp.py
|
examples/mnist_mlp.py
|
'''Trains a simple deep NN on the MNIST dataset.
Gets to 98.40% test accuracy after 20 epochs
(there is *a lot* of margin for parameter tuning).
2 seconds per epoch on a K520 GPU.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
batch_size = 128
nb_classes = 10
nb_epoch = 20
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
history = model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
|
'''Trains a simple deep NN on the MNIST dataset.
Gets to 98.40% test accuracy after 20 epochs
(there is *a lot* of margin for parameter tuning).
2 seconds per epoch on a K520 GPU.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import np_utils
batch_size = 128
nb_classes = 10
nb_epoch = 20
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer=Adam(),
metrics=['accuracy'])
history = model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
score = model.evaluate(X_test, Y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
|
apache-2.0
|
Python
|
a5be3784d0cfce42c0cdb6bc83b37a07dff7a164
|
Implement accuracy on GPU
|
hidenori-t/chainer,hvy/chainer,niboshi/chainer,sinhrks/chainer,cupy/cupy,bayerj/chainer,woodshop/complex-chainer,okuta/chainer,okuta/chainer,benob/chainer,nushio3/chainer,kikusu/chainer,nushio3/chainer,truongdq/chainer,ttakamura/chainer,jnishi/chainer,wkentaro/chainer,ktnyt/chainer,aonotas/chainer,masia02/chainer,wkentaro/chainer,woodshop/chainer,ttakamura/chainer,Kaisuke5/chainer,sou81821/chainer,muupan/chainer,tereka114/chainer,elviswf/chainer,jnishi/chainer,t-abe/chainer,okuta/chainer,delta2323/chainer,wkentaro/chainer,kikusu/chainer,truongdq/chainer,umitanuki/chainer,rezoo/chainer,hvy/chainer,ktnyt/chainer,cupy/cupy,chainer/chainer,kashif/chainer,jfsantos/chainer,hvy/chainer,kiyukuta/chainer,benob/chainer,niboshi/chainer,cupy/cupy,muupan/chainer,keisuke-umezawa/chainer,AlpacaDB/chainer,jnishi/chainer,anaruse/chainer,niboshi/chainer,t-abe/chainer,ikasumi/chainer,minhpqn/chainer,ktnyt/chainer,chainer/chainer,tscohen/chainer,yanweifu/chainer,wavelets/chainer,laysakura/chainer,keisuke-umezawa/chainer,ktnyt/chainer,keisuke-umezawa/chainer,cemoody/chainer,1986ks/chainer,tkerola/chainer,kuwa32/chainer,ytoyama/yans_chainer_hackathon,AlpacaDB/chainer,sinhrks/chainer,chainer/chainer,jnishi/chainer,niboshi/chainer,keisuke-umezawa/chainer,ysekky/chainer,tigerneil/chainer,hvy/chainer,cupy/cupy,wkentaro/chainer,okuta/chainer,chainer/chainer,ronekko/chainer,pfnet/chainer
|
chainer/functions/accuracy.py
|
chainer/functions/accuracy.py
|
import numpy
from pycuda import gpuarray
from chainer import cuda, Function
class Accuracy(Function):
"""Compute accuracy within minibatch."""
def forward_cpu(self, inputs):
y, t = inputs
y = y.reshape(y.shape[0], y.size / y.shape[0]) # flatten
pred = y.argmax(axis=1)
return (pred == t).mean(dtype=numpy.float32),
def forward_gpu(self, inputs):
x, t = inputs
fragments = cuda.empty((x.shape[0],), dtype=numpy.int8)
cuda.elementwise(
'char* fragments, const float* x, const int* t, int c',
'''
x += i * c;
float maxval = x[0];
int argmax = 0;
for (int j = 1; j < c; ++j) {
if (maxval < x[j]) {
maxval = x[j];
argmax = j;
}
}
fragments[i] = argmax == t[i];
''', 'accuracy_fwd_map')(fragments, x, t, x.shape[1])
y = gpuarray.sum(fragments, dtype=numpy.float32)
y /= x.shape[0]
return y,
def accuracy(y, t):
return Accuracy()(y, t)
|
import numpy
from chainer import cuda, Function
class Accuracy(Function):
"""Compute accuracy within minibatch."""
def forward_cpu(self, inputs):
y, t = inputs
y = y.reshape(y.shape[0], y.size / y.shape[0]) # flatten
pred = y.argmax(axis=1)
return (pred == t).mean(dtype=numpy.float32),
def forward_gpu(self, inputs):
# Fallback to CPU
# TODO(beam2d): Pure GPU version
accuracy, = self.forward_cpu((a.get() for a in inputs))
return cuda.to_gpu_async(numpy.array(accuracy)),
def accuracy(y, t):
return Accuracy()(y, t)
|
mit
|
Python
|
f3c5a477141e5f3845641111f775ea90398be633
|
Add numpy.ndarray and cupy.ndarray as input type
|
hvy/chainer,wkentaro/chainer,tkerola/chainer,jnishi/chainer,keisuke-umezawa/chainer,chainer/chainer,okuta/chainer,niboshi/chainer,chainer/chainer,wkentaro/chainer,chainer/chainer,aonotas/chainer,niboshi/chainer,hvy/chainer,ktnyt/chainer,ronekko/chainer,rezoo/chainer,jnishi/chainer,ktnyt/chainer,ktnyt/chainer,jnishi/chainer,hvy/chainer,wkentaro/chainer,jnishi/chainer,keisuke-umezawa/chainer,keisuke-umezawa/chainer,okuta/chainer,okuta/chainer,keisuke-umezawa/chainer,chainer/chainer,hvy/chainer,wkentaro/chainer,anaruse/chainer,ktnyt/chainer,okuta/chainer,niboshi/chainer,niboshi/chainer,pfnet/chainer
|
chainer/functions/math/erf.py
|
chainer/functions/math/erf.py
|
import math
import warnings
import numpy
import chainer
from chainer import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
_erf_cpu = None
class Erf(function_node.FunctionNode):
@property
def label(self):
return 'erf'
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, x):
global _erf_cpu
if _erf_cpu is None:
try:
from scipy import special
_erf_cpu = special.erf
except ImportError:
warnings.warn(
"SciPy is not available. Forward computation of erf in CPU"
" can be slow without SciPy.")
_erf_cpu = numpy.vectorize(math.erf)
self.retain_inputs((0,))
return utils.force_array(_erf_cpu(x[0]), dtype=x[0].dtype),
def forward_gpu(self, x):
self.retain_inputs((0,))
return cuda.elementwise(
'T x', 'T y',
'y = erf(x)',
'elementwise_erf',
)(x[0]),
def backward(self, indexes, gy):
x = self.get_retained_inputs()[0]
return 2 / numpy.pi ** 0.5 * chainer.functions.exp(-x ** 2) * gy[0],
def erf(x):
"""Elementwise error function.
.. note::
Forward computation in CPU can be slow if
`SciPy <https://www.scipy.org/>`_ is not available.
Args:
x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
:class:`cupy.ndarray`): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Erf().apply((x,))[0]
|
import math
import warnings
import numpy
import chainer
from chainer import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
_erf_cpu = None
class Erf(function_node.FunctionNode):
@property
def label(self):
return 'erf'
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
type_check.expect(in_types[0].dtype.kind == 'f')
def forward_cpu(self, x):
global _erf_cpu
if _erf_cpu is None:
try:
from scipy import special
_erf_cpu = special.erf
except ImportError:
warnings.warn(
"SciPy is not available. Forward computation of erf in CPU"
" can be slow without SciPy.")
_erf_cpu = numpy.vectorize(math.erf)
self.retain_inputs((0,))
return utils.force_array(_erf_cpu(x[0]), dtype=x[0].dtype),
def forward_gpu(self, x):
self.retain_inputs((0,))
return cuda.elementwise(
'T x', 'T y',
'y = erf(x)',
'elementwise_erf',
)(x[0]),
def backward(self, indexes, gy):
x = self.get_retained_inputs()[0]
return 2 / numpy.pi ** 0.5 * chainer.functions.exp(-x ** 2) * gy[0],
def erf(x):
"""Elementwise error function.
.. note::
Forward computation in CPU can be slow if
`SciPy <https://www.scipy.org/>`_ is not available.
Args:
x (~chainer.Variable): Input variable.
Returns:
~chainer.Variable: Output variable.
"""
return Erf().apply((x,))[0]
|
mit
|
Python
|
8c4a54690cb99b63a9cf825e2958bb2b48cd7e5d
|
Complete lc009_palindrome_number.py
|
bowen0701/algorithms_data_structures
|
lc009_palindrome_number.py
|
lc009_palindrome_number.py
|
"""Leetcode 9. Palindrome Number
Easy
Determine whether an integer is a palindrome. An integer is a palindrome when
it reads the same backward as forward.
Example 1:
Input: 121
Output: true
Example 2:
Input: -121
Output: false
Explanation: From left to right, it reads -121. From right to left, it becomes
121-. Therefore it is not a palindrome.
Example 3:
Input: 10
Output: false
Explanation: Reads 01 from right to left. Therefore it is not a palindrome.
Follow up:
Coud you solve it without converting the integer to a string?
"""
class Solution(object):
def isPalindrome(self, x):
"""
:type x: int
:rtype: bool
"""
x_str = str(x)
return x_str == x_str[::-1]
class Solution2(object):
def isPalindrome(self, x):
"""
:type x: int
:rtype: bool
"""
if x < 0:
return False
ls = []
while x > 0:
div, mod = divmod(x, 10)
x = div
ls.append(mod)
return ls == ls[::-1]
def main():
x = 121 # Ans: True.
print(Solution().isPalindrome(x))
print(Solution2().isPalindrome(x))
x = -121 # Ans: False.
print(Solution().isPalindrome(x))
print(Solution2().isPalindrome(x))
x = 10 # Ans: False.
print(Solution().isPalindrome(x))
print(Solution2().isPalindrome(x))
if __name__ == '__main__':
main()
|
"""Leetcode 9. Palindrome Number
Easy
Determine whether an integer is a palindrome. An integer is a palindrome when
it reads the same backward as forward.
Example 1:
Input: 121
Output: true
Example 2:
Input: -121
Output: false
Explanation: From left to right, it reads -121. From right to left, it becomes
121-. Therefore it is not a palindrome.
Example 3:
Input: 10
Output: false
Explanation: Reads 01 from right to left. Therefore it is not a palindrome.
"""
class Solution(object):
def isPalindrome(self, x):
"""
:type x: int
:rtype: bool
"""
pass
def main():
pass
if __name__ == '__main__':
main()
|
bsd-2-clause
|
Python
|
9e9f831a757af01cc3b1edfe590e27f7ab53c2ce
|
define interfaces
|
mfcloud/python-zvm-sdk,mfcloud/python-zvm-sdk,mfcloud/python-zvm-sdk,mfcloud/python-zvm-sdk
|
zvmsdk/vmops.py
|
zvmsdk/vmops.py
|
from log import LOG
import utils as zvmutils
VMOPS = None
def _get_vmops():
if VMOPS is None:
VMOPS = VMOps()
return VMOPS
def run_instance(instance_name, image_id, cpu, memory,
login_password, ip_addr):
"""Deploy and provision a virtual machine.
Input parameters:
:instance_name: USERID of the instance, last 8 if length > 8
:image_id: Image ID
:cpu: vcpu
:memory: memory
:login_password: login password
:ip_addr: ip address
"""
pass
def terminate_instance(instance_name):
"""Destroy a virtual machine.
Input parameters:
:instance_name: USERID of the instance, last 8 if length > 8
"""
pass
def start_instance(instance_name):
"""Power on a virtual machine.
Input parameters:
:instance_name: USERID of the instance, last 8 if length > 8
"""
_get_vmops()._power_state(instance_name, "PUT", "on")
def stop_instance(instance_name):
"""Shutdown a virtual machine.
Input parameters:
:instance_name: USERID of the instance, last 8 if length > 8
"""
pass
def create_volume(volume_name, size):
"""Create a volume.
Input parameters:
:volume_name: volume name
:size: size
"""
pass
def delete_volume(volume_name):
"""Create a volume.
Input parameters:
:volume_name: volume name
"""
pass
def attach_volume(instance_name, volume_name):
"""Create a volume.
Input parameters:
:instance_name: USERID of the instance, last 8 if length > 8
:volume_name: volume name
"""
pass
def capture_instance(instance_name, image_name):
"""Caputre a virtual machine image.
Input parameters:
:instance_name: USERID of the instance, last 8 if length > 8
:image_name: Image name
"""
pass
def delete_image(image_name):
"""Delete image.
Input parameters:
:image_name: Image name
"""
pass
def detach_volume(instance_name, volume_name):
"""Create a volume.
Input parameters:
:instance_name: USERID of the instance, last 8 if length > 8
:volume_name: volume name
"""
pass
class VMOps(object):
def __init__(self):
self._xcat_url = zvmutils.get_xcat_url()
def _power_state(self, instance_name, method, state):
"""Invoke xCAT REST API to set/get power state for a instance."""
body = [state]
url = self._xcat_url.rpower('/' + instance_name)
return zvmutils.xcat_request(method, url, body)
def get_power_state(self, instance_name):
"""Get power status of a z/VM instance."""
LOG.debug('Query power stat of %s' % instance_name)
res_dict = self._power_state(instance_name, "GET", "stat")
@zvmutils.wrap_invalid_xcat_resp_data_error
def _get_power_string(d):
tempstr = d['info'][0][0]
return tempstr[(tempstr.find(':') + 2):].strip()
power_stat = _get_power_string(res_dict)
return power_stat
|
from log import LOG
import utils as zvmutils
class VMOps(object):
def __init__(self):
self._xcat_url = zvmutils.get_xcat_url()
def _power_state(self, instance_name, method, state):
"""Invoke xCAT REST API to set/get power state for a instance."""
body = [state]
url = self._xcat_url.rpower('/' + instance_name)
return zvmutils.xcat_request(method, url, body)
def get_power_state(self, instance_name):
"""Get power status of a z/VM instance."""
LOG.debug('Query power stat of %s' % instance_name)
res_dict = self._power_state(instance_name, "GET", "stat")
@zvmutils.wrap_invalid_xcat_resp_data_error
def _get_power_string(d):
tempstr = d['info'][0][0]
return tempstr[(tempstr.find(':') + 2):].strip()
power_stat = _get_power_string(res_dict)
return power_stat
|
apache-2.0
|
Python
|
dfaa289465a2cdc837884718624b9a8a65e511b3
|
Improve proxy rax example
|
Dentosal/python-sc2
|
examples/proxy_rax.py
|
examples/proxy_rax.py
|
import random
import sc2
from sc2 import Race, Difficulty
from sc2.constants import *
from sc2.player import Bot, Computer
class ProxyRaxBot(sc2.BotAI):
async def on_step(self, state, iteration):
cc = self.units(COMMANDCENTER)
if not cc.exists:
target = self.known_enemy_structures.random_or(self.enemy_start_locations[0]).position
for unit in self.workers | self.units(MARINE):
await self.do(unit.attack(target))
return
else:
cc = cc.first
if self.units(MARINE).idle.amount > 15 and iteration % 50 == 1:
target = self.known_enemy_structures.random_or(self.enemy_start_locations[0]).position
for marine in self.units(MARINE).idle:
await self.do(marine.attack(target))
if self.can_afford(SCV) and self.workers.amount < 16 and cc.noqueue:
await self.do(cc.train(SCV))
elif self.supply_left < (2 if self.units(BARRACKS).amount < 3 else 4):
if self.can_afford(SUPPLYDEPOT):
await self.build(SUPPLYDEPOT, near=cc.position.towards(self.game_info.map_center, 5))
elif self.units(BARRACKS).amount < 3 or (self.minerals > 400 and self.units(BARRACKS).amount < 5):
if self.can_afford(BARRACKS):
p = self.game_info.map_center.towards(self.enemy_start_locations[0], 25)
await self.build(BARRACKS, near=p)
for rax in self.units(BARRACKS).ready.noqueue:
if not self.can_afford(MARINE):
break
await self.do(rax.train(MARINE))
for scv in self.units(SCV).idle:
await self.do(scv.gather(self.state.mineral_field.closest_to(cc)))
def main():
sc2.run_game(sc2.maps.get("Sequencer LE"), [
Bot(Race.Terran, ProxyRaxBot()),
Computer(Race.Zerg, Difficulty.Hard)
], realtime=False)
if __name__ == '__main__':
main()
|
import random
import sc2
from sc2 import Race, Difficulty
from sc2.constants import *
from sc2.player import Bot, Computer
class ProxyRaxBot(sc2.BotAI):
async def on_step(self, state, iteration):
cc = self.units(COMMANDCENTER)
if not cc.exists:
target = self.known_enemy_structures.random_or(self.enemy_start_locations[0]).position
for unit in self.workers | self.units(MARINE):
await self.do(unit.attack(target))
return
else:
cc = cc.first
if self.units(MARINE).idle.amount > 15 and iteration % 50 == 1:
target = self.known_enemy_structures.random_or(self.enemy_start_locations[0]).position
for marine in self.units(MARINE).idle:
await self.do(marine.attack(target))
if self.can_afford(SCV) and self.workers.amount < 16 and cc.noqueue:
await self.do(cc.train(SCV))
elif self.supply_left < 2:
if self.can_afford(SUPPLYDEPOT):
await self.build(SUPPLYDEPOT, near=cc.position.towards(self.game_info.map_center, 5))
elif self.units(BARRACKS).amount < 3 or self.minerals > 400:
if self.can_afford(BARRACKS):
p = self.game_info.map_center.towards(self.enemy_start_locations[0], 25)
await self.build(BARRACKS, near=p)
for rax in self.units(BARRACKS).ready.noqueue:
if not self.can_afford(MARINE):
break
await self.do(rax.train(MARINE))
for scv in self.units(SCV).idle:
await self.do(scv.gather(self.state.mineral_field.closest_to(cc)))
def main():
sc2.run_game(sc2.maps.get("Sequencer LE"), [
Bot(Race.Terran, ProxyRaxBot()),
Computer(Race.Zerg, Difficulty.Hard)
], realtime=True)
if __name__ == '__main__':
main()
|
mit
|
Python
|
a4b7878880f5a8d275129949179b4b30044f0c86
|
Update __init__.py
|
jason-neal/eniric,jason-neal/eniric
|
eniric_scripts/__init__.py
|
eniric_scripts/__init__.py
|
__all__ = [
"bary_shift_atmmodel",
"phoenix_precision",
"precision_four_panel",
"split_atmmodel",
]
|
__all__ = [
"bary_shift_atmmodel",
"phoenix_precision.py",
"precision_four_panel",
"split_atmmodel",
]
|
mit
|
Python
|
10d5e90e65e792d0fae3879dd5f512bdc7b95da6
|
Add missing dependency to perl-xml-parser (#12903)
|
iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack
|
var/spack/repos/builtin/packages/perl-xml-parser/package.py
|
var/spack/repos/builtin/packages/perl-xml-parser/package.py
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#
from spack import *
class PerlXmlParser(PerlPackage):
"""XML::Parser - A perl module for parsing XML documents"""
homepage = "http://search.cpan.org/perldoc/XML::Parser"
url = "http://search.cpan.org/CPAN/authors/id/T/TO/TODDR/XML-Parser-2.44.tar.gz"
version('2.44', 'af4813fe3952362451201ced6fbce379')
depends_on('expat')
depends_on('perl-libwww-perl', type=('build', 'run'))
def configure_args(self):
args = []
p = self.spec['expat'].prefix.lib
args.append('EXPATLIBPATH={0}'.format(p))
p = self.spec['expat'].prefix.include
args.append('EXPATINCPATH={0}'.format(p))
return args
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
#
from spack import *
class PerlXmlParser(PerlPackage):
"""XML::Parser - A perl module for parsing XML documents"""
homepage = "http://search.cpan.org/perldoc/XML::Parser"
url = "http://search.cpan.org/CPAN/authors/id/T/TO/TODDR/XML-Parser-2.44.tar.gz"
version('2.44', 'af4813fe3952362451201ced6fbce379')
depends_on('expat')
def configure_args(self):
args = []
p = self.spec['expat'].prefix.lib
args.append('EXPATLIBPATH={0}'.format(p))
p = self.spec['expat'].prefix.include
args.append('EXPATINCPATH={0}'.format(p))
return args
|
lgpl-2.1
|
Python
|
ee75d9530bb6b9e409449c8e9d5ffb3a3578f5d8
|
Fix not coloring for new repositories
|
Brickstertwo/git-commands
|
bin/commands/stateextensions/status.py
|
bin/commands/stateextensions/status.py
|
import os
import re
import subprocess
from colorama import Fore
def title():
return 'status'
def accent(**kwargs):
new_repository = kwargs.get('new_repository', False)
show_color = kwargs.get('show_color', 'always')
if new_repository:
status_title = '{no_color}({green}master{no_color})'.format(no_color=Fore.RESET, green=Fore.GREEN)
else:
status_title = subprocess.check_output(
('git', '-c', 'color.status=' + show_color, 'status', '--branch', '--short')
).splitlines()[0]
status_title = re.match('.*##.*? (.*)', status_title).group(1)
status_title = '{}({})'.format(Fore.RESET, status_title)
return status_title
def get(**kwargs):
new_repository = kwargs.get('new_repository', False)
show_color = kwargs.get('show_color', 'always')
show_clean_message = kwargs.get('show_clean_message', True)
if new_repository:
# check if status is empty
status_output = subprocess.check_output(['git', '-c', 'color.status=' + show_color, 'status', '--short'])
if not status_output:
status_output = 'Empty repository'
else:
status_output = subprocess.check_output(['git', '-c', 'color.status=' + show_color, 'status', '--short', '--untracked-files=all'])
if not status_output and show_clean_message:
status_output = 'nothing to commit, working directory is clean' + os.linesep
return status_output
|
import os
import re
import subprocess
from colorama import Fore
def title():
return 'status'
def accent(**kwargs):
new_repository = kwargs.get('new_repository', False)
show_color = kwargs.get('show_color', 'always')
if new_repository:
status_title = '{no_color}({green}master{no_color})'.format(no_color=Fore.RESET, green=Fore.GREEN)
else:
status_title = subprocess.check_output(
('git', '-c', 'color.status=' + show_color, 'status', '--branch', '--short')
).splitlines()[0]
status_title = re.match('.*##.*? (.*)', status_title).group(1)
status_title = '{}({})'.format(Fore.RESET, status_title)
return status_title
def get(**kwargs):
new_repository = kwargs.get('new_repository', False)
show_color = kwargs.get('show_color', 'always')
show_clean_message = kwargs.get('show_clean_message', True)
if new_repository:
# check if status is empty
status_output = subprocess.check_output(['git', 'status', '--short'])
if not status_output:
status_output = 'Empty repository'
else:
status_output = subprocess.check_output(['git', '-c', 'color.status=' + show_color, 'status', '--short', '--untracked-files=all'])
if not status_output and show_clean_message:
status_output = 'nothing to commit, working directory is clean' + os.linesep
return status_output
|
mit
|
Python
|
c71730cf4f3d8937f0ef1608bf670c28ec44eb0b
|
Complete and prettified
|
KristinCovert/coding101
|
Challenge3.py
|
Challenge3.py
|
# Exercise 3 (and Solution)
# Take a list, say for example this one:
# a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
# and write a program that prints out all the elements of the list that are less than 5.
# Extras:
# Instead of printing the elements one by one, make a new list that has all the elements less than 5 from this list in it and print out this new list.
# Write this in one line of Python.
# Ask the user for a number and return a list that contains only elements from the original list a that are smaller than that number given by the user.
# OUR PLAN: ask user to give 10 numbers (check to see if it is an actual interger) - for each additional number at it to a list, then check the list to see if any of the numbers are less than 5
print ("\nLet's play a game!\n I'll ask for 10 numbers.")
# create a list and start a counter to get desired numbers
list = []
counter = 0
# ask for 10 numbers and add each one to the empty list
while counter < 10:
user_number = raw_input("Please give a number:\n")
try:
val = int(user_number)
list.append(int(user_number))
counter = counter + 1
# if the value entered is not a number remind people about MATH :)
except ValueError:
print("\nThat's not an int! Pleases use real numbers only. That is REAL numbers from math class.")
# Parot back the number list to the user
print ("\nThanks! Here are your numbers:%s" % (list))
# make a function that can judge which numbers in the list are less than a new value that the user gives
def get_less_than_value():
# get the new value to determine which numbers are less than it
start_value = raw_input("\nNow give a new number and I will tell you which number or numbers in your list are smaller. ")
# to evaluate the numbers they have to be integers so make them so
try:
start_value = int(start_value)
# if the number is not a interger let the user know to try again and recall the function to start again
except ValueError:
print("\nSoooo, remember we need real numbers here. Try again!")
get_less_than_value()
# create the list to hold the values less than
list_less_than = []
# evaluate each number and add it to the empty list
for values in list:
if values < start_value:
list_less_than.append(values)
# print (list_less_than) - use to check if list built correctly
# using the length of list let the user know what numbers are less or if there are no numbers that are less than
if len(list_less_than) < 1 :
print("\nThere are no numbers in your list less than %s " % (start_value))
elif len(list_less_than) > 1 :
print (("\nThe numbers less than are: %s") % (list_less_than))
get_less_than_value()
|
# Exercise 3 (and Solution)
# Take a list, say for example this one:
# a = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
# and write a program that prints out all the elements of the list that are less than 5.
# Extras:
# Instead of printing the elements one by one, make a new list that has all the elements less than 5 from this list in it and print out this new list.
# Write this in one line of Python.
# Ask the user for a number and return a list that contains only elements from the original list a that are smaller than that number given by the user.
# OUR PLAN: ask user to give 10 numbers (check to see if it is an actual interger) - for each additional number at it to a list, then check the list to see if any of the numbers are less than 5
print ("\nLet's play a game!\n I'll ask for 10 numbers.")
list = []
counter = 0
while counter < 10:
user_number = raw_input("Please give us a number:\n")
try:
val = int(user_number)
list.append(user_number)
counter = counter + 1
except ValueError:
print("That's not an int! Pleases use real numbers only. That is REAL numbers from math class.")
print list
def get_less_than_value():
start_value = raw_input("\nNow tell me a new number and I will tell you which one in your list are smaller. ")
try:
is_an_integer = int(start_value)
###MAKE A NEW FUNCTION HERE TO SPLIT list based on less than - us a for loop
except ValueError:
print("Soooo, remember we need real numbers here. Try again!")
get_less_than_value()
get_less_than_value()
|
apache-2.0
|
Python
|
d4ee599fe9cd88315d129e036fb034111bfc2272
|
Add types to common url parameters (#50000)
|
thaim/ansible,thaim/ansible
|
lib/ansible/utils/module_docs_fragments/url.py
|
lib/ansible/utils/module_docs_fragments/url.py
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, John Barker <gundalow@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = r'''
options:
url:
description:
- HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
type: str
force:
description:
- If C(yes) do not get a cached copy.
aliases:
- thirsty
type: bool
default: no
http_agent:
description:
- Header to identify as, generally appears in web server logs.
type: str
default: ansible-httpget
use_proxy:
description:
- If C(no), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
type: bool
default: yes
validate_certs:
description:
- If C(no), SSL certificates will not be validated.
- This should only be used on personally controlled sites using self-signed certificates.
type: bool
default: yes
url_username:
description:
- The username for use in HTTP basic authentication.
- This parameter can be used without I(url_password) for sites that allow empty passwords
type: str
url_password:
description:
- The password for use in HTTP basic authentication.
- If the I(url_username) parameter is not specified, the I(url_password) parameter will not be used.
type: str
force_basic_auth:
description:
- Credentials specified with I(url_username) and I(url_password) should be passed in HTTP Header.
type: bool
default: no
client_cert:
description:
- PEM formatted certificate chain file to be used for SSL client authentication.
- This file can also include the key as well, and if the key is included, C(client_key) is not required.
type: str
client_key:
description:
- PEM formatted file that contains your private key to be used for SSL client authentication.
- If C(client_cert) contains both the certificate and key, this option is not required.
type: str
'''
|
# (c) 2018, John Barker<gundalow@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard files documentation fragment
DOCUMENTATION = """
options:
url:
description:
- HTTP, HTTPS, or FTP URL in the form (http|https|ftp)://[user[:pass]]@host.domain[:port]/path
force:
description:
- If C(yes) do not get a cached copy.
aliases:
- thirsty
type: bool
default: no
http_agent:
description:
- Header to identify as, generally appears in web server logs.
default: ansible-httpget
use_proxy:
description:
- If C(no), it will not use a proxy, even if one is defined in an environment variable on the target hosts.
type: bool
default: yes
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
default: yes
type: bool
url_username:
description:
- The username for use in HTTP basic authentication.
- This parameter can be used without I(url_password) for sites that allow empty passwords
url_password:
description:
- The password for use in HTTP basic authentication.
- If the I(url_username) parameter is not specified, the I(url_password) parameter will not be used.
force_basic_auth:
description:
- Credentials specified with I(url_username) and I(url_password) should be passed in HTTP Header.
default: no
type: bool
client_cert:
description:
- PEM formatted certificate chain file to be used for SSL client
authentication. This file can also include the key as well, and if
the key is included, C(client_key) is not required.
client_key:
description:
- PEM formatted file that contains your private key to be used for SSL
client authentication. If C(client_cert) contains both the certificate
and key, this option is not required.
"""
|
mit
|
Python
|
e9df15b0f084ed9e026a5de129b109a3c546f99c
|
Handle comments in parse tree.
|
andybalaam/pepper,andybalaam/pepper,andybalaam/pepper,andybalaam/pepper,andybalaam/pepper
|
src/libeeyore/parse_tree_to_cpp.py
|
src/libeeyore/parse_tree_to_cpp.py
|
from itertools import imap
import builtins
from cpp.cpprenderer import EeyCppRenderer
from environment import EeyEnvironment
from functionvalues import *
from languagevalues import *
from values import *
def parse_tree_string_to_values( string ):
return eval( string )
def remove_comments( ln ):
i = ln.find( "#" )
if i != -1:
return ln[:i]
else:
return ln
def non_empty_line( ln ):
return ( ln.strip() != "" )
def parse_tree_to_cpp( parse_tree_in_fl, cpp_out_fl ):
env = EeyEnvironment( EeyCppRenderer() )
builtins.add_builtins( env )
values = ( parse_tree_string_to_values( ln ) for ln in
filter( non_empty_line, imap( remove_comments, parse_tree_in_fl ) ) )
cpp_out_fl.write( env.render_exe( values ) )
|
import builtins
from cpp.cpprenderer import EeyCppRenderer
from environment import EeyEnvironment
from values import *
def parse_tree_string_to_values( string ):
return eval( string )
def non_empty_line( ln ):
return ( ln.strip() != "" )
def parse_tree_to_cpp( parse_tree_in_fl, cpp_out_fl ):
env = EeyEnvironment( EeyCppRenderer() )
builtins.add_builtins( self )
values = ( parse_tree_string_to_values( ln ) for ln in
filter( non_empty_line, parse_tree_in_fl ) )
cpp_out_fl.write( env.render_exe( values ) )
|
mit
|
Python
|
7d09f713b929f60cd62ce48de2e2a8f27aa4de45
|
Fix unit tests.
|
qusp/orange3,kwikadi/orange3,marinkaz/orange3,qPCR4vir/orange3,kwikadi/orange3,marinkaz/orange3,kwikadi/orange3,kwikadi/orange3,cheral/orange3,cheral/orange3,marinkaz/orange3,marinkaz/orange3,qusp/orange3,qusp/orange3,marinkaz/orange3,qPCR4vir/orange3,cheral/orange3,qPCR4vir/orange3,qPCR4vir/orange3,cheral/orange3,cheral/orange3,marinkaz/orange3,qusp/orange3,qPCR4vir/orange3,cheral/orange3,qPCR4vir/orange3,kwikadi/orange3,kwikadi/orange3
|
Orange/tests/test_random_forest.py
|
Orange/tests/test_random_forest.py
|
import unittest
import Orange.data
import Orange.classification.random_forest as rf
from Orange.evaluation import scoring, testing
class RandomForestTest(unittest.TestCase):
def test_RandomForest(self):
table = Orange.data.Table('iris')
forest = rf.RandomForestLearner()
results = testing.CrossValidation(table, [forest], k=10)
ca = scoring.CA(results)
self.assertGreater(ca, 0.9)
self.assertLess(ca, 0.99)
def test_predict_single_instance(self):
table = Orange.data.Table('iris')
forest = rf.RandomForestLearner()
c = forest(table)
for ins in table:
c(ins)
val, prob = c(ins, c.ValueProbs)
def test_predict_table(self):
table = Orange.data.Table('iris')
forest = rf.RandomForestLearner()
c = forest(table)
c(table)
vals, probs = c(table, c.ValueProbs)
def test_predict_numpy(self):
table = Orange.data.Table('iris')
forest = rf.RandomForestLearner()
c = forest(table)
c(table.X)
vals, probs = c(table.X, c.ValueProbs)
|
import unittest
import Orange.data
import Orange.classification.random_forest as rf
from Orange.evaluation import scoring, testing
class RandomForestTest(unittest.TestCase):
def test_RandomForest(self):
table = Orange.data.Table('titanic')
forest = rf.RandomForestLearner()
results = testing.CrossValidation(table[::20], [forest], k=10)
ca = scoring.CA(results)
self.assertGreater(ca, 0.7)
self.assertLess(ca, 0.9)
def test_predict_single_instance(self):
table = Orange.data.Table('titanic')
forest = rf.RandomForestLearner()
c = forest(table)
for ins in table[::20]:
c(ins)
val, prob = c(ins, c.ValueProbs)
def test_predict_table(self):
table = Orange.data.Table('titanic')
forest = rf.RandomForestLearner()
c = forest(table)
table = table[::20]
c(table)
vals, probs = c(table, c.ValueProbs)
def test_predict_numpy(self):
table = Orange.data.Table('titanic')
forest = rf.RandomForestLearner()
c = forest(table)
X = table.X[::20]
c(X)
vals, probs = c(X, c.ValueProbs)
|
bsd-2-clause
|
Python
|
7041d2649b08d961cf5c7c4c663282e55526f2eb
|
Update pictures.py
|
haitaka/DroiTaka
|
cogs/pictures.py
|
cogs/pictures.py
|
from discord.ext import commands
import copy
import requests
class Pic:
"""Мемасики и просто картинки."""
def __init__(self, bot):
self.bot = bot
self.pic_dir = 'pictures/'
self.pic_dict = {}
self.update_pics()
def update_pics(self):
file_list = self.bot.pycopy.list_files(self.pic_dir)
for file_name in file_list:
self.pic_dict[file_name.split('.')[0]] = file_name
self.pic.aliases = list(self.pic_dict.values())
@commands.group(pass_context=True, aliases=[])
async def pic(self, ctx):
"""База картинок, мемесов etc."""
if ctx.invoked_with in self.pic_dict:
file = self.bot.pycopy.get_file(self.pic_path + self.pic_dict[ctx.invoked_with])
await self.bot.upload(file, self.pic_dict[ctx.invoked_with])
elif ctx.invoked_subcommand is None:
msg = copy.copy(ctx.message)
msg.content = ctx.prefix + 'help pic'
await self.bot.process_commands(msg)
@pic.command()
async def update(self):
"""Обновить список картиночек."""
self.update_pics()
await self.bot.say("Найдено {} картиночек.".format(len(self.pic_dict)))
@pic.command()
async def list(self):
"""Вывести список картиночек."""
pic_list = ''
id = 1
for pic in self.pic_dict:
pic_list += "{}. {}\n".format(id, pic)
id += 1
if len(pic_list) > 1800:
await self.bot.say(pic_list)
pic_list = ''
await self.bot.say(pic_list)
def setup(bot):
bot.add_cog(Pic(bot))
|
from discord.ext import commands
import copy
import requests
class Pic:
"""Мемасики и просто картинки."""
def __init__(self, bot):
self.bot = bot
self.pic_dir = 'pictures/'
self.pic_dict = {}
self.update_pics()
def update_pics(self):
file_list = self.bot.pycopy.list_files(self.pic_dir)
for file_name in file_list:
self.pic_dict[file_name.split('.')[0]] = file_name
self.pic.aliases = list(self.pic_dict.values())
@commands.group(pass_context=True, aliases=[])
async def pic(self, ctx):
"""База картинок, мемесов etc."""
if ctx.invoked_with in self.pic_dict:
url = self.bot.pycopy.direct_link(self.pic_path + self.pic_dict[ctx.invoked_with])
r = requests.get(url, stream=True)
if r.status_code == 200:
r.raw.decode_content = True
await self.bot.upload(r.raw, self.pic_dict[ctx.invoked_with])
elif ctx.invoked_subcommand is None:
msg = copy.copy(ctx.message)
msg.content = ctx.prefix + 'help pic'
await self.bot.process_commands(msg)
@pic.command()
async def update(self):
"""Обновить список картиночек."""
self.update_pics()
await self.bot.say("Найдено {} картиночек.".format(len(self.pic_dict)))
@pic.command()
async def list(self):
"""Вывести список картиночек."""
pic_list = ''
id = 1
for pic in self.pic_dict:
pic_list += "{}. {}\n".format(id, pic)
id += 1
if len(pic_list) > 1800:
await self.bot.say(pic_list)
pic_list = ''
await self.bot.say(pic_list)
def setup(bot):
bot.add_cog(Pic(bot))
|
mit
|
Python
|
a6a78260b47f3a632564e7a80ce25b3b75e242e9
|
Add sample code for API key authentication
|
timothycrosley/hug,MuhammadAlkarouri/hug,MuhammadAlkarouri/hug,timothycrosley/hug,timothycrosley/hug,MuhammadAlkarouri/hug
|
examples/authentication.py
|
examples/authentication.py
|
'''A basic example of authentication requests within a hug API'''
import hug
# Several authenticators are included in hug/authentication.py. These functions
# accept a verify_user function, which can be either an included function (such
# as the basic username/bassword function demonstrated below), or logic of your
# own. Verification functions return an object to store in the request context
# on successful authentication. Naturally, this is a trivial demo, and a much
# more robust verification function is recommended. This is for strictly
# illustrative purposes.
authentication = hug.authentication.basic(hug.authentication.verify('User1', 'mypassword'))
@hug.get('/public')
def public_api_call():
return "Needs no authentication"
# Note that the logged in user can be accessed via a built-in directive.
# Directives can provide computed input parameters via an abstraction
# layer so as not to clutter your API functions with access to the raw
# request object.
@hug.get('/authenticated', requires=authentication)
def basic_auth_api_call(user: hug.directives.user):
return 'Successfully authenticated with user: {0}'.format(user)
# Here is a slightly less trivial example of how authentication might
# look in an API that uses keys.
# First, the user object stored in the context need not be a string,
# but can be any Python object.
class APIUser(object):
"""A minimal example of a rich User object"""
def __init__(self, user_id, api_key):
self.user_id = user_id
self.api_key = api_key
def api_key_verify(api_key):
magic_key = '5F00832B-DE24-4CAF-9638-C10D1C642C6C' # Obviously, this would hit your database
if api_key == magic_key:
# Success!
return APIUser('user_foo', api_key)
else:
# Invalid key
return None
api_key_authentication = hug.authentication.api_key(api_key_verify)
@hug.get('/key_authenticated', requires=api_key_authentication)
def basic_auth_api_call(user: hug.directives.user):
return 'Successfully authenticated with user: {0}'.format(user.user_id)
|
'''A basic example of authentication requests within a hug API'''
import hug
# Several authenticators are included in hug/authentication.py. These functions
# accept a verify_user function, which can be either an included function (such
# as the basic username/bassword function demonstrated below), or logic of your
# own. Verification functions return an object to store in the request context
# on successful authentication. Naturally, this is a trivial demo, and a much
# more robust verification function is recommended. This is for strictly
# illustrative purposes.
authentication = hug.authentication.basic(hug.authentication.verify('User1', 'mypassword'))
# Note that the logged in user can be accessed via a built-in directive.
# Directives can provide computed input parameters via an abstraction
# layer so as not to clutter your API functions with access to the raw
# request object.
@hug.get('/authenticated', requires=authentication)
def api_call1(user: hug.directives.user):
return "Successfully authenticated with user: {0}".format(user)
@hug.get('/public')
def api_call2():
return "Needs no authentication"
|
mit
|
Python
|
6e660da290db674eebb0c353662e5400bc735397
|
Update backplane demo to be py3 only
|
sparkslabs/guild,sparkslabs/guild,sparkslabs/guild
|
examples/backplane_demo.py
|
examples/backplane_demo.py
|
#!/usr/bin/python
import time
from guild.actor import *
from guild.components import Backplane, PublishTo, SubscribeTo, Printer
class Producer(Actor):
@process_method
def process(self):
self.output("hello")
@late_bind_safe
def output(self, value):
pass
Backplane("HELLO").start()
p = Producer()
pr = Printer()
time.sleep(1)
pub = PublishTo("HELLO")
sub = SubscribeTo("HELLO")
print("pub", pub, repr(pub), pub.input)
pipeline(p, pub)
pipeline(sub, pr)
start(p, pr, sub)
time.sleep(1.0)
stop(p, pr, sub)
wait_for(p, pr, sub)
|
#!/usr/bin/python
import time
from guild.actor import *
from guild.components import Backplane, PublishTo, SubscribeTo, Printer
class Producer(Actor):
@process_method
def process(self):
self.output("hello")
@late_bind_safe
def output(self, value):
pass
Backplane("HELLO").start()
p = Producer()
pr = Printer()
time.sleep(1)
pub = PublishTo("HELLO")
sub = SubscribeTo("HELLO")
print "pub", pub, repr(pub), pub.input
pipeline(p, pub)
pipeline(sub, pr)
start(p, pr, sub)
time.sleep(1.0)
stop(p, pr, sub)
wait_for(p, pr, sub)
|
apache-2.0
|
Python
|
0b37c0f1cba1a6e89a63f9597d61383b81b1a2d9
|
Fix typo
|
SahilTikale/haas,CCI-MOC/haas
|
haas/client/network.py
|
haas/client/network.py
|
import json
from haas.client.base import ClientBase
class Network(ClientBase):
"""Consists of calls to query and manipulate network related
objects and relations.
"""
def list(self):
"""Lists all networks under HIL """
url = self.object_url('networks')
return self.check_response(self.httpClient.request("GET", url))
def show(self, network):
"""Shows attributes of a network. """
url = self.object_url('network', network)
return self.check_response(self.httpClient.request("GET", url))
def create(self, network, owner, access, net_id):
"""Create a link-layer <network>.
See docs/networks.md for details.
"""
url = self.object_url('network', network)
payload = json.dumps({
'owner': owner, 'access': access,
'net_id': net_id
})
return self.check_response(
self.httpClient.request("PUT", url, data=payload)
)
def delete(self, network):
"""Delete a <network>. """
url = self.object_url('network', network)
return self.check_response(self.httpClient.request("DELETE", url))
def grant_access(self, project, network):
"""Grants <project> access to <network>. """
url = self.object_url(
'network', network, 'access', project
)
return self.check_response(self.httpClient.request("PUT", url))
def revoke_access(self, project, network):
"""Removes access of <network> from <project>. """
url = self.object_url(
'network', network, 'access', project
)
return self.check_response(self.httpClient.request("DELETE", url))
|
import json
from haas.client.base import ClientBase
class Network(ClientBase):
"""Consists of calls to query and manipulate network related
objects and relations.
"""
def list(self):
"""Lists all projects under HIL """
url = self.object_url('networks')
return self.check_response(self.httpClient.request("GET", url))
def show(self, network):
"""Shows attributes of a network. """
url = self.object_url('network', network)
return self.check_response(self.httpClient.request("GET", url))
def create(self, network, owner, access, net_id):
"""Create a link-layer <network>.
See docs/networks.md for details.
"""
url = self.object_url('network', network)
payload = json.dumps({
'owner': owner, 'access': access,
'net_id': net_id
})
return self.check_response(
self.httpClient.request("PUT", url, data=payload)
)
def delete(self, network):
"""Delete a <network>. """
url = self.object_url('network', network)
return self.check_response(self.httpClient.request("DELETE", url))
def grant_access(self, project, network):
"""Grants <project> access to <network>. """
url = self.object_url(
'network', network, 'access', project
)
return self.check_response(self.httpClient.request("PUT", url))
def revoke_access(self, project, network):
"""Removes access of <network> from <project>. """
url = self.object_url(
'network', network, 'access', project
)
return self.check_response(self.httpClient.request("DELETE", url))
|
apache-2.0
|
Python
|
56f6339401fe5f792915279d98f553f3415e2c62
|
Fix module docstring (#163)
|
balloob/netdisco
|
netdisco/discoverables/harmony.py
|
netdisco/discoverables/harmony.py
|
"""Discover Harmony Hub remotes."""
from . import SSDPDiscoverable
class Discoverable(SSDPDiscoverable):
"""Add support for discovering Harmony Hub remotes"""
def get_entries(self):
"""Get all the Harmony uPnP entries."""
return self.find_by_device_description({
"manufacturer": "Logitech",
"deviceType": "urn:myharmony-com:device:harmony:1"
})
|
"""Discover Netgear routers."""
from . import SSDPDiscoverable
class Discoverable(SSDPDiscoverable):
"""Add support for discovering Harmony Hub remotes"""
def get_entries(self):
"""Get all the Harmony uPnP entries."""
return self.find_by_device_description({
"manufacturer": "Logitech",
"deviceType": "urn:myharmony-com:device:harmony:1"
})
|
mit
|
Python
|
d3425693d245c9dfa5350017903fc02a11ecd881
|
use width/height as percent base for x/y
|
pureqml/qmlcore,pureqml/qmlcore,pureqml/qmlcore
|
compiler/lang.py
|
compiler/lang.py
|
import re
def value_is_trivial(value):
if value is None or not isinstance(value, str):
return False
if value[0] == '(' and value[-1] == ')':
value = value[1:-1]
if value == 'true' or value == 'false':
return True
try:
float(value)
return True
except:
pass
if value[0] == '"' and value[-1] == '"':
if value.count('"') == value.count('\\"') + 2:
return True
#print "?trivial", value
return False
class DocumentationString(object):
def __init__(self, text):
self.text = text
class Entity(object):
def __init__(self):
self.doc = None
class Component(Entity):
def __init__(self, name, children):
super(Component, self).__init__()
self.name = name
self.children = children
class Property(Entity):
def __init__(self, type, name, value = None):
super(Property, self).__init__()
self.type = type
self.name = name
self.value = value
def is_trivial(self):
return value_is_trivial(self.value)
class AliasProperty(Entity):
def __init__(self, name, target):
super(AliasProperty, self).__init__()
self.name = name
self.target = target
class EnumProperty(Entity):
def __init__(self, name, values, default):
super(EnumProperty, self).__init__()
self.name = name
self.values = values
self.default = default
class Constructor(Entity):
def __init__(self, args, code):
super(Constructor, self).__init__()
if len(args) != 0:
raise Exception("no arguments for constructor allowed")
self.code = code
class Method(Entity):
def __init__(self, name, args, code, event):
super(Method, self).__init__()
self.name = name
self.args = args
self.code = code
self.event = event
class IdAssignment(Entity):
def __init__(self, name):
super(IdAssignment, self).__init__()
self.name = name
class Assignment(Entity):
re_name = re.compile('<property-name>')
def __init__(self, target, value):
super(Assignment, self).__init__()
self.target = target
dot = target.rfind('.')
property_name = target[dot + 1:] if dot >= 0 else target
if property_name == 'x':
property_name = 'width'
elif property_name == 'y':
property_name = 'height'
self.value = Assignment.re_name.sub(property_name, value) if isinstance(value, str) else value
def is_trivial(self):
return value_is_trivial(self.value)
class AssignmentScope(Entity):
def __init__(self, target, values):
super(AssignmentScope, self).__init__()
self.target = target
self.values = values
class Behavior(Entity):
def __init__(self, target, animation):
super(Behavior, self).__init__()
self.target = target
self.animation = animation
class Signal(Entity):
def __init__(self, name):
super(Signal, self).__init__()
self.name = name
class ListElement(Entity):
def __init__(self, data):
super(ListElement, self).__init__()
self.data = data
|
import re
def value_is_trivial(value):
if value is None or not isinstance(value, str):
return False
if value[0] == '(' and value[-1] == ')':
value = value[1:-1]
if value == 'true' or value == 'false':
return True
try:
float(value)
return True
except:
pass
if value[0] == '"' and value[-1] == '"':
if value.count('"') == value.count('\\"') + 2:
return True
#print "?trivial", value
return False
class DocumentationString(object):
def __init__(self, text):
self.text = text
class Entity(object):
def __init__(self):
self.doc = None
class Component(Entity):
def __init__(self, name, children):
super(Component, self).__init__()
self.name = name
self.children = children
class Property(Entity):
def __init__(self, type, name, value = None):
super(Property, self).__init__()
self.type = type
self.name = name
self.value = value
def is_trivial(self):
return value_is_trivial(self.value)
class AliasProperty(Entity):
def __init__(self, name, target):
super(AliasProperty, self).__init__()
self.name = name
self.target = target
class EnumProperty(Entity):
def __init__(self, name, values, default):
super(EnumProperty, self).__init__()
self.name = name
self.values = values
self.default = default
class Constructor(Entity):
def __init__(self, args, code):
super(Constructor, self).__init__()
if len(args) != 0:
raise Exception("no arguments for constructor allowed")
self.code = code
class Method(Entity):
def __init__(self, name, args, code, event):
super(Method, self).__init__()
self.name = name
self.args = args
self.code = code
self.event = event
class IdAssignment(Entity):
def __init__(self, name):
super(IdAssignment, self).__init__()
self.name = name
class Assignment(Entity):
re_name = re.compile('<property-name>')
def __init__(self, target, value):
super(Assignment, self).__init__()
self.target = target
def replace_name(m):
dot = target.rfind('.')
name = target.substr(dot + 1) if dot >= 0 else target
return name
self.value = Assignment.re_name.sub(replace_name, value) if isinstance(value, str) else value
def is_trivial(self):
return value_is_trivial(self.value)
class AssignmentScope(Entity):
def __init__(self, target, values):
super(AssignmentScope, self).__init__()
self.target = target
self.values = values
class Behavior(Entity):
def __init__(self, target, animation):
super(Behavior, self).__init__()
self.target = target
self.animation = animation
class Signal(Entity):
def __init__(self, name):
super(Signal, self).__init__()
self.name = name
class ListElement(Entity):
def __init__(self, data):
super(ListElement, self).__init__()
self.data = data
|
mit
|
Python
|
08d11dc308db007750fe06ea906264a6ab9f44cd
|
Add logging when cloning repository
|
omarkhan/opencraft,open-craft/opencraft,open-craft/opencraft,omarkhan/opencraft,omarkhan/opencraft,open-craft/opencraft,brousch/opencraft,open-craft/opencraft,omarkhan/opencraft,brousch/opencraft,open-craft/opencraft,brousch/opencraft
|
instance/repo.py
|
instance/repo.py
|
# -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015 OpenCraft <xavier@opencraft.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Git repository - Helper functions
"""
# Imports #####################################################################
import git
import tempfile
import shutil
from contextlib import contextmanager
# Logging #####################################################################
import logging
logger = logging.getLogger(__name__)
# Functions ###################################################################
@contextmanager
def open_repository(repo_url, ref='master'):
"""
Get a `Git` object for a repository URL and switch it to the branch `ref`
Note that this clones the repository locally
"""
repo_dir_path = tempfile.mkdtemp()
logger.info('Cloning repository %s (ref=%s) in %s...', repo_url, ref, repo_dir_path)
git.repo.base.Repo.clone_from(repo_url, repo_dir_path)
g = git.Git(repo_dir_path)
g.checkout(ref)
yield g
shutil.rmtree(repo_dir_path)
|
# -*- coding: utf-8 -*-
#
# OpenCraft -- tools to aid developing and hosting free software projects
# Copyright (C) 2015 OpenCraft <xavier@opencraft.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Git repository - Helper functions
"""
# Imports #####################################################################
import git
import tempfile
import shutil
from contextlib import contextmanager
# Functions ###################################################################
@contextmanager
def open_repository(repo_url, ref='master'):
"""
Get a `Git` object for a repository URL and switch it to the branch `ref`
Note that this clones the repository locally
"""
repo_dir_path = tempfile.mkdtemp()
git.repo.base.Repo.clone_from(repo_url, repo_dir_path)
g = git.Git(repo_dir_path)
g.checkout(ref)
yield g
shutil.rmtree(repo_dir_path)
|
agpl-3.0
|
Python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.