Dataset schema (column: type, value range):

  commit:        stringlengths, 40 to 40
  subject:       stringlengths, 4 to 1.73k
  repos:         stringlengths, 5 to 127k
  old_file:      stringlengths, 2 to 751
  new_file:      stringlengths, 2 to 751
  new_contents:  stringlengths, 1 to 8.98k
  old_contents:  stringlengths, 0 to 6.59k
  license:       stringclasses, 13 values
  lang:          stringclasses, 23 values
f3c9284bf7b5d9ae4acc413fd7feb824fdb7aca0
create field to exclude recomputation of old invoices
OCA/l10n-italy,dcorio/l10n-italy,dcorio/l10n-italy,OCA/l10n-italy,OCA/l10n-italy,dcorio/l10n-italy
l10n_it_fatturapa_in/migrations/12.0.1.18.3/pre-migration.py
l10n_it_fatturapa_in/migrations/12.0.1.18.3/pre-migration.py
from openupgradelib import openupgrade


@openupgrade.migrate()
def migrate(env, version):
    if not version:
        return
    openupgrade.logged_query(
        env.cr,
        """
        ALTER TABLE fatturapa_attachment_in
        ADD COLUMN IF NOT EXISTS invoices_date character varying
        """,
    )
agpl-3.0
Python
60068d4deeba541b9518579d6d8473c4300e189d
Test killing onitu during a transfer
onitu/onitu,onitu/onitu,onitu/onitu
tests/functional/test_crash.py
tests/functional/test_crash.py
import os.path
from os import unlink

from utils.launcher import Launcher
from utils.entries import Entries
from utils.loop import CounterLoop, BooleanLoop
from utils.files import generate, checksum
from utils.tempdirs import TempDirs

launcher = None
dirs = TempDirs()
rep1, rep2 = dirs.create(), dirs.create()
json_file = 'test_crash.json'


def setup_module(module):
    global launcher
    entries = Entries()
    entries.add('local_storage', 'rep1', {'root': rep1})
    entries.add('local_storage', 'rep2', {'root': rep2})
    entries.save(json_file)
    launcher = Launcher(json_file)


def teardown_module(module):
    launcher.kill()
    unlink(json_file)
    dirs.delete()


def launcher_startup():
    loop = CounterLoop(3)
    launcher.on_referee_started(loop.check)
    launcher.on_driver_started(loop.check, driver='rep1')
    launcher.on_driver_started(loop.check, driver='rep2')
    launcher()
    loop.run(timeout=5)


def test_crash():
    filename = 'crash'
    loop = BooleanLoop()
    launcher.on_transfer_started(
        loop.stop, d_from='rep1', d_to='rep2', filename=filename
    )
    launcher_startup()
    generate(os.path.join(rep1, filename), 1000)
    loop.run(timeout=5)
    launcher.kill()
    launcher.unset_all_events()
    loop = BooleanLoop()
    launcher.on_transfer_ended(
        loop.stop, d_from='rep1', d_to='rep2', filename=filename
    )
    launcher_startup()
    loop.run(timeout=5)
    assert (checksum(os.path.join(rep1, filename)) ==
            checksum(os.path.join(rep2, filename)))
    launcher.kill()
mit
Python
e541d2c6c9c71647201ad39eb8a774eabe243139
Add gaussian smoothing example (#485)
akaszynski/vtkInterface
examples/01-filter/gaussian-smoothing.py
examples/01-filter/gaussian-smoothing.py
""" Gaussian smoothing ~~~~~~~~~~~~~~~~~~ Perform a gaussian convolution. """ import pyvista as pv from pyvista import examples # Load dataset data = examples.download_gourds() # Define a good point of view cp = [ (319.5, 239.5, 1053.7372980874645), (319.5, 239.5, 0.0), (0.0, 1.0, 0.0) ] ############################################################################### # Let's apply the gaussian smoothing with different values of standard # deviation. p = pv.Plotter(shape=(2, 2)) p.subplot(0, 0) p.add_text("Original Image", font_size=24) p.add_mesh(data, rgb=True) p.camera_position = cp p.subplot(0, 1) p.add_text("Gaussian smoothing, std=2", font_size=24) p.add_mesh(data.gaussian_smooth(std_dev=2.), rgb=True) p.camera_position = cp p.subplot(1, 0) p.add_text("Gaussian smoothing, std=4", font_size=24) p.add_mesh(data.gaussian_smooth(std_dev=4.), rgb=True) p.camera_position = cp p.subplot(1, 1) p.add_text("Gaussian smoothing, std=8", font_size=24) p.add_mesh(data.gaussian_smooth(std_dev=8.), rgb=True) p.camera_position = cp p.show() ############################################################################### # Now let's see an example on a 3D dataset with volume rendering: data = examples.download_brain() smoothed_data = data.gaussian_smooth(std_dev=3.) dargs = dict(clim=smoothed_data.get_data_range(), opacity=[0, 0, 0, 0.1, 0.3, 0.6, 1]) n = [100, 150, 200, 245, 255] p = pv.Plotter(shape=(1, 2), notebook=0) p.subplot(0, 0) p.add_text("Original Image", font_size=24) # p.add_mesh(data.contour(n), **dargs) p.add_volume(data, **dargs) p.subplot(0, 1) p.add_text("Gaussian smoothing", font_size=24) # p.add_mesh(smoothed_data.contour(n), **dargs) p.add_volume(smoothed_data, **dargs) p.link_views() p.camera_position = [(-162.0, 704.8, 65.02), (90.0, 108.0, 90.0), (0.0068, 0.0447, 0.999)] p.show()
mit
Python
59d435ab1d0e5347180f60633d316aa7f2a3abdb
add send_TWH_text module to package
Timothy-W-Hilton/TimPyUtils
timutils/send_TWH_txt.py
timutils/send_TWH_txt.py
""" short module to send a text message to Tim Hilton's phone using Verizon's email-to-sms support and gmail's smtp mail server. I was unable to get UC Merced's outlook.com server to accept the outgoing message. Timothy W. Hilton, UC Merced, 25 Feb 2014 """ import smtplib import getpass def get_outgoing_mail_password(): pwd = getpass.getpass(prompt='Gmail password: ') if len(pwd) == 0: pwd = None return(pwd) def send_vtext_gmail(gmail_passwd, gmail_uname='timothy.w.hilton@gmail.com', dest_phone_num='4153147478', msg_txt='testing 123'): vtext_addr = "{}@vtext.com".format(dest_phone_num) msg = """From: %s To: %s Subject: text-message\n %s""" % (gmail_uname, vtext_addr, msg_txt) server = smtplib.SMTP('smtp.gmail.com',587) server.starttls() server.login(gmail_uname,gmail_passwd) server.sendmail(gmail_uname, vtext_addr, msg) server.quit() def send_vtext_outlook(ucmerced_uname, smtp_password, dest_phone_num, msg_txt): """ 25 Feb 2014: couldn't get sending mail through UC Merced's outlook.com SMTP server to work. Probably something related to the formatting of the outlook.com username? -TWH """ vtext_addr = "{}@vtext.com".format(dest_phone_num) smtp_uname = "{}@ucmerced.edu".format(ucmerced_uname) msg = """From: %s To: %s Subject: text-message %s""" % (smtp_uname, vtext_addr, msg_txt) print smtp_uname result = 0 # server = smtplib.SMTP('pod51011.outlook.com',587) # server.starttls() # server.login(smtp_uname,smtp_password) # result = server.sendmail(smtp_uname, vtext_addr, msg) # server.quit() print result if __name__ == "__main__": passwd = get_outgoing_mail_password() if passwd is not None: send_vtext_gmail(passwd, msg_txt='here is the message') else: print('no password provided')
mit
Python
4ef17b96531a511b7ad620a0753594a2892af65c
Add monte_carlo_multigpu.py
cupy/cupy,cupy/cupy,cupy/cupy,cupy/cupy
examples/finance/monte_carlo_multigpu.py
examples/finance/monte_carlo_multigpu.py
import argparse
import contextlib
import sys
import time

import cupy
import numpy

from black_scholes import black_scholes_kernel
from monte_carlo import monte_carlo_kernel

# CuPy also implements a feature to call kernels in different GPUs.
# Through this sample, we will explain how to allocate arrays
# in different devices, and call kernels in parallel.

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpus', type=int, nargs='*', default=[0],
                        help='GPU IDs')
    parser.add_argument('--n-options', default=1000, type=int)
    parser.add_argument('--n-samples-per-thread', default=1000, type=int)
    parser.add_argument('--n-threads-per-option', default=10000, type=int)
    args = parser.parse_args()

    if len(args.gpus) == 0:
        print('At least one GPU is required.')
        sys.exit(1)

    def rand_range(m, M):
        samples = numpy.random.rand(args.n_options)
        return (m + (M - m) * samples).astype(numpy.float64)

    print('initializing...')
    stock_price_cpu = rand_range(5, 30)
    option_strike_cpu = rand_range(1, 100)
    option_years_cpu = rand_range(0.25, 10)
    risk_free = 0.02
    volatility = 0.3

    stock_price_gpus = []
    option_strike_gpus = []
    option_years_gpus = []
    call_prices_gpus = []

    print('start computation')
    print('    # of gpus: {}'.format(len(args.gpus)))
    print('    # of options: {}'.format(args.n_options))
    print('    # of samples per option: {}'.format(
        len(args.gpus) * args.n_samples_per_thread *
        args.n_threads_per_option))

    # Allocate arrays in different devices
    for gpu_id in args.gpus:
        with cupy.cuda.Device(gpu_id):
            stock_price_gpus.append(cupy.array(stock_price_cpu))
            option_strike_gpus.append(cupy.array(option_strike_cpu))
            option_years_gpus.append(cupy.array(option_years_cpu))
            call_prices_gpus.append(cupy.empty(
                (args.n_options, args.n_threads_per_option),
                dtype=numpy.float64))

    @contextlib.contextmanager
    def timer(message):
        cupy.cuda.Stream.null.synchronize()
        start = time.time()
        yield
        cupy.cuda.Stream.null.synchronize()
        end = time.time()
        print('%s:\t%f sec' % (message, end - start))

    with timer('GPU (CuPy, Monte Carlo method)'):
        for i, gpu_id in enumerate(args.gpus):
            # Performs Monte-Carlo simulations in parallel
            with cupy.cuda.Device(gpu_id):
                monte_carlo_kernel(
                    stock_price_gpus[i][:, None],
                    option_strike_gpus[i][:, None],
                    option_years_gpus[i][:, None],
                    risk_free, volatility,
                    args.n_samples_per_thread, i,
                    call_prices_gpus[i])

        # Transfer the result from the GPUs
        call_prices = [c.get() for c in call_prices_gpus]

    call_mc = numpy.concatenate(call_prices).reshape(
        len(args.gpus), args.n_options, args.n_threads_per_option)
    call_mc = call_mc.mean(axis=(0, 2))

    # Compute the error between the value of the exact solution
    # and that of the Monte-Carlo simulation
    with cupy.cuda.Device(args.gpus[0]):
        call_bs = black_scholes_kernel(
            stock_price_gpus[0], option_strike_gpus[0],
            option_years_gpus[0], risk_free, volatility)[0].get()
    error = cupy.std(call_mc - call_bs)
    print('Error: %f' % error)
mit
Python
2b4c065b986ca1e05d0755b2b64502861b17364d
add import script for Oldham
DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations
polling_stations/apps/data_collection/management/commands/import_oldham.py
polling_stations/apps/data_collection/management/commands/import_oldham.py
from data_collection.management.commands import BaseXpressCsvImporter


class Command(BaseXpressCsvImporter):
    council_id = 'E08000004'
    addresses_name = 'OldhamPropertyPostCodePollingStationWebLookup-2017-02-16.TSV'
    stations_name = 'OldhamPropertyPostCodePollingStationWebLookup-2017-02-16.TSV'
    elections = ['mayor.greater-manchester.2017-05-04']
    csv_delimiter = '\t'
bsd-3-clause
Python
1064b7bc9e343f3ab9308172f6a3129745e7a548
add test.py
gabstopper/smc-python
test.py
test.py
#!/usr/bin/python

import smc
from pprint import pprint
import time

import logging
logger = logging.getLogger(__name__)

smc.login('http://172.18.1.150:8082', 'EiGpKD4QxlLJ25dbBEp20001')

# Example of using a search filter
# Response is a json record with a reference link to the object
# smc.get_element_by_href(href) gets the record directly

# Search for group named (Skype Servers)
mygroup = smc.filter_by_type('group', 'Skype Servers')
if mygroup:
    pprint(smc.get_element_by_href(mygroup['href']))

# Search for single_fw instance named vmware-fw
myfw = smc.filter_by_type('single_fw', 'vmware-fw')
if myfw:
    pprint(smc.get_element_by_href(myfw['href']))

# Search for host named ami
myhost = smc.filter_by_type('host', 'ami')
if myhost:
    pprint(smc.get_element_by_href(myhost['href']))

# Search by top level element if element type is not known
myobject = smc.filter_by_element('myelement')

'''
#Creating/removing a host record. Validation is done based on IP address.
smc.create_host('ami', '1.1.1.2')
smc.remove_host('ami')
smc.create_host('a', 'a.b.c.d')    #Should fail, not valid IP
smc.remove_host('ami2')    #should fail if host doesn't exist
'''

'''
#Create group and add members
smc.create_group('group_with_no_members')
smc.create_host('ami', '1.1.1.1')
smc.create_host('ami2', '2.2.2.2')
smc.create_group('anewgroup', ['ami','ami2'])
'''

'''
#Example of creating a group record. If members is included, each member href
#needs to be validated or warning will be issued that members can't be added
smc.create_group('mygroup')
smc.create_group('mygroup', ['member1','member2','member3'])
'''

'''
#Example of creating a single_fw instance. method signature is:
#smc.create_single_fw(name, IP (mgmt), network (mgmt), dns=None, fw_license=None)
#If DNS and fw_license are provided, DNS is added to fw and an attempt is made
#to attach an available license if available
smc.create_single_fw('lepage', '172.18.1.5', '172.18.1.0/24', dns='5.5.5.5', fw_license=True)
time.sleep(5)
smc.remove_single_fw('lepage')
'''

'''
#Get available dynamic licenses
print "License: %s" % smc.get_dynamic_license()
'''

smc.logout()
apache-2.0
Python
fa2fd9cdab29a5736ae6b69c5f754f92a33c7f74
add wsgi.py
xuanthuong/golfgame
wsgi.py
wsgi.py
from server import app

if __name__ == "__main__":
    app.run()
mit
Python
f4e12493c000b6bb3051e9c201347d420c8dd687
add basis for netcomp class
schae234/Camoco,schae234/Camoco
camoco/NetComp.py
camoco/NetComp.py
from .COB import COB
# Camoco base class import added; its module path is assumed to mirror
# the .COB import above
from .Camoco import Camoco


class NetComp(Camoco):
    def __init__(self, name, networks):
        self.networks = set()
        # Add all the networks
        for n in networks:
            self.add_network(n)

    def add_network(self, net):
        '''
        Add a network (COB) to the NetComp object.
        '''
        if isinstance(net, str):
            net = COB(net)
        if not isinstance(net, COB):
            raise ValueError('a valid network must be provided')
        self.networks.add(net)
mit
Python
2a3b89f42cde7088b304a3f224eaf52894f544ec
Add an python example for stream testing
xfleckx/BeMoBI,xfleckx/BeMoBI
misc/utils/LSL_Tests/RecieveAppStatistics.py
misc/utils/LSL_Tests/RecieveAppStatistics.py
"""Example program to show how to read a multi-channel time series from LSL.""" from pylsl import StreamInlet, resolve_stream import sys # first resolve an EEG stream on the lab network print("looking for an Unity3D.AppStatistics stream...") streams = resolve_stream('type', 'Unity3D.FPS.FT') # create a new inlet to read from the stream inlet = StreamInlet(streams[0]) while True: # get a new sample (you can also omit the timestamp part if you're not # interested in it) sample, timestamp = inlet.pull_sample() print '\r' + str(round(timestamp)) + '\t' + str(sample), sys.stdout.flush()
mit
Python
52c7d6ba8f6dcb6c6f1bd02790ab9bb7fae8ebcd
add script
adamewing/tebreak,adamewing/tebreak
scripts/grabBAMrecs.py
scripts/grabBAMrecs.py
#!/usr/bin/env python

import sys
import pysam
import os
import re

from collections import defaultdict as dd

import logging
logger = logging.getLogger(__name__)
FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(format=FORMAT)
logger.setLevel(logging.INFO)


def find_mate(read, bam):
    ''' AlignmentFile.mate() can return a non-primary alignment, so use this function instead '''
    chrom = read.next_reference_name
    for rec in bam.fetch(chrom, read.next_reference_start, read.next_reference_start+1):
        if rec.query_name == read.query_name and rec.reference_start == read.next_reference_start:
            if not rec.is_secondary and bin(rec.flag & 2048) != bin(2048):
                if rec.is_read1 != read.is_read1:
                    return rec
    return None


if len(sys.argv) == 3:
    inbam = pysam.AlignmentFile(sys.argv[1], 'rb')
    outfn = '.'.join(os.path.basename(sys.argv[1]).split('.')[:-1]) + '.' + re.sub(':', '_', sys.argv[2]) + '.bam'
    outbam = pysam.AlignmentFile(outfn, 'wb', template=inbam)

    seen = dd(list)

    for read in inbam.fetch(region=sys.argv[2]):
        if not read.is_supplementary and not read.is_secondary and not read.mate_is_unmapped:
            outbam.write(read)
            seen[read.qname].append(read.is_read1)

    seen_pairs = 0
    seen_alone = 0

    for qname, pair in seen.iteritems():
        assert len(set(pair)) <= 2
        if len(set(pair)) == 2:
            seen_pairs += 1
        if len(set(pair)) == 1:
            seen_alone += 1

    logger.info('%d pairs inside and %d mates outside region %s' % (seen_pairs, seen_alone, sys.argv[2]))

    matebam = pysam.AlignmentFile(sys.argv[1], 'rb')

    for read in inbam.fetch(region=sys.argv[2]):
        if not read.is_supplementary and not read.is_secondary and not read.mate_is_unmapped:
            assert read.qname in seen
            if len(set(seen[read.qname])) == 1:
                mate = find_mate(read, matebam)
                if mate is not None:
                    outbam.write(mate)

else:
    sys.exit('usage: %s <BAM> <region chrom:start-end>' % sys.argv[0])
mit
Python
6ad72a0c624abdda0df8d5c49366bfc597a12340
Add tests for utils experiment module
NLeSC/cptm,NLeSC/cptm
cptm/tests/test_utils_experiment.py
cptm/tests/test_utils_experiment.py
from nose.tools import assert_equal, assert_false
from os import remove
from os.path import join
from json import dump

from cptm.utils.experiment import load_config, add_parameter, thetaFileName, \
    topicFileName, opinionFileName, tarFileName, experimentName


def setup():
    global jsonFile
    global config
    global nTopics

    jsonFile = 'config.json'

    # create cofig.json
    params = {}
    with open(jsonFile, 'wb') as f:
        dump(params, f, sort_keys=True, indent=4)

    config = load_config(jsonFile)

    nTopics = 100


def teardown():
    remove(jsonFile)


def test_load_config_default_values():
    params = {}
    params['inputData'] = None
    params['outDir'] = '/{}'
    params['testSplit'] = 20
    params['minFreq'] = None
    params['removeTopTF'] = None
    params['removeTopDF'] = None
    params['nIter'] = 200
    params['beta'] = 0.02
    params['beta_o'] = 0.02
    params['expNumTopics'] = range(20, 201, 20)
    params['nTopics'] = None
    params['nProcesses'] = None
    params['topicLines'] = [0]
    params['opinionLines'] = [1]
    params['sampleEstimateStart'] = None
    params['sampleEstimateEnd'] = None

    for p, v in params.iteritems():
        yield assert_equal, v, params[p]


def test_add_parameter():
    pName = 'nTopics'
    yield assert_false, hasattr(config, pName)

    add_parameter(pName, nTopics, jsonFile)
    config2 = load_config(jsonFile)
    yield assert_equal, config2[pName], nTopics


def test_thetaFileName():
    config['nTopics'] = nTopics
    fName = thetaFileName(config)
    assert_equal(fName, '/theta_{}.csv'.format(nTopics))


def test_topicFileName():
    config['nTopics'] = nTopics
    fName = topicFileName(config)
    assert_equal(fName, '/topics_{}.csv'.format(nTopics))


def test_opinionFileName():
    config['nTopics'] = nTopics
    return join(params.get('outDir').format(''),
                'opinions_{}_{}.csv'.format(name, nTopics))

#def experimentName(params):
#    fName = params.get('outDir')
#    fName = fName.replace('/{}', '')
#    _p, name = os.path.split(fName)
#    return name


#def tarFileName(params):
#    nTopics = params.get('nTopics')
#    name = experimentName(params)
#    return os.path.join(params.get('outDir').format(''),
#                        '{}_{}.tgz'.format(name, nTopics))
apache-2.0
Python
f73800f8e4ccd76d858c08d8cc8a72a6f2274fb6
Validate settings a tad later
kingosticks/mopidy,pacificIT/mopidy,mopidy/mopidy,hkariti/mopidy,adamcik/mopidy,glogiotatidis/mopidy,rawdlite/mopidy,mokieyue/mopidy,jodal/mopidy,dbrgn/mopidy,jmarsik/mopidy,abarisain/mopidy,jodal/mopidy,mokieyue/mopidy,mopidy/mopidy,liamw9534/mopidy,quartz55/mopidy,bencevans/mopidy,swak/mopidy,quartz55/mopidy,rawdlite/mopidy,pacificIT/mopidy,jmarsik/mopidy,jmarsik/mopidy,priestd09/mopidy,diandiankan/mopidy,dbrgn/mopidy,priestd09/mopidy,quartz55/mopidy,priestd09/mopidy,jcass77/mopidy,glogiotatidis/mopidy,diandiankan/mopidy,tkem/mopidy,tkem/mopidy,ZenithDK/mopidy,bacontext/mopidy,diandiankan/mopidy,ZenithDK/mopidy,mokieyue/mopidy,SuperStarPL/mopidy,jcass77/mopidy,tkem/mopidy,bencevans/mopidy,SuperStarPL/mopidy,woutervanwijk/mopidy,dbrgn/mopidy,SuperStarPL/mopidy,swak/mopidy,ali/mopidy,hkariti/mopidy,hkariti/mopidy,glogiotatidis/mopidy,bencevans/mopidy,bacontext/mopidy,vrs01/mopidy,ali/mopidy,abarisain/mopidy,adamcik/mopidy,swak/mopidy,bencevans/mopidy,kingosticks/mopidy,ali/mopidy,ZenithDK/mopidy,rawdlite/mopidy,ali/mopidy,mokieyue/mopidy,woutervanwijk/mopidy,bacontext/mopidy,bacontext/mopidy,adamcik/mopidy,glogiotatidis/mopidy,diandiankan/mopidy,hkariti/mopidy,jodal/mopidy,vrs01/mopidy,SuperStarPL/mopidy,ZenithDK/mopidy,vrs01/mopidy,pacificIT/mopidy,dbrgn/mopidy,vrs01/mopidy,mopidy/mopidy,jmarsik/mopidy,rawdlite/mopidy,liamw9534/mopidy,swak/mopidy,kingosticks/mopidy,tkem/mopidy,jcass77/mopidy,quartz55/mopidy,pacificIT/mopidy
mopidy/__main__.py
mopidy/__main__.py
import logging
import multiprocessing
import optparse
import os
import sys

sys.path.insert(0,
    os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))

from mopidy import get_version, settings, SettingsError
from mopidy.core import CoreProcess
from mopidy.utils import get_class
from mopidy.utils.log import setup_logging
from mopidy.utils.path import get_or_create_folder
from mopidy.utils.settings import list_settings_optparse_callback

logger = logging.getLogger('mopidy.main')

def main():
    options = parse_options()
    setup_logging(options.verbosity_level, options.dump)
    logger.info('-- Starting Mopidy --')
    get_or_create_folder('~/.mopidy/')
    settings.validate()
    core_queue = multiprocessing.Queue()
    output_class = get_class(settings.OUTPUT)
    backend_class = get_class(settings.BACKENDS[0])
    frontend = get_class(settings.FRONTENDS[0])()
    frontend.start_server(core_queue)
    core = CoreProcess(core_queue, output_class, backend_class, frontend)
    core.start()
    logger.debug('Main done')

def parse_options():
    parser = optparse.OptionParser(version='Mopidy %s' % get_version())
    parser.add_option('-q', '--quiet',
        action='store_const', const=0, dest='verbosity_level',
        help='less output (warning level)')
    parser.add_option('-v', '--verbose',
        action='store_const', const=2, dest='verbosity_level',
        help='more output (debug level)')
    parser.add_option('--dump',
        action='store_true', dest='dump',
        help='dump debug log to file')
    parser.add_option('--list-settings',
        action='callback', callback=list_settings_optparse_callback,
        help='list current settings')
    return parser.parse_args()[0]

if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        logger.info(u'Interrupted by user')
        sys.exit(0)
    except SettingsError, e:
        logger.error(e)
        sys.exit(1)
    except SystemExit, e:
        logger.error(e)
        sys.exit(1)
import logging
import multiprocessing
import optparse
import os
import sys

sys.path.insert(0,
    os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))

from mopidy import get_version, settings, SettingsError
from mopidy.core import CoreProcess
from mopidy.utils import get_class
from mopidy.utils.log import setup_logging
from mopidy.utils.path import get_or_create_folder
from mopidy.utils.settings import list_settings_optparse_callback

logger = logging.getLogger('mopidy.main')

def main():
    options = _parse_options()
    setup_logging(options.verbosity_level, options.dump)
    settings.validate()
    logger.info('-- Starting Mopidy --')
    get_or_create_folder('~/.mopidy/')
    core_queue = multiprocessing.Queue()
    output_class = get_class(settings.OUTPUT)
    backend_class = get_class(settings.BACKENDS[0])
    frontend = get_class(settings.FRONTENDS[0])()
    frontend.start_server(core_queue)
    core = CoreProcess(core_queue, output_class, backend_class, frontend)
    core.start()
    logger.debug('Main done')

def _parse_options():
    parser = optparse.OptionParser(version='Mopidy %s' % get_version())
    parser.add_option('-q', '--quiet',
        action='store_const', const=0, dest='verbosity_level',
        help='less output (warning level)')
    parser.add_option('-v', '--verbose',
        action='store_const', const=2, dest='verbosity_level',
        help='more output (debug level)')
    parser.add_option('--dump',
        action='store_true', dest='dump',
        help='dump debug log to file')
    parser.add_option('--list-settings',
        action='callback', callback=list_settings_optparse_callback,
        help='list current settings')
    return parser.parse_args()[0]

if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        logger.info(u'Interrupted by user')
        sys.exit(0)
    except SettingsError, e:
        logger.error(e)
        sys.exit(1)
    except SystemExit, e:
        logger.error(e)
        sys.exit(1)
apache-2.0
Python
69b715ab99522967a6b1bb8f4abfc4f2b1e60912
check most of the analyzer code by importing the analyzer itself
cuckoobox/cuckoo,cuckoobox/cuckoo,cuckoobox/cuckoo,cuckoobox/cuckoo,cuckoobox/cuckoo
tests/windows/test_analyzer.py
tests/windows/test_analyzer.py
# Copyright (C) 2017 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.

def test_analyzer():
    """Simply imports the analyzer module to at least load most of the code."""
    import analyzer
    analyzer  # Fake usage.
mit
Python
4853257696373d248884efd1532af8a81c34ee93
Add LiveComposite creation helper script
bitsawer/renpy-shader,bitsawer/renpy-shader
tools/create_live_composite.py
tools/create_live_composite.py
""" Helper script for cropping images and creating a RenPy LiveComposite for them. Quite specific and mostly useful for processing images exported from a rendering program like Blender or from Photoshop layers. Requires Pillow Python image processing library to be installed. Command line example (current working directory at the base of this project): python tools/create_live_composite.py ShaderDemo/game/images/doll This assumes all images in the source directory have the same size. The script crops them and creates an efficient LiveComposite that can be used for rigging or just normally. The resulting LiveComposite is written into a .rpy-file in the target directory. """ import sys import os from PIL import Image IMAGES = ["png", "jpg"] POSTFIX = "crop" PAD = 5 sourceDir = sys.argv[1] sourceImages = [os.path.join(sourceDir, name) for name in os.listdir(sourceDir) if name.lower().split(".")[-1] in IMAGES] sourceImages.sort() def findValidImages(images): valid = [] size = None for path in sourceImages: image = Image.open(path) if POSTFIX and POSTFIX in path.lower(): print("Skipping already cropped: %s" % path) elif size is None or image.size == size: size = image.size valid.append((path, image)) else: print("Image %s has size %s, should be %s? Skipped." % (path, str(image.size), str(size))) return valid def getCropRect(image): x = 0 y = 0 x2 = image.size[0] y2 = image.size[1] box = image.getbbox() if box: return max(box[0] - PAD, 0), max(box[1] - PAD, 0), min(box[2] + PAD, image.size[0]), min(box[3] + PAD, image.size[1]) return x, y, x2, y2 def createName(path): parts = path.rsplit(".", 1) return parts[0] + POSTFIX + "." + parts[1] results = [] for path, image in findValidImages(sourceImages): rect = getCropRect(image) cropped = image.crop(rect) name = createName(path) cropped.save(name) print("Saved: %s. Cropped: %s" % (name, str(rect))) results.append((name, image, rect)) name = os.path.normcase(sourceDir).split(os.sep)[-1] with open(os.path.join(sourceDir, name + ".rpy"), "w") as f: base = results[0] f.write("#Automatically generated file\n\n") f.write("image %s = LiveComposite(\n" % name) f.write(" (%i, %i),\n" % base[1].size) for result in results: name, image, crop = result name = name[name.find("images"):].replace("\\", "/") f.write(" (%i, %i), \"%s\",\n" % (crop[0], crop[1], name)) f.write(")\n")
mit
Python
e74571c6505bdf99a94fc27dd1ea60e23f55db0a
Add strace_inputs.py to strace a test executable and detect its dependencies.
gavinp/chromium,gavinp/chromium,yitian134/chromium,yitian134/chromium,gavinp/chromium,yitian134/chromium,gavinp/chromium,yitian134/chromium,yitian134/chromium,ropik/chromium,adobe/chromium,yitian134/chromium,adobe/chromium,ropik/chromium,ropik/chromium,gavinp/chromium,adobe/chromium,adobe/chromium,gavinp/chromium,gavinp/chromium,ropik/chromium,gavinp/chromium,adobe/chromium,ropik/chromium,gavinp/chromium,adobe/chromium,ropik/chromium,ropik/chromium,adobe/chromium,adobe/chromium,yitian134/chromium,ropik/chromium,ropik/chromium,yitian134/chromium,adobe/chromium,adobe/chromium,gavinp/chromium,adobe/chromium,yitian134/chromium,yitian134/chromium
tools/isolate/strace_inputs.py
tools/isolate/strace_inputs.py
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs strace on a test and processes the logs to extract the dependencies
from the source tree.

Automatically extracts directories where all the files are used to make the
dependencies list more compact.
"""

import os
import re
import subprocess
import sys

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(os.path.dirname(BASE_DIR))

IGNORED = (
  '/dev',
  '/etc',
  '/home',
  '/lib',
  '/proc',
  '/sys',
  '/tmp',
  '/usr',
  '/var',
)


def gen_trace(cmd, cwd, logname, silent):
  """Runs strace on an executable."""
  strace = ['strace', '-f', '-e', 'trace=open', '-o', logname]
  stdout = stderr = None
  if silent:
    stdout = subprocess.PIPE
    stderr = subprocess.PIPE
  cmd = [os.path.normpath(os.path.join(cwd, c)) for c in cmd]
  p = subprocess.Popen(
      strace + cmd, cwd=cwd, stdout=stdout, stderr=stderr)
  out, err = p.communicate()
  if p.returncode != 0:
    print 'Failure: %d' % p.returncode
    # pylint: disable=E1103
    print ''.join(out.splitlines(True)[-100:])
    print ''.join(err.splitlines(True)[-100:])
  return p.returncode


def parse_log(filename, blacklist):
  """Processes a strace log and returns the files opened and the files that
  do not exist.

  Most of the time, files that do not exist are temporary test files that
  should be put in /tmp instead. See http://crbug.com/116251

  TODO(maruel): Process chdir() calls so relative paths can be processed.
  """
  files = set()
  non_existent = set()
  for line in open(filename):
    # 1=pid, 2=filepath, 3=mode, 4=result
    m = re.match(r'^(\d+)\s+open\("([^"]+)", ([^\)]+)\)\s+= (.+)$', line)
    if not m:
      continue
    if m.group(4).startswith('-1') or 'O_DIRECTORY' in m.group(3):
      # Not present or a directory.
      continue
    filepath = m.group(2)
    if blacklist(filepath):
      continue
    if not os.path.isfile(filepath):
      non_existent.add(filepath)
    else:
      files.add(filepath)
  return files, non_existent


def relevant_files(files, root):
  """Trims the list of files to keep the expected files and unexpected files.

  Unexpected files are files that are not based inside the |root| directory.
  """
  expected = []
  unexpected = []
  for f in files:
    if f.startswith(root):
      expected.append(f[len(root):])
    else:
      unexpected.append(f)
  return sorted(set(expected)), sorted(set(unexpected))


def extract_directories(files, root):
  """Detects if all the files in a directory were loaded and if so, replace
  the individual files by the directory entry.
  """
  directories = set(os.path.dirname(f) for f in files)
  files = set(files)
  for directory in sorted(directories, reverse=True):
    actual = set(
      os.path.join(directory, f)
      for f in os.listdir(os.path.join(root, directory))
      if not f.endswith(('.svn', '.pyc'))
    )
    if not (actual - files):
      files -= actual
      files.add(directory + '/')
  return sorted(files)


def strace_inputs(unittest, cmd):
  """Tries to load the logs if available. If not, strace the test."""
  logname = os.path.join(BASE_DIR, os.path.basename(unittest))
  if not os.path.isfile(logname):
    returncode = gen_trace(cmd, ROOT_DIR, logname, True)
    if returncode:
      return returncode

  def blacklist(f):
    """Strips ignored paths."""
    return f.startswith(IGNORED) or f.endswith('.pyc')

  files, non_existent = parse_log(logname, blacklist)
  print('Total: %d' % len(files))
  print('Non existent: %d' % len(non_existent))
  for f in non_existent:
    print('  %s' % f)

  expected, unexpected = relevant_files(files, ROOT_DIR + '/')
  if unexpected:
    print('Unexpected: %d' % len(unexpected))
    for f in unexpected:
      print('  %s' % f)

  simplified = extract_directories(expected, ROOT_DIR)
  print('Interesting: %d reduced to %d' % (len(expected), len(simplified)))
  for f in simplified:
    print('  %s' % f)
  return 0


def main():
  if len(sys.argv) < 3:
    print >> sys.stderr, (
        'Usage: strace_inputs.py [testname] [cmd line...]\n'
        '\n'
        'Example:\n'
        '  ./strace_inputs.py base_unittests testing/xvfb.py out/Release '
        'out/Release/base_unittests')
    return 1
  return strace_inputs(sys.argv[1], sys.argv[2:])


if __name__ == '__main__':
  sys.exit(main())
bsd-3-clause
Python
e94192a4c549e46ae0a155dbfa634ebde992903a
Create netntlm2hashcat.py
ins1gn1a/NetNTLM2HashCat,ins1gn1a/NetNTLM2HashCat
netntlm2hashcat.py
netntlm2hashcat.py
#!/usr/bin/env python

import sys
import re
import argparse

# Arg Input (Like a pirate)
p = argparse.ArgumentParser(description='Convert NetNTLM John Hashes to Hashcat Format')
p.add_argument('-i', '--hash', action='store_true', help='Enter one-time hash input mode', required=False)
p.add_argument('-f', '--file', dest='file', help='Path to file containing multiple hashes', required=False, default="")
p.add_argument('-o', '--output', dest='output', help='File path to save the converted hashes', required=False)
a = p.parse_args()

# RegEx to re-arrange the hash
reg = re.compile('(.*?):(\$.*?)\$(.*?)\$(.*)')

if a.hash:
    try:
        hash = raw_input("Enter your hash:\n")
        if hash:
            print reg.sub(r'\1::::\4:\3', hash)
    except KeyboardInterrupt:
        sys.exit("\n")
    except:
        sys.exit("Error: Something is broken\n")
elif a.file:
    try:
        with open(a.file) as temp:
            for line in temp:
                outhash = reg.sub(r'\1::::\4:\3', line)
                outhash = outhash.rstrip('\n\n')
                if a.output is None:
                    print outhash
                else:
                    # Append each converted hash; opening with 'w' here would
                    # truncate the output file on every iteration and keep
                    # only the last hash.
                    with open(a.output, 'a') as f:
                        f.write(outhash + '\n')
    except KeyboardInterrupt:
        sys.exit("\n")
    except:
        sys.exit("Error: Input file doesn't exist.\n")
else:
    p.print_help()
mit
Python
e3b025ae738b6aff8fb873bb41d1cc13e0845131
Create geddit-backend.py
azimos/geddit
geddit-backend.py
geddit-backend.py
#!/usr/bin/python

import requests
import json

# Import modules for CGI handling
import cgi, cgitb

# Create instance of FieldStorage
form = cgi.FieldStorage()

# Get data from fields
user_title = form.getvalue('search_title')

print "Content-type: text/html\n\n"

# Setting attributes to send to Wikipedia API
baseurl = 'http://en.wikipedia.org/w/api.php'
search_atts = {}
search_atts['action'] = 'query'
search_atts['list'] = 'search'
search_atts['srwhat'] = 'text'
search_atts['format'] = 'json'
search_atts['srsearch'] = user_title

search_resp = requests.get(baseurl, params=search_atts)
search_data = search_resp.json()

title = search_data["query"]["search"][0]["title"]

# Make the title with no space which will be needed for making a url link
# to send for summary
title_w_no_space = ""
for i in title:
    if i == " ":
        title_w_no_space = title_w_no_space + "_"
    else:
        title_w_no_space = title_w_no_space + i

# Getting related topics using the result given by Wikipedia API
topics = []
for key in search_data["query"]["search"]:
    topics.append(key["title"])
topics = topics[1:len(topics)]

# Summarizing the content:
# setting attributes for to send to Smmry API
link_for_smmry = 'https://en.wikipedia.org/wiki/' + title_w_no_space
smmry_base_url = 'http://api.smmry.com/'
#smmry_atts = {}
#smmry_atts ['SM_URL'] = 'https://en.wikipedia.org/wiki/Guyana'
#smmry_atts ['SM_API_KEY'] = '6F297A53E3' # represents your registered API key.
# Optional, X represents the webpage to summarize.
#smmry_atts ['SM_LENGTH'] = N # Optional, N represents the number of sentences returned, default is 7
#smmry_atts ['SM_KEYWORD_COUNT'] = N # Optional, N represents how many of the top keywords to return
#smmry_atts ['SM_QUOTE_AVOID'] # Optional, summary will not include quotations
#smmry_atts ['SM_WITH_BREAK'] # Optional, summary will contain string [BREAK] between each sentence

api_key_link = '&SM_API_KEY=6F297A53E3&SM_URL='
api_lenght = 'SM_LENGTH=7&SM_WITH_BREAK'
#print api_key_link
api_link = smmry_base_url + api_lenght + api_key_link + link_for_smmry

#smmry_resp = requests.get('http://api.smmry.com/&SM_API_KEY=6F297A53E3&SM_URL=https://en.wikipedia.org/wiki/Guyana')
smmry_resp = requests.get(api_link)
smmry_data = smmry_resp.json()

content = '<p>Try adding another key word.</p><a style="color:white;" id="backbtn" href="#" onclick="myFunction()" >Go back.</a>'
try:
    content = smmry_data['sm_api_content']
except:
    pass

content_with_non_ascii = ""
for word in content:
    if ord(word) < 128:
        content_with_non_ascii += word
    else:
        content_with_non_ascii += "?"

if len(content_with_non_ascii) > 0:
    content = content_with_non_ascii

# replacing "[BREAK]"s with a new line
while "[BREAK]" in content:
    length = len(content)
    break_position = content.find("[BREAK]")
    content = content[0:break_position] + "<br><br>" + content[break_position+7:length]

print '<div id="all-cont-alt"><div class="select-nav"><div id="nav-top-main"><a id="backbtn" href="#" onclick="myFunction()" ><i style=" position: relative;margin-left: 25px;background-color: #00cfb9;padding: 13px;top: 74px;border-radius: 16px;color: #ffffff;text-align: left;" class= "fa fa-chevron-left fa-2x"></i></a><h1>Geddit</h1></div></div>'
print '<div id="loaddddd"></div><div id="contentss">'
print '<h1 id="user-title">'
print user_title
print "</h1>"
print content
print '</div></div>'
print '<h3 class="related">Related Topics</h3>'
print '<div id="rel-holder">'
for key in topics:
    if all(ord(c) < 128 for c in key):
        print '<h5 class="related-topics" onclick="relatedFunction();">'
        print key
        print '</h5>'
    else:
        pass
print '</div>'
mit
Python
e6e5fbb671c2539f4f82c6eaca51fbf400133482
Write a silly Python script to compute some hard coded info from the generated ARM match table, which is substantially more efficient than dealing with tblgen.
GPUOpen-Drivers/llvm,chubbymaggie/asap,dslab-epfl/asap,llvm-mirror/llvm,llvm-mirror/llvm,apple/swift-llvm,llvm-mirror/llvm,apple/swift-llvm,GPUOpen-Drivers/llvm,llvm-mirror/llvm,dslab-epfl/asap,apple/swift-llvm,GPUOpen-Drivers/llvm,GPUOpen-Drivers/llvm,GPUOpen-Drivers/llvm,llvm-mirror/llvm,chubbymaggie/asap,apple/swift-llvm,GPUOpen-Drivers/llvm,GPUOpen-Drivers/llvm,GPUOpen-Drivers/llvm,apple/swift-llvm,chubbymaggie/asap,dslab-epfl/asap,dslab-epfl/asap,chubbymaggie/asap,apple/swift-llvm,llvm-mirror/llvm,llvm-mirror/llvm,dslab-epfl/asap,chubbymaggie/asap,apple/swift-llvm,chubbymaggie/asap,dslab-epfl/asap,llvm-mirror/llvm,llvm-mirror/llvm,apple/swift-llvm,dslab-epfl/asap
utils/Target/ARM/analyze-match-table.py
utils/Target/ARM/analyze-match-table.py
#!/usr/bin/env python

def analyze_match_table(path):
    # Extract the instruction table.
    data = open(path).read()
    start = data.index("static const MatchEntry MatchTable")
    end = data.index("\n};\n", start)
    lines = data[start:end].split("\n")[1:]

    # Parse the instructions.
    insns = []
    for ln in lines:
        ln = ln.split("{", 1)[1]
        ln = ln.rsplit("}", 1)[0]
        a, bc = ln.split("{", 1)
        b, c = bc.split("}", 1)
        code, string, converter, _ = [s.strip() for s in a.split(",")]
        items = [s.strip() for s in b.split(",")]
        _, features = [s.strip() for s in c.split(",")]
        assert string[0] == string[-1] == '"'
        string = string[1:-1]
        insns.append((code, string, converter, items, features))

    # For every mnemonic, compute whether or not it can have a carry setting
    # operand and whether or not it can have a predication code.
    mnemonic_flags = {}
    for insn in insns:
        mnemonic = insn[1]
        items = insn[3]
        flags = mnemonic_flags[mnemonic] = mnemonic_flags.get(mnemonic, set())
        flags.update(items)

    mnemonics = set(mnemonic_flags)
    ccout_mnemonics = set(m for m in mnemonics
                          if 'MCK_CCOut' in mnemonic_flags[m])
    condcode_mnemonics = set(m for m in mnemonics
                             if 'MCK_CondCode' in mnemonic_flags[m])
    noncondcode_mnemonics = mnemonics - condcode_mnemonics

    print ' || '.join('Mnemonic == "%s"' % m
                      for m in ccout_mnemonics)
    print ' || '.join('Mnemonic == "%s"' % m
                      for m in noncondcode_mnemonics)

def main():
    import sys
    if len(sys.argv) == 1:
        import os
        from lit.Util import capture
        llvm_obj_root = capture(["llvm-config", "--obj-root"])
        file = os.path.join(llvm_obj_root,
                            "lib/Target/ARM/ARMGenAsmMatcher.inc")
    elif len(sys.argv) == 2:
        file = sys.argv[1]
    else:
        raise NotImplementedError

    analyze_match_table(file)

if __name__ == '__main__':
    main()
apache-2.0
Python
5b2c1650059f9e4b69b6bab1d8ce88177f449e02
Add basic test for import
iModels/foyer,mosdef-hub/foyer,mosdef-hub/foyer,iModels/foyer
foyer/tests/test_external_forcefields.py
foyer/tests/test_external_forcefields.py
import pytest


def test_basic_import():
    import foyer
    assert 'external_forcefields' in dir(foyer)
    import foyer.external_forcefields
mit
Python
e5d58cc795541b5e4e8f791a441a4369df17ee19
Add first exercise
MindCookin/python-exercises
cuadradoDentroDeRangoDado.py
cuadradoDentroDeRangoDado.py
#!/usr/bin/env python

def main():

    def cuadr(num):
        return num * num

    def nom_cuad(num):
        return ("%d -> %d") % (num, cuadr(num))

    def promptCuadr():
        myNum1 = input("Enter num1: ")
        myNum2 = input("Enter num2: ")

        minimum = min(myNum1, myNum2)
        maximum = max(myNum1, myNum2)

        arr = [nom_cuad(x) for x in range(minimum, maximum) + [maximum]]
        multiline = "\n".join(arr)
        print multiline

    print "==== Mostrar el cuadrado de los numeros dentro del rango introducido ===="
    promptCuadr()
    print "Operacion finalizada"

main()
apache-2.0
Python
aab833a4a267ed46e83a5968e87d357ae3a5a12b
Add new DemoStream example corresponding to the LSL4Unity Project
xfleckx/BeMoBI_Tools,xfleckx/BeMoBI_Tools,xfleckx/BeMoBI_Tools
utils/LSL_Tests/RecieveDemoStream.py
utils/LSL_Tests/RecieveDemoStream.py
"""Example program to show how to read a marker time series from LSL.""" import sys sys.path.append('./pylsl') # help python find pylsl relative to this example program from pylsl import StreamInlet, resolve_stream # first resolve an EEG stream on the lab network targetStreamType = 'Unity.Quaternion' print 'looking for an stream of type ' + targetStreamType streams = resolve_stream('type', targetStreamType) streamsFound = len(streams) if (streamsFound > 0): print 'found ' + str(streamsFound) else: print 'found none', # create a new inlet to read from the stream inlet = StreamInlet(streams[0]) while True: sample, timestamp = inlet.pull_sample() if(sample): print "\033[K", str(timestamp) + ' Quaternion: ' + ' '.join(str(sample[x]) for x in range(0,len(sample))), "\r", sys.stdout.flush()
mit
Python
897371dac52c38b96b6a1a92cd8ce36e9b2d1003
Add django admin page for HQOauthApplication
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
corehq/apps/hqwebapp/admin.py
corehq/apps/hqwebapp/admin.py
from django.contrib import admin

from corehq.apps.hqwebapp.models import HQOauthApplication


@admin.register(HQOauthApplication)
class HQOauthApplicationAdmin(admin.ModelAdmin):
    list_display = (
        "id",
        "application_id",
        "application_name",
        "application_user",
        "application_client_type",
        "application_authorization_grant_type",
    )

    def application_id(self, obj):
        return obj.application.id

    def application_name(self, obj):
        return obj.application.name

    def application_user(self, obj):
        return obj.application.user.id

    def application_client_type(self, obj):
        return obj.application.client_type

    def application_authorization_grant_type(self, obj):
        return obj.application.authorization_grant_type
bsd-3-clause
Python
09a8f4efcfc99f7add4d055465de621a47f06ee8
Add management command to sanitize 2fa sessions
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
corehq/apps/hqadmin/management/commands/clean_2fa_sessions.py
corehq/apps/hqadmin/management/commands/clean_2fa_sessions.py
from getpass import getpass
from importlib import import_module

from packaging import version
from pkg_resources import DistributionNotFound, get_distribution

from django.conf import settings
from django.core.cache import caches
from django.core.management.base import BaseCommand


class Command(BaseCommand):
    help = (
        "Remove outdated/sensitive information from active Django sessions. "
        "See https://github.com/Bouke/django-two-factor-auth/security/advisories/GHSA-vhr6-pvjm-9qwf"
    )

    def add_arguments(self, parser):
        parser.add_argument(
            '--one-session',
            action='store_true',
            default=False,
            help='Lookup one session only (will prompt for a session key).',
        )
        parser.add_argument(
            '--dry-run',
            action='store_true',
            default=False,
            help='Count the number of sessions that would be affected, '
                 'but do not modify them.',
        )

    def handle(self, one_session=False, dry_run=False, **options):
        if dry_run:
            print("DRY RUN sessions will not be modified")
        tf_ver = get_two_factor_version()
        if tf_ver and version.parse(tf_ver) < version.parse("1.12"):
            print(f"WARNING old/insecure django-two-factor-auth version detected: {tf_ver}")
            print("Please run this tool again after upgrading.")
        else:
            print(f"found django-two-factor-auth version {tf_ver}")
        print("scanning sessions...")
        count = i = 0
        for i, session in enumerate(iter_sessions(one_session), start=1):
            if i % 10000 == 0:
                print(f"processed {i} sessions")
            if has_sensitive_info(session):
                count += 1
                if not dry_run:
                    sanitize(session)
        if dry_run:
            print(f"DRY RUN {count} of {i} sessions need to be sanitized")
        else:
            print(f"Sanitized {count} of {i} sessions")


def sanitize(session):
    for data in iter_wizard_login_views(session):
        del data["step_data"]
        del data["validated_step_data"]
    session.save()
    assert not has_sensitive_info(session)


def iter_sessions(one_session):
    """Iterate over one or all existing django sessions

    Assumes that redis is the default cache in which all sessions are stored.
    """
    assert settings.SESSION_ENGINE == "django.contrib.sessions.backends.cache", \
        f"unsupported session engine: {settings.SESSION_ENGINE}"
    engine = import_module(settings.SESSION_ENGINE)
    if one_session:
        session_key = getpass(prompt="Session key: ")
        yield engine.SessionStore(session_key)
        return
    cache = caches[settings.SESSION_CACHE_ALIAS]
    prefix_length = len(engine.SessionStore.cache_key_prefix)
    for key in cache.iter_keys(engine.SessionStore.cache_key_prefix + "*"):
        session_key = key[prefix_length:]
        yield engine.SessionStore(session_key)


def has_sensitive_info(session):
    def has_key(data, path):
        value = data
        for name in path:
            if not isinstance(value, dict) or name not in value:
                return False
            value = value[name]
        return True
    return any(
        has_key(data, STEP_DATA_PATH) or has_key(data, VALIDATED_STEP_DATA_PATH)
        for data in iter_wizard_login_views(session)
    )


def iter_wizard_login_views(session):
    for key, data in session.items():
        if key.startswith("wizard_") and key.endswith("_login_view"):
            yield data


STEP_DATA_PATH = ["step_data", "auth", "auth-password"]
VALIDATED_STEP_DATA_PATH = ["validated_step_data", "auth", "password"]


def get_two_factor_version():
    try:
        dist = get_distribution("django-two-factor-auth")
    except DistributionNotFound:
        return None
    return dist.version
bsd-3-clause
Python
fb884d3453b42b68aa7ecc7b0523bf1460b6b9e0
Add missing EFS patch
cloudtools/troposphere,cloudtools/troposphere
scripts/patches/efs.py
scripts/patches/efs.py
patches = [
    {
        "op": "replace",
        "path": "/ResourceTypes/AWS::EFS::AccessPoint/Properties/AccessPointTags/ItemType",
        "value": "Tag",
    },
    {
        "op": "replace",
        "path": "/ResourceTypes/AWS::EFS::FileSystem/Properties/FileSystemTags/ItemType",
        "value": "Tag",
    },
]
bsd-2-clause
Python
1ae811c79b1cbc28b2f71e8f2bb01b44cc3aa2b9
Improve import malware hashes cron
gdestuynder/MozDef,Phrozyn/MozDef,jeffbryner/MozDef,jeffbryner/MozDef,ameihm0912/MozDef,gdestuynder/MozDef,jeffbryner/MozDef,gdestuynder/MozDef,mozilla/MozDef,mozilla/MozDef,mpurzynski/MozDef,mozilla/MozDef,Phrozyn/MozDef,jeffbryner/MozDef,ameihm0912/MozDef,ameihm0912/MozDef,mozilla/MozDef,Phrozyn/MozDef,mpurzynski/MozDef,ameihm0912/MozDef,gdestuynder/MozDef,mpurzynski/MozDef,Phrozyn/MozDef,mpurzynski/MozDef
cron/import_malware_hashes.py
cron/import_malware_hashes.py
#!/usr/bin/env python

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
#
# Contributors:
# Brandon Myers bmyers@mozilla.com

import os
import sys
from configlib import getConfig, OptionParser
from datetime import datetime
from datetime import timedelta

from pytx.access_token import access_token
from pytx import Malware

sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../lib'))
from utilities.logger import logger, initLogger
from utilities.toUTC import toUTC
from elasticsearch_client import ElasticsearchClient
from state import State


def pull_malware_hashes(since_date, until_date):
    query_params = {
        'since': str(since_date),
        'until': str(until_date),
        'full_response': True,
    }
    logger.info('Querying threat exchange with params {}'.format(query_params))

    results = Malware.objects(**query_params)

    malware_data = []
    for result in results['data']:
        created_date = toUTC(datetime.now()).isoformat()
        es_doc = {
            'created_on': created_date,
            'details': result
        }
        malware_data.append(es_doc)
    return malware_data


def main():
    logger.info('Connecting to Elasticsearch')
    client = ElasticsearchClient(options.esservers)
    logger.info('Connecting to threat exchange')
    access_token(options.appid, options.appsecret)

    state = State(options.state_file_name)
    current_timestamp = toUTC(datetime.now()).isoformat()
    # We're setting a default for the past 2 days of data
    # if there isnt a state file
    since_date_obj = toUTC(datetime.now()) - timedelta(days=2)
    since_date = since_date_obj.isoformat()
    if 'lastrun' in state.data.keys():
        since_date = state.data['lastrun']

    malware_hashes_docs = pull_malware_hashes(since_date=since_date, until_date=current_timestamp)
    for malware_hash_doc in malware_hashes_docs:
        client.save_object(index='threat-exchange', doc_type='malware_hashes', body=malware_hash_doc)

    state.data['lastrun'] = current_timestamp
    state.save()


def initConfig():
    options.output = getConfig('output', 'stdout', options.configfile)
    options.sysloghostname = getConfig('sysloghostname', 'localhost', options.configfile)
    options.syslogport = getConfig('syslogport', 514, options.configfile)

    options.state_file_name = getConfig('state_file_name', '{0}.state'.format(sys.argv[0]), options.configfile)

    # threat exchange options
    options.appid = getConfig('appid', '', options.configfile)
    options.appsecret = getConfig('appsecret', '', options.configfile)

    # elastic search server settings
    options.esservers = list(getConfig('esservers', 'http://localhost:9200', options.configfile).split(','))


if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option("-c", dest='configfile',
                      default=sys.argv[0].replace('.py', '.conf'),
                      help="configuration file to use")
    (options, args) = parser.parse_args()
    initConfig()
    initLogger(options)
    main()
mpl-2.0
Python
58852970847bab30fee18e6ab824b24bc75d389f
Add the package-cleaning script
prophile/polypkg,prophile/polypkg
clean-packages.py
clean-packages.py
# WARNING: HERE BE DRAGONS

import yaml
import os.path
import urllib.parse
from copy import deepcopy

urllib.parse.uses_relative.append('github')
urllib.parse.uses_netloc.append('github')

with open('packages.yaml') as f:
    package_db = yaml.load(f)

def strip_prefix(prefix, url):
    for n in range(len(url) - 1, 0, -1):
        component = url[n:]
        joined = urllib.parse.urljoin(prefix, component)
        if joined == url:
            return component
    return url

def clean_package(value):
    backup = deepcopy(value)
    if 'base' in value:
        old_base = value['base']
        del value['base']
        value['files'] = {fn: urllib.parse.urljoin(old_base, val)
                          for fn, val in value['files'].items()}
    prefix = os.path.commonprefix(value['files'].values())
    if '/' not in prefix:
        return backup
    prefix = prefix[0:prefix.rindex('/')+1]
    if len(prefix) > 12:
        value['base'] = prefix
        value['files'] = {fn: strip_prefix(prefix, url)
                          for fn, url in value['files'].items()}
    return value

package_db = {key: clean_package(value)
              for key, value in package_db.items()}

with open('packages.yaml', 'w') as f:
    yaml.dump(package_db, f, default_flow_style=False)
mit
Python
2e0fbcb3ec1c2f0311d7ee4bbfeac33662f66089
Monitor process using subprocess module
PSJoshi/python_scripts
monitor_process.py
monitor_process.py
import subprocess

"""
If the program is running, "ps -ef | grep program" will return 2 or more rows
(one with the program itself and the second one with "grep program").
Otherwise, it will only return one row ("grep program").
You can trigger the alert on this if required.
"""

def monitor_process(name):
    args = ['ps', '-ef']
    args1 = ['grep', '-c', '%s' % name]
    process_ps = subprocess.Popen(args, stdout=subprocess.PIPE, shell=False)
    process_monitor = subprocess.Popen(args1, stdin=process_ps.stdout,
                                       stdout=subprocess.PIPE, shell=False)
    # Allow process_ps to receive a SIGPIPE if process_monitor exits.
    process_ps.stdout.close()
    return process_monitor.communicate()[0]

if __name__ == "__main__":
    print monitor_process('firefox')
apache-2.0
Python
0ae60d170c3a8fd33fac3b1283e646a7018027df
Add expertise removal migration
ctsit/qipr_approver,DevMattM/qipr_approver,ctsit/qipr_approver,DevMattM/qipr_approver,ctsit/qipr_approver,ctsit/qipr_approver,DevMattM/qipr_approver,DevMattM/qipr_approver,ctsit/qipr_approver,DevMattM/qipr_approver
qipr_approver/approver/migrations/0007_auto_20170227_1533.py
qipr_approver/approver/migrations/0007_auto_20170227_1533.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-27 15:33
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('approver', '0006_auto_20170222_1424'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='expertise',
            name='created_by',
        ),
        migrations.RemoveField(
            model_name='expertise',
            name='last_modified_by',
        ),
        migrations.AlterField(
            model_name='person',
            name='expertise',
            field=models.ManyToManyField(to='approver.Descriptor'),
        ),
        migrations.DeleteModel(
            name='Expertise',
        ),
    ]
apache-2.0
Python
b629a8e6346359683e637fd8e2f34f1d704ad1bc
Add missing test
vahtras/util
test/test_full.py
test/test_full.py
import numpy as np
from util.full import matrix

def assert_(this, ref):
    print this
    print ref
    assert np.allclose(this, ref)

def test_diag():
    ref = [[1, 0, 0],
           [0, 1, 0],
           [0, 0, 1]]
    this = matrix.diag([1, 1, 1])
    assert_(this, ref)
mit
Python
5e9b804ef20d71aa84cb4d3cdd8b3bad9863cf11
add validator
arturtamborski/wypok,arturtamborski/wypok,arturtamborski/wypok,arturtamborski/wypok
sections/validators.py
sections/validators.py
import re

from django.core.validators import RegexValidator


section_name_validator = RegexValidator(
    r'^[a-zA-Z][a-zA-Z0-9]{1,19}$',
    'This field can contain only characters a-zA-Z0-9 and be max 20 characters long',
    code='invalid'
)
mit
Python
e7db8f3dc4d945185a99b5b62ae0b528959651ac
add python version
Respekt1/linux_scripts_generall
versioncheck/python_version.py
versioncheck/python_version.py
import sys

from invoke import task
from subprocess import call
import invoke

# Accepted answers for the yes/no prompt below; these sets were not defined
# in the original snippet and are assumed here so the prompt loop can run.
yes = {'yes', 'y', 'ye'}
no = {'no', 'n'}


def check_invoke_version(ctx):
    minimal_verion = "0.15.0"
    if minimal_verion > invoke.__version__:
        print("Your python-invoke version is too old (currently " + invoke.__version__ + "). Please update to version " + minimal_verion + " or higher.")
        print("call: pip install invoke --upgrade")
        correct = False
        response = False
        print("\nDo you want to resume with a old version? [YES/NO]?")
        while response != True:
            choice = raw_input().lower()
            if choice in yes:
                correct = True
                response = True
            elif choice in no:
                correct = False
                response = True
            else:
                sys.stdout.write("Please respond with 'yes' or 'no'")
        if correct == False:
            return False
    return True
bsd-2-clause
Python
39cbea7183a77495173b10aef4d9f6ac10ad15f6
Add initial layout of resources
chop-dbhi/django-objectset,chop-dbhi/django-objectset
objectset/resources.py
objectset/resources.py
from django.conf.urls import patterns, url
from django.http import HttpResponse
from restlib2.resources import Resource
from restlib2.http import codes
from restlib2.params import Parametizer, param_cleaners
from preserialize.serialize import serialize

SET_OPERATIONS = {
    'and': '__and__',
    'or': '__or__',
    'xor': '__xor__',
    'sub': '__sub__',
}

INPLACE_SET_OPERATIONS = {
    'and': '__iand__',
    'or': '__ior__',
    'xor': '__ixor__',
    'sub': '__isub__',
}


class SetParametizer(Parametizer):
    embed = False

    def clean_embed(self, value):
        return param_cleaners.clean_bool(value)


class BaseSetResource(Resource):
    parametizer = SetParametizer

    model = None

    template = None

    object_template = None

    form_class = None

    def get_params(self, request):
        return self.parametizer().clean(request.GET)

    def get_serialize_template(self, request, **kwargs):
        "Prepare the serialize template"
        template = self.template or {}
        relation = self.model._set_object_rel

        if kwargs.get('embed', False):
            template.setdefault('exclude', [relation])
        elif self.object_template:
            template.setdefault('related', {})
            if relation not in template['related']:
                template['related'][relation] = self.object_template
        # Return the prepared template (the original omitted this return,
        # which would have made the callers below serialize with None)
        return template

    def get_queryset(self, request):
        return self.model.objects.all()

    def get_object(self, request, **kwargs):
        try:
            return self.get_queryset(request).get(**kwargs)
        except self.model.DoesNotExist:
            pass


class SetsResource(BaseSetResource):
    def get(self, request):
        params = self.get_params(request)
        template = self.get_serialize_template(request, **params)
        return serialize(self.get_queryset(request), **template)

    def post(self, request):
        form = self.form_class(request.data)

        if form.is_valid():
            instance = form.save()
            params = self.get_params(request)
            template = self.get_serialize_template(request, **params)
            return serialize(instance, **template)
        return HttpResponse(dict(form.errors),
                            status=codes.unprocessable_entity)


class SetResource(BaseSetResource):
    def is_not_found(self, request, response, pk):
        instance = self.get_object(request, pk=pk)
        if instance is None:
            return True
        request.instance = instance

    def get(self, request, pk):
        return serialize(request.instance, **self.template)

    def put(self, request, pk):
        form = self.form_class(request.data, instance=request.instance)

        if form.is_valid():
            form.save()
            return HttpResponse(status=codes.no_content)
        return HttpResponse(dict(form.errors),
                            status=codes.unprocessable_entity)

    def delete(self, request, pk):
        request.instance.delete()
        return HttpResponse(status=codes.no_content)


class SetObjectsResource(BaseSetResource):
    pass


class SetOperationsResource(BaseSetResource):
    def post(self, request, pk, *args):
        pass


def get_url_patterns(resources):
    """Returns urlpatterns for the defined resources. `resources` is a dict
    corresponding to each resource:

        - `sets` => SetsResource
        - `set` => SetResource
        - `operations` => SetOperationsResource
        - `objects` => SetObjectsResource
    """
    return patterns(
        '',
        url(r'^$', resources['sets'](), name='sets'),
        url(r'^(?P<pk>\d+)/$', resources['set'](), name='set'),
        url(r'^(?P<pk>\d+)/objects/$', resources['objects'](), name='objects'),
        url(r'^(?P<pk>\d+)/(?:(and|or|xor|sub)/(\d+)/)+/$',
            resources['operations'](), name='operations'),
    )
bsd-2-clause
Python
ca9ed2756a12a2587f5b4d021597d2229196da50
Add migration to add china region
prattl/teamfinder,prattl/teamfinder,prattl/teamfinder,prattl/teamfinder
api/common/migrations/0007_add_china_region.py
api/common/migrations/0007_add_china_region.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-24 21:52
from __future__ import unicode_literals

from django.db import migrations


def forwards(apps, schema_editor):
    Region = apps.get_model('common.Region')
    region_to_add = 'China'
    try:
        Region.objects.get(name=region_to_add)
    except Region.DoesNotExist:
        Region.objects.create(name=region_to_add)


class Migration(migrations.Migration):

    dependencies = [
        ('common', '0006_emailrecord'),
    ]

    operations = [
        migrations.RunPython(forwards, migrations.RunPython.noop)
    ]
apache-2.0
Python
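The try/except insert in the migration above has a one-call equivalent in Django's ORM; a hedged sketch (not part of the commit) using get_or_create, which only inserts the row when it is missing:

def forwards(apps, schema_editor):
    Region = apps.get_model('common.Region')
    # get_or_create returns (instance, created); the create only happens on a miss.
    Region.objects.get_or_create(name='China')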
680b2cb1488f83aef5b45476e23bd93a90069872
Create Content Loader app to Herd/DM standards - Configure Pyinstaller
FINRAOS/herd,FINRAOS/herd,FINRAOS/herd,FINRAOS/herd,FINRAOS/herd
herd-code/herd-tools/herd-content-loader/herdcl/hook-otags.py
herd-code/herd-tools/herd-content-loader/herdcl/hook-otags.py
hiddenimports = [
    'numpy',
    'pandas._libs.tslibs.timedeltas',
    'pandas._libs.tslibs.nattype',
    'pandas._libs.tslibs.np_datetime',
    'pandas._libs.skiplist'
]
apache-2.0
Python
56d14e7b0386588afd39f2413fafe0b9ba41806d
Access checking unit tests for SlotsTransferAdminPage.
rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son
tests/app/soc/modules/gsoc/views/test_slot_transfer_admin.py
tests/app/soc/modules/gsoc/views/test_slot_transfer_admin.py
# Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Unit tests for slot transfer admin view."""

from tests import profile_utils
from tests import test_utils


class SlotsTransferAdminPageTest(test_utils.GSoCDjangoTestCase):
    """Unit tests for SlotsTransferAdminPage class."""

    def setUp(self):
        self.init()
        self.url = '/gsoc/admin/slots/transfer/%s' % self.gsoc.key().name()

    def testLoneUserAccessForbidden(self):
        response = self.get(self.url)
        self.assertResponseForbidden(response)
        self.assertErrorTemplatesUsed(response)

    def testStudentAccessForbidden(self):
        self.data.createStudent()
        response = self.get(self.url)
        self.assertResponseForbidden(response)
        self.assertErrorTemplatesUsed(response)

    def testMentorAccessForbidden(self):
        self.data.createMentor(self.org)
        response = self.get(self.url)
        self.assertResponseForbidden(response)
        self.assertErrorTemplatesUsed(response)

    def testOrgAdminAccessForbidden(self):
        self.data.createOrgAdmin(self.org)
        response = self.get(self.url)
        self.assertResponseForbidden(response)
        self.assertErrorTemplatesUsed(response)

    def testHostAccessGranted(self):
        self.data.createHost()
        response = self.get(self.url)
        self.assertResponseOK(response)
apache-2.0
Python
db446bf6dc4255f556f20235d2bdc28fa056ad46
Add list_owners.py to list shared folder owners
blokeley/backup_dropbox,blokeley/dfb
list_owners.py
list_owners.py
"""List all shared folders and their owners.""" import logging import os from typing import Iterator import dropbox from backup import File, setup_logging, get_members, get_files def get_folder_members(team: dropbox.DropboxTeam, folder: File) \ -> Iterator[dropbox.sharing.UserMembershipInfo]: """Yield UserMembershipInfo objects which contain access level information (whether user is an owner, editor or viewer of a shared folder). """ user = team.as_user(folder.member.profile.team_member_id) members = user.sharing_list_folder_members(folder.file.shared_folder_id) for member in members.users: yield member while members.cursor: members = user.sharing_list_folder_members_continue(members.cursor) for member in members.users: yield member def main(): setup_logging() logger = logging.getLogger('main') logger.info('Please wait up to tens of minutes...') shared_folders = set() team = dropbox.DropboxTeam(os.environ['DROPBOX_TEAM_TOKEN']) for member in get_members(team): logger.debug(f'Checking {member.profile.name.display_name}') for f in get_files(member, team): path = f.file.path_display logger.debug(f'Checking {path}') # Find out if it is a shared folder try: if not f.file.sharing_info.parent_shared_folder_id: shared_folders.add(f) except AttributeError: logger.debug(f'{path} is not a shared folder') for sf in shared_folders: path = sf.file.path_display for member in get_folder_members(team, sf): name = member.user.display_name logger.debug(f'{path} : {name} : {member.access_type}') if member.access_type.is_owner(): logger.info(f'{path} is owned by {name}') break else: # No owner found for the shared folder logger.warning(f'No owner found for {path}') if __name__ == "__main__": main()
apache-2.0
Python
1c094fe58df0fa57884752be7f64ee9755e433f1
Create __init__.py
tKhan719/Theta
tests/__init__.py
tests/__init__.py
mit
Python
6edc4700f755380b8b9099ae78619cbd225a2790
add API tests
pfe-asr-2014/tsp-mooc-overview,pfe-asr-2014/tsp-mooc-overview
tests/api_test.py
tests/api_test.py
import overview, unittest, mock, json
from overview.services import Services


class ApiV1Test(unittest.TestCase):

    def send_patch_json(self, url, json_data):
        return self.app.patch(url,
                              data=json.dumps(json_data),
                              headers=[('Content-Type', 'application/json')])

    def setUp(self):
        self.app = overview.app.test_client()

    @mock.patch.object(Services, 'docker_state')
    def test_get_docker_state(self, mock_docker_state):
        mock_docker_state.return_value = {'message': 'docker_state_by_services'}

        rv = self.app.get('/api/v1/docker')
        self.assertEqual(rv.data, '{\n "message": "docker_state_by_services"\n}')

    @mock.patch.object(Services, 'states')
    def test_get_services_state(self, mock_services_state):
        mock_services_state.return_value = {'message': 'services_state'}

        rv = self.app.get('/api/v1/services')
        self.assertEqual(rv.data, '{\n "message": "services_state"\n}')

    @mock.patch.object(Services, 'change')
    def test_patch_service_state(self, mock_services_change):
        # When the change is valid (from services.change perspective)
        mock_services_change.return_value = None
        rv = self.send_patch_json('/api/v1/services/serviceId', {
            'state': Services.STATE_RUNNING
        })
        self.assertEqual(rv.data, '{\n "message": "Correctly applied. Change in progress."\n}')

        # Verify that the change has been given
        mock_services_change.assert_called_with('serviceId', Services.STATE_RUNNING)

        # When the change is invalid (from services.change perspective)
        mock_services_change.return_value = 'error description'
        rv = self.send_patch_json('/api/v1/services/serviceId', {
            'state': Services.STATE_RUNNING
        })
        self.assertEqual(rv.data, '{\n "error": "error description", \n'
                                  ' "message": "This change cannot be made"\n}')
mit
Python
0347d82e55382b9618158c4c5809c360e729c245
Create neworld_client.py
soul2867/OMOOC2py
_src/om2py4w/4wex0/neworld_client.py
_src/om2py4w/4wex0/neworld_client.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import sys
# sys.setdefaultencoding() does not exist, here!
reload(sys)  # Reload does the trick!
sys.setdefaultencoding('UTF8')

from lxml import html
import requests


def read_note():
    page = requests.get('http://localhost:8080/neworld')
    tree = html.fromstring(page.content)
    note_content = tree.xpath('//div[@class="note_content"]/text()')
    return note_content


def write_note(mynote):
    wpage = requests.post('http://localhost:8080/neworld', data={'notes': mynote})


def main():
    while True:
        mynote = raw_input('>>> ')
        if mynote == "q":
            print ("Thanks for writing.")
            break
        elif mynote == "r":
            print read_note()
        else:
            write_note(mynote)


if __name__ == "__main__":
    main()
mit
Python
3a4c922d353df5f5b3f3cabe24b04090b0a3fd08
test the serve command
mattrobenolt/warehouse,robhudson/warehouse,techtonik/warehouse,techtonik/warehouse,mattrobenolt/warehouse,mattrobenolt/warehouse,robhudson/warehouse
tests/test_cli.py
tests/test_cli.py
# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals

import pretend
import werkzeug.serving

from warehouse.cli import ServeCommand


def test_serve(monkeypatch):
    run_simple = pretend.call_recorder(
        lambda host, port, app, use_reloader, use_debugger: None,
    )
    monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple)

    host, port, app, use_reloader, use_debugger = (
        pretend.stub() for x in range(5)
    )

    ServeCommand()(
        app, host, port,
        reloader=use_reloader,
        debugger=use_debugger,
    )

    assert run_simple.calls == [
        pretend.call(
            host, port, app,
            use_reloader=use_reloader,
            use_debugger=use_debugger,
        ),
    ]
apache-2.0
Python
2299343d8b10658cc6682b23dbf9be9d5fd290f6
Add unit test for data integrity.
c4fcm/WhatWeWatch-Analysis,c4fcm/WhatWeWatch-Analysis,c4fcm/WhatWeWatch-Analysis
tests/testdata.py
tests/testdata.py
import ConfigParser
import csv
import unittest


class DataTest(unittest.TestCase):

    def setUp(self):
        config = ConfigParser.RawConfigParser()
        config.read('../app.config')
        # Load the data from the csv into an array
        self.data = []
        with open('../data/%s' % config.get('data', 'filename'), 'rb') as csvfile:
            reader = csv.reader(csvfile)
            # Skip header and parse data
            reader.next()
            for row in reader:
                self.data.append([s.strip() for s in row])

    def test_complete(self):
        '''Ensure there are no day/country pairs missing data'''
        date_country = dict()
        dates = set()
        countries = set()
        for date, country, video_id in self.data:
            dates.add(date)
            countries.add(country)
            date_country[date] = date_country.get(date, {})
            date_country[date][country] = date_country[date].get(country, 0) + 1
        for date in dates:
            for country in countries:
                count = date_country.get(date, {}).get(country, 0)
                self.assertNotEqual((date, country, count), (date, country, 0))


if __name__ == '__main__':
    unittest.main()
bsd-3-clause
Python
78aebc48763b15dedc3aee65a18a2a39f46e5c30
add run module
zeaphoo/cocopot,zeaphoo/flagon
flagon/run.py
flagon/run.py
def run_simple(hostname, port, application, use_reloader=False,
               use_debugger=False):
    from wsgiref.simple_server import make_server
    from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
    pass
mit
Python
b9399dbdfff80fec21cfed926779b67589835047
Create LettCombPhoneNum_002.py
cc13ny/algo,cc13ny/algo,Chasego/cod,Chasego/codi,cc13ny/algo,Chasego/codi,Chasego/codi,Chasego/cod,cc13ny/Allin,Chasego/codi,Chasego/codirit,cc13ny/Allin,Chasego/codirit,Chasego/codirit,cc13ny/algo,cc13ny/Allin,cc13ny/algo,cc13ny/Allin,Chasego/codirit,Chasego/codirit,Chasego/cod,Chasego/cod,Chasego/cod,Chasego/codi,cc13ny/Allin
leetcode/017-Letter-Combinations-of-a-Phone-Number/LettCombPhoneNum_002.py
leetcode/017-Letter-Combinations-of-a-Phone-Number/LettCombPhoneNum_002.py
class Solution(object):
    def letterCombinations(self, digits):
        """
        :type digits: str
        :rtype: List[str]
        """
        def comb(digits, d2l):
            if not digits:
                return [""]
            res = []
            for c in d2l[int(digits[0])]:
                for suffix in comb(digits[1:], d2l):
                    res.append(c + suffix)
            return res

        if not digits:
            return []
        d2l = {
            2: 'abc',
            3: 'def',
            4: 'ghi',
            5: 'jkl',
            6: 'mno',
            7: 'pqrs',
            8: 'tuv',
            9: 'wxyz'
        }
        return comb(digits, d2l)
mit
Python
d92eff7e89e09167b126f99243986eae5792f705
Add py-debtcollector (#25212)
LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack
var/spack/repos/builtin/packages/py-debtcollector/package.py
var/spack/repos/builtin/packages/py-debtcollector/package.py
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class PyDebtcollector(PythonPackage):
    """
    A collection of Python deprecation patterns and strategies that help
    you collect your technical debt in a non-destructive manner.
    """

    homepage = "https://docs.openstack.org/debtcollector/latest"
    pypi = "debtcollector/debtcollector-2.2.0.tar.gz"

    maintainers = ['haampie']

    version('2.2.0', sha256='787981f4d235841bf6eb0467e23057fb1ac7ee24047c32028a8498b9128b6829')

    depends_on('python@3.6:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('py-pbr@2.0.0:2.0.999,2.1.1:', type='build')
    depends_on('py-six@1.10.0:', type=('build', 'run'))
    depends_on('py-wrapt@1.7.0:', type=('build', 'run'))
lgpl-2.1
Python
6bd4b7e4c2dac2817250f184114eea8c05fbefb7
Add compat.py to get get_user_model working
Alir3z4/django-cuser
cuser/compat.py
cuser/compat.py
from __future__ import unicode_literals

from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
import django
from django.utils.functional import lazy

__all__ = ['User', 'AUTH_USER_MODEL']

# Django 1.5+ compatibility
if django.VERSION >= (1, 5):
    AUTH_USER_MODEL = settings.AUTH_USER_MODEL
    try:
        from django.contrib.auth import get_user_model
        User = lazy(get_user_model, AUTH_USER_MODEL)
    except ImproperlyConfigured:
        pass
else:
    from django.contrib.auth.models import User
    AUTH_USER_MODEL = 'auth.User'
bsd-3-clause
Python
422390ff7eb4d97eaf0c5c1a1b250010ee766ec7
Add tool for clean pyc files
xcgspring/AXUI,xcgspring/AXUI,xcgspring/AXUI
tools/cleanPYC.py
tools/cleanPYC.py
import re
import os
import sys

print("%s path\n" % sys.argv[0])
path = sys.argv[1]
for root, dirs, files in os.walk(path):
    for file_ in files:
        if re.match(".*.pyc$", file_):
            abs_file = os.path.join(root, file_)
            print("Clean %s" % abs_file)
            os.remove(abs_file)
apache-2.0
Python
3aacdb44210ca5af86bc9258eaecc1bbbda4ea7f
Implement colorization in it's own file
mmorenobarm/mbed-os,c1728p9/mbed-os,ryankurte/mbed-os,bcostm/mbed-os,rgrover/mbed,svogl/mbed-os,CalSol/mbed,geky/mbed,j-greffe/mbed-os,andreaslarssonublox/mbed,betzw/mbed-os,theotherjimmy/mbed,radhika-raghavendran/mbed-os5.1-onsemi,nRFMesh/mbed-os,mazimkhan/mbed-os,svogl/mbed-os,bcostm/mbed-os,pradeep-gr/mbed-os5-onsemi,mikaleppanen/mbed-os,jeremybrodt/mbed,HeadsUpDisplayInc/mbed,arostm/mbed-os,jeremybrodt/mbed,ryankurte/mbed-os,CalSol/mbed,netzimme/mbed-os,karsev/mbed-os,mikaleppanen/mbed-os,ryankurte/mbed-os,radhika-raghavendran/mbed-os5.1-onsemi,andcor02/mbed-os,tung7970/mbed-os-1,mbedmicro/mbed,geky/mbed,adustm/mbed,kjbracey-arm/mbed,NXPmicro/mbed,geky/mbed,andcor02/mbed-os,andreaslarssonublox/mbed,maximmbed/mbed,netzimme/mbed-os,Archcady/mbed-os,kjbracey-arm/mbed,svastm/mbed,betzw/mbed-os,mazimkhan/mbed-os,theotherjimmy/mbed,catiedev/mbed-os,theotherjimmy/mbed,fvincenzo/mbed-os,maximmbed/mbed,svogl/mbed-os,catiedev/mbed-os,andcor02/mbed-os,monkiineko/mbed-os,cvtsi2sd/mbed-os,kjbracey-arm/mbed,bcostm/mbed-os,adustm/mbed,fahhem/mbed-os,screamerbg/mbed,karsev/mbed-os,bulislaw/mbed-os,c1728p9/mbed-os,monkiineko/mbed-os,theotherjimmy/mbed,YarivCol/mbed-os,c1728p9/mbed-os,adamgreen/mbed,infinnovation/mbed-os,screamerbg/mbed,j-greffe/mbed-os,NXPmicro/mbed,ryankurte/mbed-os,mbedmicro/mbed,geky/mbed,mbedmicro/mbed,rgrover/mbed,bulislaw/mbed-os,bulislaw/mbed-os,tung7970/mbed-os-1,HeadsUpDisplayInc/mbed,cvtsi2sd/mbed-os,YarivCol/mbed-os,j-greffe/mbed-os,adamgreen/mbed,netzimme/mbed-os,YarivCol/mbed-os,radhika-raghavendran/mbed-os5.1-onsemi,adamgreen/mbed,fvincenzo/mbed-os,mikaleppanen/mbed-os,Archcady/mbed-os,tung7970/mbed-os,arostm/mbed-os,arostm/mbed-os,RonEld/mbed,bcostm/mbed-os,HeadsUpDisplayInc/mbed,j-greffe/mbed-os,nvlsianpu/mbed,arostm/mbed-os,NXPmicro/mbed,catiedev/mbed-os,netzimme/mbed-os,svastm/mbed,HeadsUpDisplayInc/mbed,c1728p9/mbed-os,monkiineko/mbed-os,fanghuaqi/mbed,nRFMesh/mbed-os,fvincenzo/mbed-os,j-greffe/mbed-os,mazimkhan/mbed-os,tung7970/mbed-os-1,rgrover/mbed,tung7970/mbed-os-1,cvtsi2sd/mbed-os,cvtsi2sd/mbed-os,tung7970/mbed-os,nvlsianpu/mbed,pradeep-gr/mbed-os5-onsemi,kjbracey-arm/mbed,Archcady/mbed-os,ryankurte/mbed-os,mmorenobarm/mbed-os,mazimkhan/mbed-os,YarivCol/mbed-os,kl-cruz/mbed-os,c1728p9/mbed-os,pradeep-gr/mbed-os5-onsemi,geky/mbed,arostm/mbed-os,Archcady/mbed-os,kl-cruz/mbed-os,kl-cruz/mbed-os,fahhem/mbed-os,svogl/mbed-os,andreaslarssonublox/mbed,mazimkhan/mbed-os,mmorenobarm/mbed-os,adamgreen/mbed,mbedmicro/mbed,nRFMesh/mbed-os,catiedev/mbed-os,fanghuaqi/mbed,jeremybrodt/mbed,kl-cruz/mbed-os,andcor02/mbed-os,NXPmicro/mbed,screamerbg/mbed,c1728p9/mbed-os,cvtsi2sd/mbed-os,maximmbed/mbed,CalSol/mbed,bulislaw/mbed-os,andreaslarssonublox/mbed,tung7970/mbed-os,theotherjimmy/mbed,monkiineko/mbed-os,rgrover/mbed,infinnovation/mbed-os,tung7970/mbed-os,netzimme/mbed-os,adamgreen/mbed,adustm/mbed,fanghuaqi/mbed,mazimkhan/mbed-os,catiedev/mbed-os,pradeep-gr/mbed-os5-onsemi,monkiineko/mbed-os,nRFMesh/mbed-os,tung7970/mbed-os,nRFMesh/mbed-os,Archcady/mbed-os,ryankurte/mbed-os,fahhem/mbed-os,YarivCol/mbed-os,adamgreen/mbed,fvincenzo/mbed-os,RonEld/mbed,Archcady/mbed-os,RonEld/mbed,adustm/mbed,jeremybrodt/mbed,fahhem/mbed-os,infinnovation/mbed-os,mmorenobarm/mbed-os,HeadsUpDisplayInc/mbed,andcor02/mbed-os,betzw/mbed-os,YarivCol/mbed-os,RonEld/mbed,svogl/mbed-os,nvlsianpu/mbed,maximmbed/mbed,CalSol/mbed,andreaslarssonublox/mbed,rgrover/mbed,kl-cruz/mbed-os,cvtsi2sd/mbed-os,nvlsianpu/mbed,netzimme/mbed-os,pradeep-gr/mbed-os5-onsemi,pradeep-gr/mbed-os5-onsemi,CalSol/mbed,karsev/mbe
d-os,NXPmicro/mbed,RonEld/mbed,infinnovation/mbed-os,bulislaw/mbed-os,fanghuaqi/mbed,maximmbed/mbed,fahhem/mbed-os,adustm/mbed,mikaleppanen/mbed-os,screamerbg/mbed,mikaleppanen/mbed-os,monkiineko/mbed-os,screamerbg/mbed,mikaleppanen/mbed-os,svogl/mbed-os,adustm/mbed,nvlsianpu/mbed,svastm/mbed,catiedev/mbed-os,radhika-raghavendran/mbed-os5.1-onsemi,betzw/mbed-os,CalSol/mbed,svastm/mbed,infinnovation/mbed-os,radhika-raghavendran/mbed-os5.1-onsemi,mmorenobarm/mbed-os,karsev/mbed-os,fahhem/mbed-os,bcostm/mbed-os,fvincenzo/mbed-os,betzw/mbed-os,jeremybrodt/mbed,nRFMesh/mbed-os,RonEld/mbed,mmorenobarm/mbed-os,NXPmicro/mbed,arostm/mbed-os,karsev/mbed-os,screamerbg/mbed,HeadsUpDisplayInc/mbed,tung7970/mbed-os-1,andcor02/mbed-os,fanghuaqi/mbed,maximmbed/mbed,svastm/mbed,betzw/mbed-os,kl-cruz/mbed-os,bcostm/mbed-os,infinnovation/mbed-os,nvlsianpu/mbed,radhika-raghavendran/mbed-os5.1-onsemi,karsev/mbed-os,bulislaw/mbed-os,j-greffe/mbed-os,theotherjimmy/mbed,mbedmicro/mbed
tools/colorize.py
tools/colorize.py
""" mbed SDK Copyright (c) 2016 ARM Limited Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ """ This python file is responsible for generating colorized notifiers. """ import sys import re from colorama import init init() colors = { 'none' : "", 'default' : "\033[.0m", 'bold' : "\033[.1m", 'underline' : "\033[.4m", 'blink' : "\033[.5m", 'reverse' : "\033[.7m", 'concealed' : "\033[.8m", 'black' : "\033[.30m", 'red' : "\033[.31m", 'green' : "\033[.32m", 'yellow' : "\033[.33m", 'blue' : "\033[.34m", 'magenta' : "\033[.35m", 'cyan' : "\033[.36m", 'white' : "\033[.37m", 'on_black' : "\033[.40m", 'on_red' : "\033[.41m", 'on_green' : "\033[.42m", 'on_yellow' : "\033[.43m", 'on_blue' : "\033[.44m", 'on_magenta' : "\033[.45m", 'on_cyan' : "\033[46m", 'on_white' : "\033[47m", } # Convert a color string from a string into an ascii escape code that will print # that color on the terminal. color_matcher = re.compile(r"(\w+)(\W+on\W+\w+)?") def colorstring_to_escapecode(color_string): match = re.match(color_matcher, color_string) if match: return colors[match.group(1)] + (colors[match.group(2).strip().replace(" ","_")] if match.group(2) else "") else: return corols['default'] # Wrap a toolchain notifier in a colorizer. This colorizer will wrap notifications # in a color if the severity matches a color in the *color_map*. def print_in_color_notifier (color_map, print_fn): def wrap(event, silent=False): fd = sys.stdout if fd.isatty() and 'severity' in event and event['severity'] in color_map: fd.write(colorstring_to_escapecode(color_map[event['severity']])) print_fn(event, silent) fd.write(colorstring_to_escapecode('default')) else: print_fn(event, silent) return wrap
apache-2.0
Python
0d0115ef5e088ed54a176e24cc94713b706f3d55
include migration
wwitzel3/awx,wwitzel3/awx,wwitzel3/awx,snahelou/awx,snahelou/awx,snahelou/awx,snahelou/awx,wwitzel3/awx
awx/main/migrations/0015_v300_label_changes.py
awx/main/migrations/0015_v300_label_changes.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0014_v300_invsource_cred'),
    ]

    operations = [
        migrations.AlterField(
            model_name='label',
            name='organization',
            field=models.ForeignKey(related_name='labels', to='main.Organization', help_text='Organization this label belongs to.'),
        ),
    ]
apache-2.0
Python
5db256e6ac4ee84e008afa8f94d767330e392709
Increase coverage
proyectos-analizo-info/pybossa-analizo-info,PyBossa/pybossa,Scifabric/pybossa,OpenNewsLabs/pybossa,inteligencia-coletiva-lsd/pybossa,CulturePlex/pybossa,inteligencia-coletiva-lsd/pybossa,CulturePlex/pybossa,harihpr/tweetclickers,geotagx/pybossa,geotagx/pybossa,OpenNewsLabs/pybossa,jean/pybossa,CulturePlex/pybossa,stefanhahmann/pybossa,jean/pybossa,Scifabric/pybossa,PyBossa/pybossa,harihpr/tweetclickers,proyectos-analizo-info/pybossa-analizo-info,proyectos-analizo-info/pybossa-analizo-info,stefanhahmann/pybossa
test/test_vmcp.py
test/test_vmcp.py
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
import json
from mock import patch
from base import web, model, Fixtures, db, redis_flushall
import pybossa.vmcp as vmcp
from nose.tools import assert_equal, assert_raises


class TestAPI:
    def test_myquote(self):
        """Test myquote works."""
        # Valid char should be the same
        err_msg = "Valid chars should not be quoted"
        assert vmcp.myquote('a') == 'a', err_msg

        # Non-valid
        err_msg = "Non-Valid chars should be quoted"
        assert vmcp.myquote('%') == '%25', err_msg
agpl-3.0
Python
0a3e00b27606eda26917c3c69b0344dc301502f0
Revert "will this fix tests?"
zestyr/lbry,lbryio/lbry,zestyr/lbry,lbryio/lbry,zestyr/lbry,lbryio/lbry
tests/__init__.py
tests/__init__.py
# log_support sets up the default Logger class
# and so we need to ensure that it is also
# set up for the tests

from lbrynet.core import log_support
mit
Python
3dbef22cee9ea83c7e80756037209334da237d4c
Remove unused compat types from compat.py
Hasimir/twython,vivek8943/twython,ping/twython,Fueled/twython,ryanmcgrath/twython,Oire/twython,fibears/twython,joebos/twython,Devyani-Divs/twython,akarambir/twython
twython/compat.py
twython/compat.py
import sys

_ver = sys.version_info

#: Python 2.x?
is_py2 = (_ver[0] == 2)

#: Python 3.x?
is_py3 = (_ver[0] == 3)

try:
    import simplejson as json
except ImportError:
    import json

if is_py2:
    from urllib import urlencode, quote_plus

    try:
        from urlparse import parse_qsl
    except ImportError:
        from cgi import parse_qsl

    basestring = basestring
elif is_py3:
    from urllib.parse import urlencode, quote_plus, parse_qsl

    basestring = (str, bytes)
import sys

_ver = sys.version_info

#: Python 2.x?
is_py2 = (_ver[0] == 2)

#: Python 3.x?
is_py3 = (_ver[0] == 3)

try:
    import simplejson as json
except ImportError:
    import json

if is_py2:
    from urllib import urlencode, quote_plus

    try:
        from urlparse import parse_qsl
    except ImportError:
        from cgi import parse_qsl

    builtin_str = str
    bytes = str
    str = unicode
    basestring = basestring
    numeric_types = (int, long, float)
elif is_py3:
    from urllib.parse import urlencode, quote_plus, parse_qsl

    builtin_str = str
    str = str
    bytes = bytes
    basestring = (str, bytes)
    numeric_types = (int, float)
mit
Python
f10049ae831570b54581c2a089218359febe5c50
add command for exporting to csv
tswicegood/django-fecfilings
fecfilings/management/commands/fecfilings_to_csv.py
fecfilings/management/commands/fecfilings_to_csv.py
from django.core.management.base import NoArgsCommand

from fecfilings.models import Contributor


class Command(NoArgsCommand):
    def handle(self, **options):
        for c in Contributor.objects.all():
            print c.to_csv()
apache-2.0
Python
23165cbd1ac8ba1528649c04b56d598664e1da8b
Enhance mysensors binary sensor device classes (#13367)
tchellomello/home-assistant,Cinntax/home-assistant,jawilson/home-assistant,home-assistant/home-assistant,PetePriority/home-assistant,nkgilley/home-assistant,postlund/home-assistant,MartinHjelmare/home-assistant,persandstrom/home-assistant,auduny/home-assistant,molobrakos/home-assistant,molobrakos/home-assistant,aequitas/home-assistant,sander76/home-assistant,Danielhiversen/home-assistant,qedi-r/home-assistant,Teagan42/home-assistant,mKeRix/home-assistant,nugget/home-assistant,MartinHjelmare/home-assistant,sdague/home-assistant,jnewland/home-assistant,sdague/home-assistant,nugget/home-assistant,HydrelioxGitHub/home-assistant,jabesq/home-assistant,adrienbrault/home-assistant,jawilson/home-assistant,tboyce1/home-assistant,tinloaf/home-assistant,persandstrom/home-assistant,FreekingDean/home-assistant,jnewland/home-assistant,jamespcole/home-assistant,auduny/home-assistant,home-assistant/home-assistant,titilambert/home-assistant,tboyce1/home-assistant,fbradyirl/home-assistant,kennedyshead/home-assistant,DavidLP/home-assistant,DavidLP/home-assistant,turbokongen/home-assistant,titilambert/home-assistant,molobrakos/home-assistant,mKeRix/home-assistant,PetePriority/home-assistant,Cinntax/home-assistant,mezz64/home-assistant,tchellomello/home-assistant,PetePriority/home-assistant,auduny/home-assistant,soldag/home-assistant,mKeRix/home-assistant,toddeye/home-assistant,balloob/home-assistant,sander76/home-assistant,tinloaf/home-assistant,balloob/home-assistant,rohitranjan1991/home-assistant,aequitas/home-assistant,mezz64/home-assistant,qedi-r/home-assistant,joopert/home-assistant,balloob/home-assistant,jamespcole/home-assistant,aronsky/home-assistant,nkgilley/home-assistant,nugget/home-assistant,soldag/home-assistant,pschmitt/home-assistant,aronsky/home-assistant,persandstrom/home-assistant,lukas-hetzenecker/home-assistant,robbiet480/home-assistant,adrienbrault/home-assistant,jnewland/home-assistant,DavidLP/home-assistant,HydrelioxGitHub/home-assistant,kennedyshead/home-assistant,postlund/home-assistant,MartinHjelmare/home-assistant,w1ll1am23/home-assistant,jabesq/home-assistant,tboyce1/home-assistant,turbokongen/home-assistant,joopert/home-assistant,mKeRix/home-assistant,tboyce1/home-assistant,jamespcole/home-assistant,fbradyirl/home-assistant,HydrelioxGitHub/home-assistant,partofthething/home-assistant,rohitranjan1991/home-assistant,robbiet480/home-assistant,Danielhiversen/home-assistant,leppa/home-assistant,leppa/home-assistant,tinloaf/home-assistant,rohitranjan1991/home-assistant,Teagan42/home-assistant,toddeye/home-assistant,GenericStudent/home-assistant,w1ll1am23/home-assistant,partofthething/home-assistant,GenericStudent/home-assistant,aequitas/home-assistant,jabesq/home-assistant,fbradyirl/home-assistant,FreekingDean/home-assistant,pschmitt/home-assistant,lukas-hetzenecker/home-assistant,tboyce021/home-assistant,tboyce021/home-assistant
homeassistant/components/binary_sensor/mysensors.py
homeassistant/components/binary_sensor/mysensors.py
""" Support for MySensors binary sensors. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/binary_sensor.mysensors/ """ from homeassistant.components import mysensors from homeassistant.components.binary_sensor import ( DEVICE_CLASSES, DOMAIN, BinarySensorDevice) from homeassistant.const import STATE_ON SENSORS = { 'S_DOOR': 'door', 'S_MOTION': 'motion', 'S_SMOKE': 'smoke', 'S_SPRINKLER': 'safety', 'S_WATER_LEAK': 'safety', 'S_SOUND': 'sound', 'S_VIBRATION': 'vibration', 'S_MOISTURE': 'moisture', } def setup_platform(hass, config, add_devices, discovery_info=None): """Set up the MySensors platform for binary sensors.""" mysensors.setup_mysensors_platform( hass, DOMAIN, discovery_info, MySensorsBinarySensor, add_devices=add_devices) class MySensorsBinarySensor(mysensors.MySensorsEntity, BinarySensorDevice): """Representation of a MySensors Binary Sensor child node.""" @property def is_on(self): """Return True if the binary sensor is on.""" return self._values.get(self.value_type) == STATE_ON @property def device_class(self): """Return the class of this sensor, from DEVICE_CLASSES.""" pres = self.gateway.const.Presentation device_class = SENSORS.get(pres(self.child_type).name) if device_class in DEVICE_CLASSES: return device_class return None
""" Support for MySensors binary sensors. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/binary_sensor.mysensors/ """ from homeassistant.components import mysensors from homeassistant.components.binary_sensor import ( DEVICE_CLASSES, DOMAIN, BinarySensorDevice) from homeassistant.const import STATE_ON def setup_platform(hass, config, add_devices, discovery_info=None): """Set up the MySensors platform for binary sensors.""" mysensors.setup_mysensors_platform( hass, DOMAIN, discovery_info, MySensorsBinarySensor, add_devices=add_devices) class MySensorsBinarySensor(mysensors.MySensorsEntity, BinarySensorDevice): """Representation of a MySensors Binary Sensor child node.""" @property def is_on(self): """Return True if the binary sensor is on.""" return self._values.get(self.value_type) == STATE_ON @property def device_class(self): """Return the class of this sensor, from DEVICE_CLASSES.""" pres = self.gateway.const.Presentation class_map = { pres.S_DOOR: 'opening', pres.S_MOTION: 'motion', pres.S_SMOKE: 'smoke', } if float(self.gateway.protocol_version) >= 1.5: class_map.update({ pres.S_SPRINKLER: 'sprinkler', pres.S_WATER_LEAK: 'leak', pres.S_SOUND: 'sound', pres.S_VIBRATION: 'vibration', pres.S_MOISTURE: 'moisture', }) if class_map.get(self.child_type) in DEVICE_CLASSES: return class_map.get(self.child_type)
apache-2.0
Python
60ffb1d13f00851377960eb76c90a7ef4592d03c
Create kivy_android_carousel.py
skorokithakis/pythess-files,skorokithakis/pythess-files,skorokithakis/pythess-files
009---Nine-Nine/kivy_android_carousel.py
009---Nine-Nine/kivy_android_carousel.py
#!/usr/bin/python
# -*- coding: utf-8 -*-

import kivy
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.image import Image
from kivy.uix.label import Label
from kivy.uix.scatter import Scatter
from kivy.uix.screenmanager import Screen, ScreenManager, FadeTransition
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.carousel import Carousel

from os import listdir
from os.path import isfile, join

# kivy.require('1.9.1')

images_path = "images/"
carousel_images = [f for f in listdir(images_path) if isfile(join(images_path, f))]
# print carousel_images


class ImagesCarousel(Carousel):
    """
    #STEP 4
    A simple carousel to load our images
    to avoid scatter and make it simple remove scatter and replace add_widget(image_to_add)
    """
    def __init__(self, *args, **kwargs):
        super(ImagesCarousel, self).__init__(**kwargs)
        for image in carousel_images:
            scatter = Scatter(pos=(100, 200), scale=4, do_scale=True)
            image_to_add = Image(source=images_path + image)
            scatter.add_widget(image_to_add)
            self.add_widget(scatter)


class ScreenOne(Screen):
    """
    STEP 2
    This is screen 1 -> see class PyThess(App) 2nd line in build
    """
    def __init__(self, **kwargs):
        super(ScreenOne, self).__init__(**kwargs)
        my_box = FloatLayout(orientation='vertical')
        button1 = Button(text="To the next screen", color=[1, 1, 1, 1],
                         size_hint_y=0.1, size_hint_x=1,
                         pos_hint={'x': 0, 'y': 0.9})
        button1.bind(on_press=self.screen_changer1)
        label = Label(text='Hello PyThess', font_size='40sp',
                      pos_hint={'x': 0, 'y': 0.3})
        my_box.add_widget(button1)
        my_box.add_widget(label)
        self.add_widget(my_box)

    def screen_changer1(self, *args):
        self.manager.current = 'screen2'


class ScreenTwo(Screen):
    """
    #STEP 3
    This is screen 2 -> see class PyThess(App) 3rd line in build
    """
    def __init__(self, **kwargs):
        super(ScreenTwo, self).__init__(**kwargs)
        my_box = FloatLayout(orientation='vertical')
        my_box1 = FloatLayout(orientation='vertical', size_hint_y=0.9,
                              size_hint_x=1, pos_hint={'x': 0, 'y': 0})
        button1 = Button(text="To the previous screen", color=[0, 0, 0, 1],
                         size_hint_y=0.1, size_hint_x=1,
                         pos_hint={'x': 0, 'y': 0.9})
        button1.bind(on_press=self.screen_changer1)
        my_box.add_widget(button1)
        local_carousel = ImagesCarousel(direction='right')  # Here we create the new Carousel
        my_box1.add_widget(local_carousel)
        my_box.add_widget(my_box1)
        self.add_widget(my_box)

    def screen_changer1(self, *args):
        self.manager.current = 'screen1'


class PyThess(App):
    """
    #STEP 1
    The basic app class. Here we load the screen manager
    """
    def build(self):
        self.my_screenmanager = ScreenManager(transition=FadeTransition())
        screen1 = ScreenOne(name='screen1')
        screen2 = ScreenTwo(name='screen2')
        self.my_screenmanager.add_widget(screen1)
        self.my_screenmanager.add_widget(screen2)
        return self.my_screenmanager


if __name__ == '__main__':
    PyThess().run()
mit
Python
c9f70c7a4a24be0cdd9dcf044a06051b0978efff
add exceptions
lorehov/json-rpc,clach04/json-rpc
jsonrpc/exceptions.py
jsonrpc/exceptions.py
class JSONRPCError(object):

    """ Error for JSON-RPC communication.

    When a rpc call encounters an error, the Response Object MUST contain the
    error member with a value that is a Object with the following members:

    code: A Number that indicates the error type that occurred.
        This MUST be an integer.

    message: A String providing a short description of the error.
        The message SHOULD be limited to a concise single sentence.

    data: A Primitive or Structured value that contains additional
        information about the error.
        This may be omitted.
        The value of this member is defined by the Server (e.g. detailed error
        information, nested errors etc.).

    The error codes from and including -32768 to -32000 are reserved for
    pre-defined errors. Any code within this range, but not defined explicitly
    below is reserved for future use. The error codes are nearly the same as
    those suggested for XML-RPC at the following url:
    http://xmlrpc-epi.sourceforge.net/specs/rfc.fault_codes.php

    """

    def __init__(self, code=None, message=None, data=None):
        self.code = code or self.code
        self.message = message or self.message
        self.data = data

    @property
    def _dict(self):
        """ Return object dict representation.

        :return dict:

        """
        data = dict(code=self.code, message=self.message)

        if self.data:
            data["data"] = self.data

        return data


class JSONRPCParseError(JSONRPCError):

    """ Parse Error.

    Invalid JSON was received by the server.
    An error occurred on the server while parsing the JSON text.

    """

    code = -32700
    message = "Parse error"


class JSONRPCInvalidRequest(JSONRPCError):

    """ Invalid Request.

    The JSON sent is not a valid Request object.

    """

    code = -32600
    message = "Invalid Request"


class JSONRPCMethodNotFound(JSONRPCError):

    """ Method not found.

    The method does not exist / is not available.

    """

    code = -32601
    message = "Method not found"


class JSONRPCInvalidParams(JSONRPCError):

    """ Invalid params.

    Invalid method parameter(s).

    """

    code = -32602
    message = "Invalid params"


class JSONRPCInternalError(JSONRPCError):

    """ Internal error.

    Internal JSON-RPC error.

    """

    code = -32603
    message = "Internal error"


class JSONRPCServerError(JSONRPCError):

    """ Server error.

    Reserved for implementation-defined server-errors.

    """

    code = -32000
    message = "Server error"
mit
Python
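To show how the error classes in the record above are meant to be consumed, here is a hypothetical snippet (not from the commit) that builds the error member of a JSON-RPC response via the _dict property:

error = JSONRPCMethodNotFound(data={'method': 'missing_method'})
response = {
    'jsonrpc': '2.0',
    # _dict yields {'code': -32601, 'message': 'Method not found', 'data': {...}}
    'error': error._dict,
    'id': 1,
}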
2947a2c9b6348d248e3ae740722d6a7aa04327c0
add reg d included definitions
cfpb/regulations-configs,ascott1/regulations-configs,grapesmoker/regulations-configs,willbarton/regulations-configs
regconfig/reg_d.py
regconfig/reg_d.py
from regparser.default_settings import *

#### Regulation D

INCLUDE_DEFINITIONS_IN_PART_1004 = [
    ('Alternative mortgage transaction', 'Alternative mortgage transaction'),
    ('Creditor', 'Creditor'),
    ('State', 'State'),
    ('State law', 'State law'),
]

INCLUDE_DEFINITIONS_IN['1004'] = INCLUDE_DEFINITIONS_IN_PART_1004
cc0-1.0
Python
1e32a27b35e25e780e8af6cc76d1eb424328171b
add leetcode Populating Next Right Pointers in Each Node
Fity/2code,Fity/2code,Fity/2code,Fity/2code,Fity/2code,Fity/2code
leetcode/PopulatingNextRightPointersinEachNode/solution.py
leetcode/PopulatingNextRightPointersinEachNode/solution.py
# -*- coding:utf-8 -*-

# Definition for a binary tree node
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None
#         self.next = None

class Solution:
    # @param root, a tree node
    # @return nothing
    def connect(self, root):
        if root is None:
            return root
        if root.left is not None:
            root.left.next = root.right
        if root.right is not None and root.next is not None:
            root.right.next = root.next.left or root.next.right
        self.connect(root.left)
        self.connect(root.right)
mit
Python
b220410ad51413d52076bec84a3bf1a660f9883b
Add a program that says Hello Shikha
ctsit/J.O.B-Training-Repo-1
helloShikha.py
helloShikha.py
#This is my hello world program
print 'Hello Shikha!'
apache-2.0
Python
5a120774bae2d9775493c6806841a97e790b266e
Create citreo_code_v2.py
ujjwalkarn/DataSciencePython,ujjwalkarn/DataSciencePython
Logistic-Regression/citreo_code_v2.py
Logistic-Regression/citreo_code_v2.py
from datetime import datetime
from csv import DictReader
from math import exp, log, sqrt


# parameters #################################################################

train = 'train.csv'  # path to training file
test = 'test.csv'    # path to testing file

D = 2 ** 20  # number of weights use for learning
alpha = .1   # learning rate for sgd optimization


# function definitions #######################################################

# A. Bounded logloss
# INPUT:
#     p: our prediction
#     y: real answer
# OUTPUT
#     logarithmic loss of p given y
def logloss(p, y):
    p = max(min(p, 1. - 10e-12), 10e-12)
    return -log(p) if y == 1. else -log(1. - p)


# B. Apply hash trick of the original csv row
# for simplicity, we treat both integer and categorical features as categorical
# INPUT:
#     csv_row: a csv dictionary, ex: {'Lable': '1', 'I1': '357', 'I2': '', ...}
#     D: the max index that we can hash to
# OUTPUT:
#     x: a list of indices that its value is 1
# NOTE: this helper was commented out in the original file even though the
# training loop below calls it; it is restored here so the script runs.
def get_x(csv_row, D):
    x = [0]  # 0 is the index of the bias term
    for key, value in csv_row.items():
        index = int(value + key[1:], 16) % D  # weakest hash ever ;)
        x.append(index)
    return x  # x contains indices of features that have a value of 1


# C. Get probability estimation on x
# INPUT:
#     x: features
#     w: weights
# OUTPUT:
#     probability of p(y = 1 | x; w)
def get_p(x, w):
    wTx = 0.
    for i in x:  # do wTx
        wTx += w[i] * 1.  # w[i] * x[i], but if i in x we got x[i] = 1.
    return 1. / (1. + exp(-max(min(wTx, 20.), -20.)))  # bounded sigmoid


# D. Update given model
# INPUT:
#     w: weights
#     n: a counter that counts the number of times we encounter a feature
#        this is used for adaptive learning rate
#     x: feature
#     p: prediction of our model
#     y: answer
# OUTPUT:
#     w: updated model
#     n: updated count
def update_w(w, n, x, p, y):
    for i in x:
        # alpha / (sqrt(n) + 1) is the adaptive learning rate heuristic
        # (p - y) * x[i] is the current gradient
        # note that in our case, if i in x then x[i] = 1
        w[i] -= (p - y) * alpha / (sqrt(n[i]) + 1.)
        n[i] += 1.

    return w, n


# training and testing #######################################################

# initialize our model
w = [0.] * D  # weights
n = [0.] * D  # number of times we've encountered a feature

# start training a logistic regression model using on pass sgd
loss = 0.
for t, row in enumerate(DictReader(open(train))):
    y = 1. if row['Label'] == '1' else 0.

    del row['Label']  # can't let the model peek the answer
    del row['Id']  # we don't need the Id

    # main training procedure
    # step 1, get the hashed features
    x = get_x(row, D)

    # step 2, get prediction
    p = get_p(x, w)

    # for progress validation, useless for learning our model
    loss += logloss(p, y)
    if t % 1000000 == 0 and t > 1:
        print('%s\tencountered: %d\tcurrent logloss: %f' % (
            datetime.now(), t, loss/t))

    # step 3, update model with answer
    w, n = update_w(w, n, x, p, y)

# testing (build kaggle's submission file)
with open('submission1234.csv', 'w') as submission:
    submission.write('Id,Predicted\n')
    for t, row in enumerate(DictReader(open(test))):
        Id = row['Id']
        del row['Id']
        x = get_x(row, D)
        p = get_p(x, w)
        submission.write('%s,%f\n' % (Id, p))
mit
Python
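The hashing trick in get_x above replaces a feature dictionary with a fixed array of D weights. A tiny hypothetical demonstration (the row values are made up) of how each feature=value pair maps to a bounded index:

D = 2 ** 20
row = {'I1': '357', 'C1': '68fd1e64'}
x = [0]  # index 0 is the bias term
for key, value in row.items():
    # hex-parse the value plus the key's numeric suffix, then fold into [0, D)
    x.append(int(value + key[1:], 16) % D)
# x now holds the bias index plus one hashed index per feature;
# collisions are possible, but memory stays bounded at D weights.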
557652d4b4297dd80d844915c3d57fc3e46ac83a
add graham's solution:
pycam/python-functions-and-modules,pycam/python-basic,pycam/python-basic,pycam/python-functions-and-modules
solutions/4_21_grsr.py
solutions/4_21_grsr.py
import sys

for line in sys.stdin:
    line = line.rstrip()
    pop, sample_id, sample_name, sex = line.split(",")
    if (sample_id == sys.argv[1]):
        print "Found", sample_id
unlicense
Python
8db04e5d648c9e923f7977f456242d9ea9b80050
Create pig_latin.py
Souloist/Projects,Souloist/Projects,Souloist/Projects,Souloist/Projects,Souloist/Projects
solutions/pig_latin.py
solutions/pig_latin.py
def pig_latin(input_string):
    new_string = []
    for i in input_string.split():
        if i[0] not in "aeiou":
            i = i[1:] + i[0]
        i += "ay"
        new_string.append(i)
    return ' '.join(new_string)


def main():
    user_input = str(raw_input("Please give me a phrase: "))
    print pig_latin(user_input)


if __name__ == '__main__':
    main()
mit
Python
f5460adbaeb87421a7f193a700d25e5a3c6e4351
Create crypt.py
liyupi/self-python-tools
crypt.py
crypt.py
from itertools import cycle


def crypt(source, key):
    result = ""
    a = cycle(key)
    for ch in source:
        result += chr(ord(ch) ^ ord(next(a)))
    return result


if __name__ == "__main__":
    # prompts translated from the original Chinese
    source = input("Enter the string to encrypt/decrypt: ")
    key = input("Enter the key: ")
    print("Encryption/decryption succeeded! The result is: " + crypt(source, key))
mit
Python
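Because XOR is its own inverse, the crypt function above decrypts by re-encrypting with the same key; a quick hypothetical check:

plaintext = "attack at dawn"
assert crypt(crypt(plaintext, "k3y"), "k3y") == plaintext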
5ed7db70874f3ebfe9c946d38ccf12228dacac3a
Test if we tried to commit with an empty message, it should raise a ValueError
PressLabs/pyolite,shawkinsl/pyolite
tests/test_git.py
tests/test_git.py
from unittest import TestCase

from mock import MagicMock, patch
from nose.tools import raises

from pyolite.git import Git


class TestGit(TestCase):
    @raises(ValueError)
    def test_commit_with_no_message(self):
        mock_repo = MagicMock()
        mock_index = MagicMock()
        mock_remotes = MagicMock()

        mock_repo.index = mock_index
        mock_repo.remotes.origin = mock_remotes

        with patch.multiple('pyolite.git', Repo=mock_repo):
            git = Git('~/path/to/repo')
            objects = ['simple_object', 'more_complex_one']
            git.commit(objects, '')
bsd-2-clause
Python
1a3d9b3da91a5c87316e44498a876f70a49df8ad
add 70
ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler
python/p070.py
python/p070.py
import utils


def is_perm(a, b):
    return sorted(str(a)) == sorted(str(b))


best = (10000, 1)
primes = [i for i in utils.primes(4000) if i > 2000]

for i in primes:
    for j in primes:
        n = i * j
        if n > 10**7:
            break
        phi = (i - 1) * (j - 1)
        ratio = (n * 1.0) / phi
        curr = (ratio, n)
        if is_perm(n, phi) and curr < best:
            best = curr

print best[1]
bsd-3-clause
Python
c4764ef1aa1a1aaa0ae8dd909c3578705c7a2060
add 77
ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler,ericdahl/project-euler
python/p077.py
python/p077.py
import utils

primes = utils.primes(100)


def count(target):
    ways = [0] * (target + 1)
    ways[0] = 1
    for p in primes:
        for j in xrange(p, target + 1):
            ways[j] += ways[j - p]
    return ways[target]


for target in xrange(2, 100):
    if count(target) > 5000:
        print target
        break
bsd-3-clause
Python
9a237141c9635d2a1dad6349ad73d24e969d8460
Add runner
subutux/HUD,subutux/HUD
hud-runner.py
hud-runner.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Convenience wrapper for running hud directly from source tree."""

from hud.hud import main

if __name__ == '__main__':
    main()
mit
Python
bfb7d8d9356fe66f433556977a333e4256c6fb61
Create series.py
RonsenbergVI/trendpy,RonsenbergVI/trendpy
trendpy/series.py
trendpy/series.py
# series.py

# MIT License

# Copyright (c) 2017 Rene Jean Corneille

# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:

# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.

# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import matplotlib.pyplot as plt

from trendpy.mcmc import MCMC
from trendpy.factory import StrategyFactory

from pandas import DataFrame, read_csv


class Series(object):

    def __init__(self):
        self.data = None
        self.is_log_price = False

    def __len__(self):
        return self.data.size

    def __str__(self):
        return self.data.__str__()

    @staticmethod
    def from_csv(filename, normalise=True):  # fixed spelling: was 'nomalise'
        ts = Series()
        ts.normalise = normalise
        ts.data = read_csv(filename, index_col=0)
        return ts

    def returns(self, period=1):
        pass

    def save(self, filename='export.csv', type='csv', separator=','):
        if type == 'csv':
            pass
        if type == 'json':
            pass

    def plot(self):
        self.data.plot()
        plt.show()

    def filter(self, method="L1Filter", number_simulations=100, burns=50, total_variation=2):
        mcmc = MCMC(self, StrategyFactory.create(method, self.data.as_matrix()[:, 0],
                                                 total_variation_order=total_variation))
        mcmc.run(number_simulations)
        trend = mcmc.output(burns, "trend")
        self.data = self.data.join(DataFrame(trend, index=self.data.index, columns=[method]))

    def regression(self, method="lasso", number_simulations=100, burns=50):
        pass

    def export(self, filename, as_txt=False):
        pass
mit
Python
250d1c20c16b6c0846a9fb94ef4ebc6e780221df
Create solution.py
lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges,lilsweetcaligula/Online-Judges
hackerrank/algorithms/implementation/easy/equalize_the_array/py/solution.py
hackerrank/algorithms/implementation/easy/equalize_the_array/py/solution.py
def solution(nums):
    import collections

    if len(nums) == 0:
        return 0

    item, count = collections.Counter(nums).most_common()[0]
    return len(nums) - count


n = int(input())
nums = tuple(map(int, input().split()))

cnt = solution(nums)
print(cnt)
mit
Python
fca6421c53e286549d861c65c114991602f310ea
Add some adaptors.
drtconway/pykmer
pykmer/adaptors.py
pykmer/adaptors.py
""" This module provides some adaptors for converting between different data formats: `k2kf` Convert a sequence of k-mers to k-mer frequency pairs `kf2k` Convert a sequence of k-mer frequency pairs to k-mers `keyedKs` Provide keyed access to a sequence of k-mers `keyedKFs` Provide keyed access to a sequence of k-mer frequency pairs """ def k2kf(xs, f=1): for x in xs: yield (x, f) def kf2k(xs): for (x, _) in xs: yield x class keyedKs: def __init__(self, itr): self.itr = itr self.more = True self.next() def valid(self): return self.more def kmer(self): assert self.valid() return self.curr def item(self): assert self.valid() return self.curr def next(self): assert self.valid() try: self.curr = self.itr.next() except StopIteration: self.more = False class keyedKfs: def __init__(self, itr): self.itr = itr self.more = True self.next() def valid(self): return self.more def kmer(self): assert self.valid() return self.curr[0] def item(self): assert self.valid() return self.curr def next(self): assert self.valid() try: self.curr = self.itr.next() except StopIteration: self.more = False
apache-2.0
Python
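As a hypothetical usage sketch for the adaptors above (Python 2 style, matching the module's itr.next() calls), a plain k-mer stream can be lifted to (k-mer, frequency) pairs and then walked with the keyed cursor:

kmers = iter([3, 7, 7, 12])
kfs = k2kf(kmers)       # yields (3, 1), (7, 1), (7, 1), (12, 1)
cursor = keyedKfs(kfs)
while cursor.valid():
    print cursor.kmer(), cursor.item()
    cursor.next()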
b7f3e32827bb9a0f122928d218f4d535febb0829
add command
xuwei0455/design_patterns
Command.py
Command.py
# -*- coding: utf-8 -*-
"""
    Command pattern
"""
from os import listdir, curdir


class ListCommand(object):
    def __init__(self, path=None):
        self.path = path or curdir

    def execute(self):
        self._list(self.path)

    @staticmethod
    def _list(path=None):
        print 'list path {} :'.format(path)
        print listdir(path)


if __name__ == "__main__":
    command = ListCommand()
    command.execute()
mit
Python
bd865a9fdc941b99be40a5ba3dcc02b819b2e9da
add cpm.utils.refstring
nodepy/nodepy-pm,nodepy/nodepy
cpm/utils/refstring.py
cpm/utils/refstring.py
# Copyright (c) 2017 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

import collections
import re

from . import semver

Ref = collections.namedtuple('Ref', 'package version module function')

spec = '[<package>[@<version>]][/<module>][:<function>]'
regex = re.compile('''^
    (?:
        (?P<package> [A-z0-9\.\-_]+)
        (?: @(?P<version> [0-9\.]+[A-z0-9\.\-\+]*))?
    )?
    (?: /(?P<module> [A-z0-9\.\-_]+))?
    (?: :(?P<function> [A-z0-9\.\-_]+))?
    $''', re.X)


def parse(s):
    """
    Parse a reference string and returns a #Ref which is a namedtuple
    consisting of the members *package*, *version*, *module* and *function*.
    The parameter *s* must be a string of the format

        [<package>[@<version>]][/<module>][:<function>]

    # Raises
    ValueError: If the string is invalid.
    """

    m = regex.match(s)
    if not m:
        raise ValueError('invalid refstring: "{}"'.format(s))
    package, version, module, function = m.groups()
    if version:
        try:
            version = semver.Version(version)
        except ValueError as exc:
            raise ValueError('invalid refstring: "{}" ({})'.format(s, exc))
    return Ref(package, version, module, function)


def join(package=None, version=None, module=None, function=None):
    """
    Concatenates the components of a reference back into a string. To use
    this function with a #Ref object, simply use argument-unpacking like
    this: `join(*ref)`.
    """

    if package:
        result = package
        if version:
            result += '@' + str(version)
    else:
        if version:
            raise ValueError('version can not be specified without a package')
        result = ''
    if module:
        result += '/' + module
    if function:
        result += ':' + function
    return result
mit
Python
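A hypothetical round-trip through the parser above, assuming semver.Version round-trips its input string:

ref = parse('mypackage@1.2.0/mymodule:main')
# Ref(package='mypackage', version=Version('1.2.0'),
#     module='mymodule', function='main')
assert join(*ref) == 'mypackage@1.2.0/mymodule:main'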
edb904ca105abfb767f94f366e19ed05374a8014
Create URL Shortner
harshfatepuria/data-analysis-test,harshfatepuria/data-analysis-test,harshfatepuria/data-analysis-test,harshfatepuria/data-analysis-test
URLShortner.py
URLShortner.py
import uuid
import json
import os
from glob import iglob
from pprint import pprint

mapping = {}
mapping['URL'] = []

# Getting JSON file of initial Tika parsing containing list of file paths categorized by MIME types
file = "C:/Users/rahul/Documents/GitHub/Scientific-Content-Enrichment-in-the-Text-Retrieval-Conference-TREC-Polar-Dynamic-Domain-Dataset/fulldump-path-all-json/"
outFile = 'output-from-url-shortner-all-types' + '.json'
output_file = open(outFile, 'w')

for filepath in iglob(os.path.join(file, '*.json')):
    with open(filepath) as data_file:
        data = json.load(data_file)
        for i in data['files']:
            # Getting a unique md5 hash for the file path relative to the current directory
            d = {}
            d['filePath'] = i
            s = "polar.usc.edu/" + str(uuid.uuid4())[:8]
            d['shortURL'] = s
            mapping['URL'].append(d)
            print "\'" + i + "\'" + " : " + "\'" + s + "\'"
            # print dispString
            # output_file.write(dispString)
    data_file.close()

# Dumping JSON object with mapped shortened URLs and file path
keys = json.dumps(mapping, sort_keys=True)
output_file.write(keys)
output_file.close()
apache-2.0
Python
62a13341610d476ba8ff9e3fd5a3476cbdb18225
Create convert.py
manasRK/glove-gensim
convert.py
convert.py
import gensim

# word2vec embeddings start with a line with the number of lines (tokens?) and
# the number of dimensions of the file. This allows gensim to allocate memory
# accordingly for querying the model. Larger dimensions mean larger memory is
# held captive. Accordingly, this line has to be inserted into the GloVe
# embeddings file.

# GloVe Model File
# More models can be downloaded from http://nlp.stanford.edu/projects/glove/
fname = "glove.6B.50d.txt"

# convert Glove vectors to word2vec format
word2vec_convert_file = "C:/Users/Manas/Desktop/ML/Topics_Multiclass/Zomato_Reviews/Data/IMDB/word2vec_line.txt"  # to be a first line insert

num_lines = sum(1 for line in open(fname))
dims = 50
print '%d lines with %d dimensions' % (num_lines, dims)

with open(word2vec_convert_file, 'w') as f:
    f.write(str(num_lines) + " " + str(dims) + '\n')
    f.close()

model_file = 'glove_model.txt'
filenames = [word2vec_convert_file, fname]
with open(model_file, 'w') as outfile:
    for fname in filenames:
        with open(fname) as infile:
            for line in infile:
                outfile.write(line)
    outfile.close()

# load converted model file
model = gensim.models.Word2Vec.load_word2vec_format(model_file, binary=False)  # GloVe Model

print model.most_similar(positive=['australia'], topn=10)
print model.similarity('woman', 'man')
mit
Python
5d5ccc84eaaec6b6d749a9054f744a5a44f9dac9
add script for reading from PCF8574
lnitram/pi-playground,lnitram/pi-playground
i2c/PCF8574.py
i2c/PCF8574.py
#!/usr/bin/python
import sys
import smbus
import time


# Reads data from PCF8574 and prints the state of each port
def readPCF8574(busnumber, address):
    address = int(address, 16)
    busnumber = int(busnumber)  # was hard-coded to int(1), which ignored the CLI bus argument
    bus = smbus.SMBus(busnumber)
    state = bus.read_byte(address)
    for i in range(0, 8):
        port = "port " + str(i)
        value = 1 & (state >> 7 - i)
        print str(port) + ': ' + str(value)


if len(sys.argv) != 3:
    print "Usage: python PCF8574.py bus address"
    exit(1)

bus = sys.argv[1]
address = sys.argv[2]
readPCF8574(bus, address)
mit
Python
b792a8cb3d61dbac1c48a16585c7bb6725bc06a0
add barebones
riceissa/ssg-riceissa.com
barebones_ssg/ssg.py
barebones_ssg/ssg.py
# hack to get unicode working with jinja2
import sys
reload(sys)
sys.setdefaultencoding('utf-8')

import glob
import metadata as meta
from tag_ontology import *
import commands as c
import json
from jinja2 import Template, Environment, FileSystemLoader
import os

pages_pat = "pages/*.md"
pages_lst = glob.glob(pages_pat)

all_tags = []
page_data = []

for page in pages_lst:
    output = c.run_command("pandoc -f markdown -t json {page}".format(page=page))
    json_lst = json.loads(output)
    file_dict = meta.organize_tags(json_lst, tag_synonyms, tag_implications)
    tags_lst = meta.get_tags(file_dict['json'])
    all_tags.extend(tags_lst)
    json_str = json.dumps(file_dict['json'], separators=(',', ':'))
    body = c.run_command("pandoc -f json -t html", pipe_in=json_str)
    title = meta.get_metadata_field(json_lst, "title")
    math = meta.get_metadata_field(json_lst, "math")
    license = meta.get_metadata_field(json_lst, "license")
    env = Environment(loader=FileSystemLoader('.'))
    skeleton = env.get_template('templates/skeleton.html')
    tags = []
    for tag in tags_lst:
        tags.append({'name': tag, 'path': ("tags/" + tag)})
    final = skeleton.render(body=body, title=title, tags=tags, license=license, math=math)
    inter = os.path.split(os.path.splitext(page)[0])[1]
    write_to = "_site/" + inter
    page_data.append((title, inter, tags_lst))
    with open(write_to, 'w') as f:
        f.write(final)

all_tags = list(set(all_tags))

for tag in all_tags:
    pages = []
    for page_tuple in page_data:
        if tag in page_tuple[2]:
            pages.append({'title': page_tuple[0], 'url': "../" + page_tuple[1]})
    write_to = "_site/tags/" + tag
    env = Environment(loader=FileSystemLoader('.'))
    page_list = env.get_template('templates/page-list.html')
    body = page_list.render(pages=pages)
    skeleton = env.get_template('templates/skeleton.html')
    final = skeleton.render(body=body, title="Tag page for " + tag)
    with open(write_to, 'w') as f:
        f.write(final)
    print write_to

env = Environment(loader=FileSystemLoader('.'))
page_list = env.get_template('templates/page-list.html')
pages = [{'title': tag, 'url': tag} for tag in all_tags]
body = page_list.render(pages=pages)
skeleton = env.get_template('templates/skeleton.html')
final = skeleton.render(title="All tags", body=body)
with open("_site/tags/index", 'w') as f:
    f.write(final)

env = Environment(loader=FileSystemLoader('.'))
page_list = env.get_template('templates/page-list.html')
pages = [{'title': page_tup[0], 'url': page_tup[1]} for page_tup in page_data]
body = page_list.render(pages=pages)
skeleton = env.get_template('templates/skeleton.html')
final = skeleton.render(title="All pages on the site", body=body)
with open("_site/all", 'w') as f:
    f.write(final)
bsd-2-clause
Python
ddbe9de5cfc5b412812096291db6a37d120e03ce
add plotting the distribution of fields and apogee
jobovy/apogee-maps
py/plot_dustwapogee.py
py/plot_dustwapogee.py
###############################################################################
# plot_dustwapogee: plot the dust-map at 5 kpc with the APOGEE fields in the
#                   sample overlayed
###############################################################################
import sys

import numpy
import healpy
from galpy.util import bovy_plot
import apogee.select.apogeeSelect
import dust
import define_rcsample

# nside to work at, 2048 is the max
_NSIDE = 2048


def plot_dustwapogee(plotname):
    # Load the dust map
    green15map = dust.load_green15(5., nest=True, nside_out=_NSIDE)
    green15map[green15map == healpy.UNSEEN] = -1.
    # plot it
    healpy.visufunc.mollview(green15map,
                             nest=True,
                             xsize=4000, min=0., max=.8,
                             format=r'$%g$',
                             title='',
                             cmap='gist_yarg',
                             unit='$A_H\,(\mathrm{mag})$')
    # Load the RC data to get the fields
    data = define_rcsample.get_rcsample()
    loc_ids = numpy.array(list(set(data['LOCATION_ID'])))
    # Load the selection function, just to get the field centers
    apo = apogee.select.apogeeSelect(_justprocessobslog=True)
    theta = numpy.empty(len(loc_ids))
    phi = numpy.empty(len(loc_ids))
    for ii, loc_id in enumerate(loc_ids):
        tl, tb = apo.glonGlat(loc_id)
        theta[ii] = (90. - tb) / 180. * numpy.pi
        phi[ii] = tl / 180. * numpy.pi
    hib = numpy.fabs((numpy.pi / 2. - theta)) > (8. / 180. * numpy.pi)
    healpy.visufunc.projplot(theta[hib], phi[hib], 'o', ms=5., mfc='none',
                             mew=0.8, mec='k')
    lowb = ~hib  # logical NOT of the high-|b| mask
    healpy.visufunc.projplot(theta[lowb], phi[lowb], 'o', ms=5., mfc='none',
                             mec='w', mew=0.8)
    bovy_plot.bovy_end_print(plotname)


if __name__ == '__main__':
    plot_dustwapogee(sys.argv[1])
bsd-3-clause
Python
139a634515061674d3832320791d35ff512d8a5a
Add a snippet.
jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets
python/print_stderr.py
python/print_stderr.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import sys

print("Error message", file=sys.stderr)
mit
Python
2909b4a7e46fe4a466e0c99abf90222c43f34d93
add tests for Every Election wrapper
DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations
polling_stations/apps/data_finder/tests/test_ee_wrapper.py
polling_stations/apps/data_finder/tests/test_ee_wrapper.py
import mock
from django.test import TestCase

from data_finder.helpers import EveryElectionWrapper


# mock get_data() functions
def get_data_exception(self, postcode):
    raise Exception()


def get_data_no_elections(self, postcode):
    return []


def get_data_with_elections(self, postcode):
    return [
        {},                            # no explanation key
        {'explanation': None},         # null explanation key
        {'explanation': 'some text'},  # explanation key contains text
    ]


class EveryElectionWrapperTest(TestCase):

    @mock.patch("data_finder.helpers.EveryElectionWrapper.get_data", get_data_exception)
    def test_exception(self):
        ee = EveryElectionWrapper('AA11AA')
        self.assertFalse(ee.request_success)
        self.assertTrue(ee.has_election())
        self.assertEqual([], ee.get_explanations())

    @mock.patch("data_finder.helpers.EveryElectionWrapper.get_data", get_data_no_elections)
    def test_no_elections(self):
        ee = EveryElectionWrapper('AA11AA')
        self.assertTrue(ee.request_success)
        self.assertFalse(ee.has_election())
        self.assertEqual([], ee.get_explanations())

    @mock.patch("data_finder.helpers.EveryElectionWrapper.get_data", get_data_with_elections)
    def test_elections(self):
        ee = EveryElectionWrapper('AA11AA')
        self.assertTrue(ee.request_success)
        self.assertTrue(ee.has_election())
        self.assertEqual(['some text'], ee.get_explanations())
bsd-3-clause
Python
67d760f0a3ed081d43237e1b2106b86a4e6a56c6
add log handler
jhao104/proxy_pool,jhao104/proxy_pool
Util/LogHandler.py
Util/LogHandler.py
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
   File Name:   LogHandler.py
   Description :
   Author :     JHao
   date:        2017/3/6
-------------------------------------------------
   Change Activity:
                2017/3/6: log handler
-------------------------------------------------
"""
__author__ = 'JHao'

import logging
from logging.handlers import TimedRotatingFileHandler

# log levels
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0


class LogHandler(logging.Logger):
    """
    LogHandler
    """

    def __init__(self, name, level=DEBUG):
        self.name = name
        self.level = level
        logging.Logger.__init__(self, self.name, level=level)
        self.__setFileHandler__()
        self.__setStreamHandler__()

    def __setFileHandler__(self, level=None):
        """
        set file handler
        :param level:
        :return:
        """
        file_name = '../log/%s' % self.name
        # rotate the log into the log directory: one file per day, 15 days kept
        file_handler = TimedRotatingFileHandler(filename=file_name, when='D', interval=1, backupCount=15)
        file_handler.suffix = '%Y%m%d.log'
        if not level:
            file_handler.setLevel(self.level)
        else:
            file_handler.setLevel(level)
        formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
        file_handler.setFormatter(formatter)
        self.addHandler(file_handler)

    def __setStreamHandler__(self, level=None):
        """
        set stream handler
        :param level:
        :return:
        """
        stream_handler = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
        stream_handler.setFormatter(formatter)
        if not level:
            stream_handler.setLevel(self.level)
        else:
            stream_handler.setLevel(level)
        self.addHandler(stream_handler)


if __name__ == '__main__':
    # log = LogHandler("aa")
    # log.error("aa")
    pass
mit
Python
7331e1d1061a7a1ac9abc583d45746facfde9180
Create search-in-a-binary-search-tree.py
tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015
Python/search-in-a-binary-search-tree.py
Python/search-in-a-binary-search-tree.py
# Time:  O(h)
# Space: O(1)

# Given the root node of a binary search tree (BST) and a value.
# You need to find the node in the BST that the node's value equals the given value.
# Return the subtree rooted with that node.
# If such node doesn't exist, you should return NULL.
#
# For example,
#
# Given the tree:
#         4
#        / \
#       2   7
#      / \
#     1   3
#
# And the value to search: 2
# You should return this subtree:
#
#       2
#      / \
#     1   3
#
# In the example above,
# if we want to search the value 5,
# since there is no node with value 5, we should return NULL.


# Definition for a binary tree node.
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution(object):
    def searchBST(self, root, val):
        """
        :type root: TreeNode
        :type val: int
        :rtype: TreeNode
        """
        while root and val != root.val:
            if val < root.val:
                root = root.left
            else:
                root = root.right
        return root
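# A short usage sketch (an addition, not part of the original solution):
# build the example tree from the problem statement and search for 2.
if __name__ == '__main__':
    root = TreeNode(4)
    root.left, root.right = TreeNode(2), TreeNode(7)
    root.left.left, root.left.right = TreeNode(1), TreeNode(3)
    print(Solution().searchBST(root, 2).val)  # 2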
mit
Python
58311387849f8785fa964eb01e728c92bc0d8b61
Create levenshtein.py
MathYourLife/similarity_scores
levenshtein.py
levenshtein.py
# source: http://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance
import numpy as np  # needed for the vectorised rows below


def levenshtein(source, target):
    if len(source) < len(target):
        return levenshtein(target, source)

    # So now we have len(source) >= len(target).
    if len(target) == 0:
        return len(source)

    # We call tuple() to force strings to be used as sequences
    # ('c', 'a', 't', 's') - numpy uses them as values by default.
    source = np.array(tuple(source))
    target = np.array(tuple(target))

    # We use a dynamic programming algorithm, but with the
    # added optimization that we only need the last two rows
    # of the matrix.
    previous_row = np.arange(target.size + 1)
    for s in source:
        # Insertion (target grows longer than source):
        current_row = previous_row + 1

        # Substitution or matching:
        # Target and source items are aligned, and either
        # are different (cost of 1), or are the same (cost of 0).
        current_row[1:] = np.minimum(
                current_row[1:],
                np.add(previous_row[:-1], target != s))

        # Deletion (target grows shorter than source):
        current_row[1:] = np.minimum(
                current_row[1:],
                current_row[0:-1] + 1)

        previous_row = current_row

    return previous_row[-1]
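# A short usage sketch (an addition, not part of the original snippet):
if __name__ == '__main__':
    print(levenshtein('kitten', 'sitting'))  # 3
    print(levenshtein('flaw', 'lawn'))       # 2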
mit
Python
a5b012db4cb4cc8a988c0ed37411194639dd1bbd
add tester.py module to pytools
spacetelescope/stsci.tools
lib/tester.py
lib/tester.py
#!/usr/bin/env python
"""
Package: pytools
Author: Christopher Hanley

Purpose:
========
Provide driver function for package tests.

Dependencies:
=============
- nose 0.10.4 or greater.

Usage Example:
==============
All packages will need to import pytools.tester and add the following
function to the __init__.py of their package:

def test(*args, **kwds):
    thisdir = os.path.dirname(os.path.abspath(__file__))
    pytools.tester.test(curdir=thisdir)

This assumes that all software packages are installed with the structure:

package/
    __init__.py
    modules.py
    /tests

Where the /tests subdirectory contains the nose tests.
"""
from __future__ import division

import os
import sys


def test(*args, **kwds):
    """
    Purpose:
    ========
    test: Run refcore nosetest suite of tests. The tests are located in the
    /tests directory of the installed modules.
    """
    try:
        thisdir = kwds['curdir']
    except KeyError:
        thisdir = os.path.dirname(os.path.abspath(__file__))

    DIRS = ['/tests']
    args = []
    for dirname in DIRS:
        args.append('-w')
        args.append(thisdir + dirname)

    result = False
    try:
        import nose, nose.core
        result = nose.run(argv=args)
    except ImportError:
        print "Nose 0.10.4 or greater is required for running tests."

    return result
bsd-3-clause
Python
6efc045d34f432723b52aa094c1caec3bf102e96
add sparse repeated updates benchmark
diogo149/treeano,diogo149/treeano,diogo149/treeano
benchmarks/sparse_repeated_updates.py
benchmarks/sparse_repeated_updates.py
import numpy as np
import theano
import theano.tensor as T

fX = theano.config.floatX

s = theano.shared(np.ones((10, 1), dtype=fX))
idxs = [0, 1, 1]
# repeated indices accumulate: row 1 is incremented twice
fn = theano.function([], updates=[(s, T.inc_subtensor(s[idxs], s[idxs] ** 2))])
fn()
print s.get_value()
apache-2.0
Python
4b43906004f9bfb6164bb2c0b95efaf1dbb881c8
add py
daiz713/Apricot,daiz713/Apricot,daiz713/Apricot,daiz713/Apricot
correction_image.py
correction_image.py
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Project Apricot
# Copyright (c) 2015 Daiki, Takanori.
mit
Python
fc44d4463045e458796d13b3c97b34cf6ba47f61
Add script to create the player pitch weights.
isuraed/bluechip
bluechip/player/createpitchweights.py
bluechip/player/createpitchweights.py
import random

from player.models import Player, Pitch, PlayerPitchWeight

# TODO: Need to centralize this function call.
random.seed(123456789)

pitch_records = Pitch.objects.all().order_by('id')
pitches_count = pitch_records.count()

for p in Player.objects.all():
    weights = []
    sum_weights = 0
    for _ in xrange(pitches_count):
        mu = 1.0 / pitches_count
        sigma = (2.0 / 3.0) * mu
        w = random.normalvariate(mu, sigma)
        w = max(w, 0.0)
        weights.append(w)
        sum_weights += w

    # Normalize weights before creating records
    for i in xrange(len(weights)):
        weights[i] /= sum_weights

    j = 0
    for pitch in pitch_records:
        ppw = PlayerPitchWeight(player=p, pitch=pitch, weight=weights[j])
        ppw.save()
        j += 1
mit
Python
d3f68c385da4d2fa864ba748f41785be01c26c34
Add py solution for 551. Student Attendance Record I
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
py/student-attendance-record-i.py
py/student-attendance-record-i.py
class Solution(object):
    def checkRecord(self, s):
        """
        :type s: str
        :rtype: bool
        """
        A = False
        L = 0
        for c in s:
            if c == 'L':
                L += 1
                if L > 2:
                    return False
            else:
                L = 0
                if c == 'A':
                    if A:
                        return False
                    else:
                        A = True
        return True
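# A short usage sketch (an addition, not part of the original solution),
# using the two examples from the problem statement:
if __name__ == '__main__':
    print(Solution().checkRecord("PPALLP"))  # True
    print(Solution().checkRecord("PPALLL"))  # False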
apache-2.0
Python
a1ee4d90e0cf159f27274423b989c98844fbeba1
Create mytask1b.py
john136/exercises
ml/mytask1b.py
ml/mytask1b.py
""" Features The objective of this task is to explore the corpus, deals.txt. The deals.txt file is a collection of deal descriptions, separated by a new line, from which we want to glean the following insights: 1. What is the most popular term across all the deals? 2. What is the least popular term across all the deals? 3. How many types of guitars are mentioned across all the deals? """ #################################################### # Solution 2 of Q1: # Use topia.termextract 1.1.0 for term extraction # #################################################### # load term extraction library from topia.termextract import extract extractor = extract.TermExtractor() # define the trivial permissive filter extractor.filter = extract.permissiveFilter # load data openfile = open('..\data\deals.txt', 'r') d = {} numberguitars = 0 for line in openfile: terms = extractor(line) # empty if not terms: continue # take each term from terms for term in terms: # aggregate dictionary for each term if not (term[0] in d): d[term[0]] = 0 d[term[0]] += term[1] # count guitar if 'guitar' in term or 'guitars' in term: numberguitars += 1 else: if 'Guitar' in term or 'Guitars' in term: numberguitars += 1 v = list(d.values()) maxvalue = max(v) minvalue = min(v) maxkeys = [] minkeys = [] for k, v in d.items(): if v == maxvalue: maxkeys.append(k) if v == minvalue: minkeys.append(k) # output results print "1. the most popular terms\n", maxkeys #print "2. the least popular terms\n", minkeys print "3. the number of types of guitars", numberguitars
mit
Python
714e2e2ae5e8412ef522dc64666e6548307eec07
Add the init method to the topic model.
yiyangyi/cc98-tornado
model/topic.py
model/topic.py
class TopicModel(Query):
    def __init__(self, db):
        self.db = db
        self.table_name = "topic"
        super(TopicModel, self).__init__()
mit
Python
168c45fa913670c7f6d89ffc799fa9d13454d734
add multi-layer convolutional net for mnist
mnannt/mnist_experiments
multi-layer.py
multi-layer.py
""" solving mnist classification problem using tensorflow multi-layer architecture """ # Config BATCH_SIZE = 50 ITERATIONS = 20000 # Setup Logging import logging logging_format = '%(asctime)s - %(levelname)s - %(message)s' log_level = logging.DEBUG logging.basicConfig(filename='logfile.log',format=logging_format,level=log_level) # create logger logger = logging.getLogger() logger.setLevel(logging.DEBUG) # create console handler and set level to debug ch = logging.StreamHandler() ch.setLevel(log_level) # create formatter formatter = logging.Formatter(logging_format) # add formatter to ch ch.setFormatter(formatter) # add ch to logger logger.addHandler(ch) logger.debug('STARTING MULTI-LAYER MNIST') # Load MNIST dataset from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('MNIST_data', one_hot=True) # Import TensorFlow and start interactive Session import tensorflow as tf session = tf.InteractiveSession() # Create tf placeholders for input data and predictions # x will be a 2d tensor with all images of the current batch * flattened pixel # of the input image. # y_ will be the probabilities for every image in the batch and every digit # class x = tf.placeholder(tf.float32, shape=[None, 784]) y_ = tf.placeholder(tf.float32, shape=[None, 10]) # functionality to create weight-variables and bias-variables def weight_variable(shape): initial = tf.truncated_normal(shape, stddev=0.1) return tf.Variable(initial) def bias_variable(shape): initial = tf.constant(0.1, shape=shape) return tf.Variable(initial) # convolution and pooling def conv2d(x, W): return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') def max_pool_2x2(x): return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') # first convolutional layer W_conv1 = weight_variable([5, 5, 1, 32]) b_conv1 = bias_variable([32]) # reshape x x_image = tf.reshape(x, [-1,28,28,1]) # convolve x_image h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1) h_pool1 = max_pool_2x2(h_conv1) # second convolutional layer W_conv2 = weight_variable([5, 5, 32, 64]) b_conv2 = bias_variable([64]) h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2) h_pool2 = max_pool_2x2(h_conv2) # densely conntected layer W_fc1 = weight_variable([7 * 7 * 64, 1024]) b_fc1 = bias_variable([1024]) h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64]) h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1) # dropout keep_prob = tf.placeholder(tf.float32) h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob) # readout layer W_fc2 = weight_variable([1024, 10]) b_fc2 = bias_variable([10]) y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2 # train & evaluate model cross_entropy = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv)) train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # start time measurement import time start = time.time() # initial logging logger.debug('starting computation (batch-size: %d, iterations=%d)'%(BATCH_SIZE, ITERATIONS)) session.run(tf.global_variables_initializer()) for i in range(ITERATIONS): batch = mnist.train.next_batch(BATCH_SIZE) if i%100 == 0: train_accuracy = accuracy.eval(feed_dict={ x:batch[0], y_: batch[1], keep_prob: 1.0}) logging.debug("step %d, training accuracy %g"%(i, train_accuracy)) time_elapsed = time.time() - start logger.debug('time elapsed: %.2fs'%(time_elapsed)) logger.debug('mean seconds/batch: 
%fs'%(time_elapsed/(i+1))) train_step.run(feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5}) # stop time measurement end = time.time() computation_time = end - start # print accuracy of test data & computation time logger.debug("test accuracy %g"%accuracy.eval(feed_dict={ x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})) logger.debug('computation time: %.2fs'%(computation_time))
mit
Python
c795f8e21d2b400134cb52ef7eae2cc7e26cfd99
Create ada.py
mduckles/CodeClub
ada.py
ada.py
mit
Python
028831c53d27452168b7a430eb713e01c966acb0
add privacy policy as first legal check
masschallenge/django-accelerator,masschallenge/django-accelerator
accelerator/migrations/0006_add_privacy_policy_legal_check.py
accelerator/migrations/0006_add_privacy_policy_legal_check.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-05-14 09:09
from __future__ import unicode_literals

from django.db import migrations


def add_privacy_policy_legal_check(apps, schema_editor):
    LegalCheck = apps.get_model('accelerator', 'LegalCheck')
    LegalCheck.objects.create(
        name='accepted_privacy_policy',
        title='The MassChallenge Privacy Policy',
        url='https://masschallenge.org/privacy-policy')


def remove_privacy_policy_legal_check(apps, schema_editor):
    LegalCheck = apps.get_model('accelerator', 'LegalCheck')
    LegalCheck.objects.filter(name='accepted_privacy_policy').delete()


class Migration(migrations.Migration):

    dependencies = [
        ('accelerator', '0005_legalcheck_legalcheckacceptance'),
    ]

    operations = [
        migrations.RunPython(add_privacy_policy_legal_check,
                             remove_privacy_policy_legal_check),
    ]
mit
Python
a2975adeedcc4aa33ee8b63bd404675bb3453089
Add broker app.
LegionXI/pydarkstar,AdamGagorik/pydarkstar
apps/broker.py
apps/broker.py
""" Alter item database. """ import logging import sys import os # import hack to avoid PYTHONPATH try: import pydarkstar except ImportError: root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) root, dirs, files = next(os.walk(root)) if 'pydarkstar' in dirs: sys.path.insert(1, root) import pydarkstar else: raise import pydarkstar.logutils import pydarkstar.options import pydarkstar.common class Options(pydarkstar.options.Options): """ Reads options from config file, then from command line. """ def __init__(self): super(Options, self).__init__(config='broker.yaml', description=__doc__) # logging self.verbose = False # error, info, and debug self.silent = False # error only # input and output self.save = False # save config # logging self.add_argument('--verbose', action='store_true', help='report debug, info, and error') self.add_argument('--silent', action='store_true', help='report error only') # output self.add_argument('--save', action='store_true', help='save config file (and exit)') def main(): """ Main function. """ # get options opts = Options() opts.parse_args() pydarkstar.logutils.basicConfig( verbose=opts.verbose, silent=opts.silent, fname='broker.log') logging.debug('start') # log options opts.log_values(level=logging.INFO) # save options if opts.save: opts.save = False opts.dump() return def cleanup(): logging.info('exit\n') if __name__ == '__main__': with pydarkstar.logutils.capture(): main() cleanup()
mit
Python
edb498113441acb68511a478f2ec18c1be4f1384
Add tests for provision state commands
openstack/python-ironicclient,NaohiroTamura/python-ironicclient,openstack/python-ironicclient,NaohiroTamura/python-ironicclient
ironicclient/tests/functional/osc/v1/test_baremetal_node_provision_states.py
ironicclient/tests/functional/osc/v1/test_baremetal_node_provision_states.py
# Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from ironicclient.tests.functional.osc.v1 import base


class ProvisionStateTests(base.TestCase):
    """Functional tests for baremetal node provision state commands."""

    def setUp(self):
        super(ProvisionStateTests, self).setUp()
        self.node = self.node_create()

    def test_deploy_rebuild_undeploy(self):
        """Deploy, rebuild and undeploy node.

        Test steps:
        1) Create baremetal node in setUp.
        2) Check initial "available" provision state.
        3) Set baremetal node "deploy" provision state.
        4) Check baremetal node provision_state field value is "active".
        5) Set baremetal node "rebuild" provision state.
        6) Check baremetal node provision_state field value is "active".
        7) Set baremetal node "undeploy" provision state.
        8) Check baremetal node provision_state field value is "available".
        """
        show_prop = self.node_show(self.node['uuid'], ["provision_state"])
        self.assertEqual("available", show_prop["provision_state"])

        # deploy
        self.openstack('baremetal node deploy {0}'.format(self.node['uuid']))
        show_prop = self.node_show(self.node['uuid'], ["provision_state"])
        self.assertEqual("active", show_prop["provision_state"])

        # rebuild
        self.openstack('baremetal node rebuild {0}'.format(self.node['uuid']))
        show_prop = self.node_show(self.node['uuid'], ["provision_state"])
        self.assertEqual("active", show_prop["provision_state"])

        # undeploy
        self.openstack('baremetal node undeploy {0}'.format(self.node['uuid']))
        show_prop = self.node_show(self.node['uuid'], ["provision_state"])
        self.assertEqual("available", show_prop["provision_state"])

    def test_manage_provide(self):
        """Manage and provide node back.

        Steps:
        1) Create baremetal node in setUp.
        2) Check initial "available" provision state.
        3) Set baremetal node "manage" provision state.
        4) Check baremetal node provision_state field value is "manageable".
        5) Set baremetal node "provide" provision state.
        6) Check baremetal node provision_state field value is "available".
        """
        show_prop = self.node_show(self.node['uuid'], ["provision_state"])
        self.assertEqual("available", show_prop["provision_state"])

        # manage
        self.openstack('baremetal node manage {0}'.format(self.node['uuid']))
        show_prop = self.node_show(self.node['uuid'], ["provision_state"])
        self.assertEqual("manageable", show_prop["provision_state"])

        # provide back
        self.openstack('baremetal node provide {0}'.format(self.node['uuid']))
        show_prop = self.node_show(self.node['uuid'], ["provision_state"])
        self.assertEqual("available", show_prop["provision_state"])
apache-2.0
Python
7dbab1a6615a49513fe16c74550ddf2f52b0f698
Create 4-keys-keyboard.py
tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,yiwen-luo/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,yiwen-luo/LeetCode,yiwen-luo/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,yiwen-luo/LeetCode
Python/4-keys-keyboard.py
Python/4-keys-keyboard.py
# Time:  O(n)
# Space: O(1)

class Solution(object):
    def maxA(self, N):
        """
        :type N: int
        :rtype: int
        """
        if N <= 6:
            return N
        dp = [i for i in range(N + 1)]
        for i in xrange(7, N + 1):
            dp[i % 6] = max(dp[(i - 4) % 6] * 3,
                            dp[(i - 5) % 6] * 4)
        return dp[N % 6]
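# A short usage sketch (an addition, not part of the original solution):
if __name__ == '__main__':
    print(Solution().maxA(3))  # 3
    print(Solution().maxA(7))  # 9 (A, A, A, Ctrl-A, Ctrl-C, Ctrl-V, Ctrl-V)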
mit
Python
be9c88b630ea243afdef3d87ac0b316bd3300281
Add 283-move-zeroes.py
mvj3/leetcode
283-move-zeroes.py
283-move-zeroes.py
""" Question: Move Zeroes Given an array nums, write a function to move all 0's to the end of it while maintaining the relative order of the non-zero elements. For example, given nums = [0, 1, 0, 3, 12], after calling your function, nums should be [1, 3, 12, 0, 0]. Note: You must do this in-place without making a copy of the array. Minimize the total number of operations. Credits: Special thanks to @jianchao.li.fighter for adding this problem and creating all test cases. Performance: 1. Total Accepted: 15730 Total Submissions: 38045 Difficulty: Easy 2. Sorry. We do not have enough accepted submissions. """ class Solution(object): def moveZeroes(self, nums): """ :type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead. """ reached_zero_count = 0 for idx, num in enumerate(nums): if num == 0: reached_zero_count += 1 if num != 0: if reached_zero_count > 0: # make sure has reached at least a zero. nums[idx - reached_zero_count] = num nums[idx] = 0 def test_func(nums, result): Solution().moveZeroes(nums) assert nums == result, [nums, result] test_func([], []) test_func([0], [0]) test_func([1], [1]) test_func([0, 0], [0, 0]) test_func([0, 1], [1, 0]) test_func([1, 1], [1, 1]) test_func([0, 1, 0, 3, 12], [1, 3, 12, 0, 0]) test_func([0, 1, 0, 3, 12, 0], [1, 3, 12, 0, 0, 0]) test_func([0, 1, 0, 0, 0, 3, 12, 0], [1, 3, 12, 0, 0, 0, 0, 0])
mit
Python
5d7f2fdfb1b850aacaf29ba76c7e5ed441e6db63
Create 32losmasgrandes.py
ManuComp/Los-2-mas-altos
32losmasgrandes.py
32losmasgrandes.py
# Team members
# Chavez Pavon Jose Manuel
# Ramirez Ramirez Servando
# Saules Rojas David
# Lopez Adriana
import random

# Function to create a list,
# which we use to simulate the heights of the 32 people.
# We fill it with random values.
def lista():
    l = []  # create the list of "heights"
    for x in range(0, 32):
        l.append(random.randint(1, 300))
    return l  # return the list

# Function to find the tallest person.
# We walk the list two at a time, asking which element is greater.
# The greater elements are appended to a new list.
# The elements that lost a comparison are stored in a dictionary,
# which we later use to find the second tallest.
# var "lista" = the list of heights
# var "dic" = the dictionary of compared (losing) elements
def primero(lista, dic):
    a = 0  # iterator 'a' used to walk the list, initialised to zero
    l2 = []  # list collecting the greater elements
    # escape clause
    if len(lista) == 1:
        print lista[0]
        # call the function segundo, which gives us the second tallest
        segundo(dic[lista[0]])
        return
    # walk the list looking for the greater elements
    while a < len(lista):
        # check which element is greater
        if lista[a] > lista[a + 1]:
            l2.append(lista[a])  # append the greater one to l2
            # record the smaller one keyed by the winner; accumulate instead
            # of overwriting so the champion keeps every element it has beaten
            dic[lista[a]] = dic.get(lista[a], "") + str(lista[a + 1]) + " "
        # the opposite case of the if
        else:
            l2.append(lista[a + 1])  # append the greater one to l2
            dic[lista[a + 1]] = dic.get(lista[a + 1], "") + str(lista[a]) + " "
        a += 2  # advance our iterator two positions
    primero(l2, dic)  # recursive call

# Function to find the second largest element.
# var "cadena" = the string the dictionary holds under the key of
# the largest element
def segundo(cadena):
    repe = cadena.split()  # split the string on whitespace
    # compare numerically: max() on strings would compare lexicographically
    print max(repe, key=int)
    return

l = lista()  # create the list to process
dicc = {}  # dictionary for the elements that were compared but were not greater
primero(l, dicc)  # call the function primero
unlicense
Python
a7ccd7bc02476cfad85280ff1e742671453360de
Add Digital Outcomes and Specialists to frameworks
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
migrations/versions/420_dos_is_coming.py
migrations/versions/420_dos_is_coming.py
"""DOS is coming Revision ID: 420 Revises: 410_remove_empty_drafts Create Date: 2015-11-16 14:10:35.814066 """ # revision identifiers, used by Alembic. revision = '420' down_revision = '410_remove_empty_drafts' from alembic import op import sqlalchemy as sa from app.models import Framework def upgrade(): op.execute("COMMIT") op.execute("ALTER TYPE framework_enum ADD VALUE IF NOT EXISTS 'dos' after 'gcloud'") framework = Framework.query.filter(Framework.slug == 'digital-outcomes-and-specialists').first() if not framework: op.execute(""" INSERT INTO frameworks (name, framework, status, slug) values('Digital Outcomes and Specialists', 'dos', 'coming', 'digital-outcomes-and-specialists') """) def downgrade(): op.execute(""" DELETE FROM frameworks where slug='digital-outcomes-and-specialists' """)
mit
Python
273f0bd289d62c6980f095b0a8bb41a973b0678f
add import script for Bradford
DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations
polling_stations/apps/data_collection/management/commands/import_bradford.py
polling_stations/apps/data_collection/management/commands/import_bradford.py
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter


class Command(BaseXpressDemocracyClubCsvImporter):
    council_id = 'E08000032'
    addresses_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017.tsv'
    stations_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017.tsv'
    elections = ['parl.2017-06-08']
    csv_delimiter = '\t'
bsd-3-clause
Python
12ba7e0c6db91f5ee46a1be9acaece110f98b911
add bigwig file reader
ronin-gw/PyMaSC,ronin-gw/PyMaSC
PyMaSC/bwreader.py
PyMaSC/bwreader.py
import os

import wWigIO


class BigWigFile(object):

    @staticmethod
    def wigToBigWig(wigfile, sizefile, bwfile):
        wWigIO.wigToBigWig(wigfile, sizefile, bwfile)

    @staticmethod
    def bigWigToWig(bwfile, wigfile):
        wWigIO.bigWigToWig(bwfile, wigfile)

    def __init__(self, path, chrom_size=None):
        if not os.path.exists(path) and path != '-':
            raise IOError("input file '{0}' does not exist.".format(path))
        elif path == '-':
            path = "stdin"

        # splitext() returns a (prefix, extension) pair; the extension keeps
        # its leading dot
        prefix, ext = os.path.splitext(path)
        if ext == ".wig":
            bwfile = prefix + ".bw"
            if os.path.exists(bwfile):
                self.path = bwfile
            else:
                if chrom_size is None:
                    raise IOError("Failed to convert wig to bigwig. 'chrom_size' file required.")
                BigWigFile.wigToBigWig(path, chrom_size, bwfile)
                self.path = bwfile
        else:
            self.path = path

        wWigIO.open(self.path)
        self.set_chromsizes()
        self.closed = False

    def set_chromsizes(self):
        self.chromsizes = wWigIO.getChromSize(self.path)

    def _getIntervals(self, chrom, begin, end):
        wigs = wWigIO.getIntervals(self.path, chrom, begin, end)
        if wigs == 1:
            raise ValueError("wWigIO.getIntervals doesn't have correct parameters.")
        if wigs == 2:
            raise ValueError("Fail to open BigWig file.")
        return wigs

    def fetch(self, chrom=None, begin=None, end=None):
        if chrom is None:
            chroms = self.chromsizes.keys()
        else:
            chroms = [chrom]

        if begin is None or begin < 0:
            begin = 0
        if end is None:
            end = 0

        for chrom in chroms:
            for wig in self._getIntervals(chrom, begin, end):
                yield chrom, wig[0], wig[1], wig[2]

    def close(self):
        if not self.closed:
            wWigIO.close(self.path)
            self.closed = True

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def __del__(self):
        self.close()
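# A short usage sketch (an addition, not part of the original commit);
# 'example.bw' is a hypothetical file name used for illustration only.
if __name__ == '__main__':
    with BigWigFile('example.bw') as bw:
        for chrom, begin, end, value in bw.fetch():
            print(chrom, begin, end, value)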
mit
Python