Dataset schema:
    repo_name   stringlengths   5 .. 100
    path        stringlengths   4 .. 231
    language    stringclasses   1 value
    license     stringclasses   15 values
    size        int64           6 .. 947k
    score       float64         0 .. 0.34
    prefix      stringlengths   0 .. 8.16k
    middle      stringlengths   3 .. 512
    suffix      stringlengths   0 .. 8.17k
CodeRiderz/rojak
rojak-analyzer/convert_13_labels_to_7_labels.py
Python
bsd-3-clause
1,929
0.004147
import csv
from bs4 import BeautifulSoup
from collections import Counter
import re
import os

OUTPUT_NAME = os.getenv('OUTPUT_NAME', 'data_detikcom_labelled_740_7_class.csv')

csv_file = open('data_detikcom_labelled_740.csv')
csv_reader = csv.DictReader(csv_file)

# Transform individual label to candidate pair label
label_map = {
    'pos_ahok': 'pos_ahok_djarot',
    'pos_djarot': 'pos_ahok_djarot',
    'pos_anies': 'pos_anies_sandi',
    'pos_sandi': 'pos_anies_sandi',
    'pos_agus': 'pos_agus_sylvi',
    'pos_sylvi': 'pos_agus_sylvi',
    'neg_ahok': 'neg_ahok_djarot',
    'neg_djarot': 'neg_ahok_djarot',
    'neg_anies': 'neg_anies_sandi',
    'neg_sandi': 'neg_anies_sandi',
    'neg_agus': 'neg_agus_sylvi',
    'neg_sylvi': 'neg_agus_sylvi',
    'oot': 'oot'
}

fields = ['title', 'raw_content', 'labels']
train_file = open(OUTPUT_NAME, 'w')
csv_writer = csv.DictWriter(train_file, fields)
csv_writer.writeheader()

for row in csv_reader:
    title = row['title']
    raw_content = row['raw_content']
    labels = []
    label_1 = row['sentiment_1']
    if label_1 != '':
        candidate_pair_label = label_map[label_1]
        if not candidate_pair_label in labels:
            labels.append(candidate_pair_label)
    label_2 = row['sentiment_2']
    if label_2 != '':
        candidate_pair_label = label_map[label_2]
        if not candidate_pair_label in labels:
            labels.append(candidate_pair_label)
    label_3 = row['sentiment_3']
    if label_3 != '':
        candidate_pair_label = label_map[label_3]
        if not candidate_pair_label in labels:
            labels.append(candidate_pair_label)

    # Skip content if no label exists
    if not labels:
        continue

    label_str = ','.join(labels)
    data_row = {'title': title, 'raw_content': raw_content, 'labels': label_str}
    csv_writer.writerow(data_row)

print OUTPUT_NAME, 'created'

csv_file.close()
train_file.close()
Iotic-Labs/py-IoticAgent
src/IoticAgent/Datatypes.py
Python
apache-2.0
4,222
0.005921
# Copyright (c) 2016 Iotic Labs Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://github.com/Iotic-Labs/py-IoticAgent/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Constants to hide XSD Datatypes used by Point Values and Properties.

These help to describe the data in a feed so the receiving Thing can know
what kind of data to expect.

See also http://www.w3.org/TR/xmlschema-2/#built-in-datatypes
"""

from __future__ import unicode_literals

BASE64 = 'base64Binary'
'''Represents a sequence of binary octets (bytes) encoded according to RFC 2045,
the standard defining the MIME types (look under "6.8 Base64 Content-Transfer-Encoding").
'''

BOOLEAN = 'boolean'
'''A Boolean true or false value. Representations of true are "true" and "1";
false is denoted as "false" or "0".'''

BYTE = 'byte'
'''A signed 8-bit integer in the range [-128, +127]. Derived from the short datatype.'''

UNSIGNED_BYTE = 'unsignedByte'
'''An unsigned 8-bit integer in the range [0, 255]. Derived from the unsignedShort datatype.'''

DATE = 'date'
'''Represents a specific date. The syntax is the same as that for the date part
of dateTime, with an optional time zone indicator. Example: "1889-09-24".
'''

DATETIME = 'dateTime'
'''Represents a specific instant of time. It has the form YYYY-MM-DDThh:mm:ss
followed by an optional time-zone suffix.

`YYYY` is the year, `MM` is the month number, `DD` is the day number,
`hh` the hour in 24-hour format, `mm` the minute, and `ss` the second
(a decimal and fraction are allowed for the seconds part).

The optional zone suffix is either `"Z"` for Universal Coordinated Time (UTC),
or a time offset of the form `"[+|-]hh:mm"`, giving the difference between UTC
and local time in hours and minutes.

Example: "2004-10-31T21:40:35.5-07:00" is a time on Halloween 2004 in Mountain
Standard time. The equivalent UTC would be "2004-11-01T04:40:35.5Z".
'''

DECIMAL = 'decimal'
'''Any base-10 fixed-point number. There must be at least one digit to the left
of the decimal point, and a leading "+" or "-" sign is allowed.
Examples: "42", "-3.14159", "+0.004".
'''

DOUBLE = 'double'
'''A 64-bit floating-point decimal number as specified in the IEEE 754-1985
standard. The external form is the same as the float datatype.
'''

FLOAT = 'float'
'''A 32-bit floating-point decimal number as specified in the IEEE 754-1985
standard. Allowable values are the same as in the decimal type, optionally
followed by an exponent, or one of the special values "INF" (positive
infinity), "-INF" (negative infinity), or "NaN" (not a number). The exponent
starts with either "e" or "E", optionally followed by a sign, and one or more
digits. Example: "6.0235e-23".
'''

INT = 'int'
'''Represents a 32-bit signed integer in the range
[-2,147,483,648, 2,147,483,647]. Derived from the long datatype.'''

INTEGER = 'integer'
'''Represents a signed integer. Values may begin with an optional "+" or "-"
sign. Derived from the decimal datatype.'''

LONG = 'long'
'''A signed, extended-precision integer; at least 18 digits are guaranteed.
Derived from the integer datatype.
'''

STRING = 'string'
'''Any sequence of zero or more characters.'''

TIME = 'time'
'''A moment of time that repeats every day. The syntax is the same as that for
dateTime, omitting everything up to and including the separator "T".
Examples: "00:00:00" is midnight, and "13:04:00" is an hour and four minutes
after noon.
'''

URI = 'anyURI'
'''The data must conform to the syntax of a Uniform Resource Identifier (URI),
as defined in RFC 2396 as amended by RFC 2732.
Example: "http://www.nmt.edu/tcc/" is the URI for the New Mexico Tech Computer
Center's index page.
'''

IRI = 'IRI'
'''Only for use with property API calls. Used to handle properties which
require an IRI (URIRef) value.'''
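The constants above are plain strings, so a caller can branch on them directly. As a hypothetical illustration (not part of the py-IoticAgent API), one might map a few of them to Python types when sanity-checking values before sharing them on a feed:

# Hypothetical helper: map a few XSD datatype constants to the Python type
# a caller might validate against. XSD_TO_PYTHON and check_value are
# illustrative names, not part of py-IoticAgent.
XSD_TO_PYTHON = {
    'boolean': bool,
    'int': int,
    'integer': int,
    'double': float,
    'float': float,
    'string': str,
}

def check_value(xsd_type, value):
    """Return True if `value` matches the Python type expected for `xsd_type`."""
    expected = XSD_TO_PYTHON.get(xsd_type)
    return expected is not None and isinstance(value, expected)

print(check_value('int', 42))        # True
print(check_value('boolean', 'no'))  # False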
KayaBaber/Computational-Physics
Assignment_3_chaos_and_pendulums/Pre-GitHub-versions/Phys440_Assignment03_Prob1 (1).py
Python
mit
1,186
0.009434
'''
Kaya Baber
Physics 440 - Computational Physics
Assignment 3
Problem 1

Hamiltonian Dynamics of a Nonlinear Pendulum

Consider a simple pendulum of length l in a gravitational field g. The
frequency in the limit of small angles is Ω_0 ≡ √(g/l), but do not assume
the limit of small angles for the following calculations.

(a) Start with the Hamiltonian and develop two first-order equations for the
    angle θ and its conjugate momentum p_θ.
        (d^2θ/dt^2) = -(g/l) sin(θ)
        θ_dot   = p_θ / (m l^2)
        p_θ_dot = -m g l sin(θ)

(b) Use a second-order leapfrog algorithm to compute the motion of the
    pendulum. If we choose a computational unit of time [T] = Ω_0^(-1), then
    2π computational time units equals one period in the limit of small
    oscillations. Another way to think about it is that we can choose a set
    of units such that Ω_0 = 1. Make a graph of phase-space trajectories for
    a variety of initial conditions.

(c) Liouville's Theorem states that the phase-space volume of an
    infinitesimally close ensemble of states is conserved. Demonstrate
    Liouville's Theorem by considering an ensemble of closely spaced initial
    conditions.
'''
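The assignment above describes the integrator, but this excerpt stops at the problem statement. Below is a minimal sketch of a second-order (kick-drift-kick) leapfrog step for part (b), in units where Ω_0 = m = l = 1 (so θ'' = -sin θ); the function name, step size, and initial conditions are illustrative assumptions, not the student's actual code.

import numpy as np

def leapfrog_pendulum(theta0, p0, dt=0.01, steps=10000):
    """Kick-drift-kick leapfrog for theta'' = -sin(theta) (units with m = l = g = 1)."""
    theta, p = theta0, p0
    thetas, ps = [theta], [p]
    for _ in range(steps):
        p_half = p - 0.5 * dt * np.sin(theta)   # half kick
        theta = theta + dt * p_half             # full drift
        p = p_half - 0.5 * dt * np.sin(theta)   # half kick
        thetas.append(theta)
        ps.append(p)
    return np.array(thetas), np.array(ps)

# One phase-space trajectory; plotting th vs p traces the orbit.
th, p = leapfrog_pendulum(theta0=1.0, p0=0.0)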
kuujo/active-redis
examples/set.py
Python
mit
554
0.00722
# Copyright (c) 2013 Jordan Halterman <jordan.halterman@gmail.com>
# See LICENSE for details.
import sys, os
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))

from active_redis import ActiveRedis

redis = ActiveRedis()

# Create an unnamed set.
myset = redis.set()

# Add items to the set.
myset.add('foo')
myset.add('bar')

# We can also create a named set by passing a key to the constructor.
myset = redis.set('myset')
myset.add('foo')

del myset
myset = redis.set('myset')
print myset  # set([u'foo'])

myset.delete()
print myset  # set()
zesk06/scores
tests/common_test.py
Python
mit
524
0
#!/usr/bin/env python
# encoding: utf-8
"""A test module"""

import datetime
import tempfile
import os
import shutil

import scores.common as common


class TestCommon(object):
    """A Test class"""

    def test_date_function(self):
        """Test"""
        a_date = datetime.datetime.now()
        a_date = a_date.replace(microsecond=0)
        tstamp = common.datetime_to_timestamp(a_date)
        assert tstamp > 0
        converted_bck = common.timestamp_to_datetime(tstamp)
        assert converted_bck == a_date
lsantagata/smslib_ui
src/configuration_management_tools/migrations/0002_auto_20170731_0022.py
Python
gpl-3.0
608
0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-31 00:22
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('configuration_management_tools', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='smslibgateways',
            options={'managed': False, 'verbose_name': 'Gateways'},
        ),
        migrations.AlterModelOptions(
            name='smslibnumberroutes',
            options={'managed': False, 'verbose_name': 'Routes'},
        ),
    ]
volpino/Yeps-EURAC
scripts/scramble/scripts/generic.py
Python
mit
1,149
0.035683
import os, sys, shutil

# change back to the build dir
if os.path.dirname( sys.argv[0] ) != "":
    os.chdir( os.path.dirname( sys.argv[0] ) )

# find setuptools
scramble_lib = os.path.join( "..", "..", "..", "lib" )
sys.path.append( scramble_lib )
import get_platform  # fixes fat python 2.5
from ez_setup import use_setuptools
use_setuptools( download_delay=8, to_dir=scramble_lib )
from setuptools import *

# get the tag
if os.access( ".galaxy_tag", os.F_OK ):
    tagfile = open( ".galaxy_tag", "r" )
    tag = tagfile.readline().strip()
else:
    tag = None

# in case you're running this by hand from a dirty module source dir
for dir in [ "build", "dist" ]:
    if os.access( dir, os.F_OK ):
        print "scramble.py: removing dir:", dir
        shutil.rmtree( dir )

# reset args for distutils
me = sys.argv[0]
sys.argv = [ me ]
sys.argv.append( "egg_info" )
if tag is not None:
    #sys.argv.append( "egg_info" )
    sys.argv.append( "--tag-build=%s" % tag )
# svn revision (if any) is handled directly in tag-build
sys.argv.append( "--no-svn-revision" )
sys.argv.append( "bdist_egg" )

# do it
execfile( "setup.py", globals(), locals() )
levilucio/SyVOLT
UMLRT2Kiltera_MM/graph_MT_post__OUT2.py
Python
mit
2,604
0.024578
""" __graph_MT_post__OUT2.py___________________________________________________________ Automatically generated graphical appearance ---> MODIFY DIRECTLY WI
TH CAUTION _______________________________________________________________________
___ """ import tkFont from graphEntity import * from GraphicalForm import * from ATOM3Constraint import * class graph_MT_post__OUT2(graphEntity): def __init__(self, x, y, semObject = None): self.semanticObject = semObject self.sizeX, self.sizeY = 172, 82 graphEntity.__init__(self, x, y) self.ChangesAtRunTime = 0 self.constraintList = [] if self.semanticObject: atribs = self.semanticObject.attributesToDraw() else: atribs = None self.graphForms = [] self.imageDict = self.getImageDict() def DrawObject(self, drawing, showGG = 0): self.dc = drawing if showGG and self.semanticObject: self.drawGGLabel(drawing) h = drawing.create_oval(self.translate([189.0, 62.0, 189.0, 62.0]), tags = (self.tag, 'connector'), outline = '', fill = '' ) self.connectors.append( h ) h = drawing.create_rectangle(self.translate([20.0, 20.0, 190.0, 100.0]), tags = self.tag, stipple = '', width = 1, outline = 'black', fill = 'moccasin') self.gf4 = GraphicalForm(drawing, h, "gf4") self.graphForms.append(self.gf4) font = tkFont.Font( family='Arial', size=12, weight='normal', slant='roman', underline=0) h = drawing.create_text(self.translate([81.0, 37.0, 81.0, 12.0])[:2], tags = self.tag, font=font, fill = 'black', anchor = 'center', text = 'MT_post__OUT2', width = '0', justify= 'left', stipple='' ) self.gf128 = GraphicalForm(drawing, h, 'gf128', fontObject=font) self.graphForms.append(self.gf128) helv12 = tkFont.Font ( family="Helvetica", size=12, weight="bold" ) h = drawing.create_text(self.translate([-3, -3]), font=helv12, tags = (self.tag, self.semanticObject.getClass()), fill = "black", text=self.semanticObject.MT_label__.toString()) self.attr_display["MT_label__"] = h self.gf_label = GraphicalForm(drawing, h, 'gf_label', fontObject=helv12) self.graphForms.append(self.gf_label) def postCondition( self, actionID, * params): return None def preCondition( self, actionID, * params): return None def getImageDict( self ): imageDict = dict() return imageDict new_class = graph_MT_post__OUT2
SpaceKatt/CSPLN
scripts/create_web_apps_linux.py
Python
gpl-3.0
5,043
0.00238
r'''
<license>
CSPLN_MaryKeelerEdition; Manages images to which notes can be added.
Copyright (C) 2015-2016, Thomas Kercheval

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.
___________________________________________________________</license>

Description:
    For creating CSPLN webapps for LINUX, from scaffolding.

Inputs:
    Version number, of MKE_vxx_xx_xx scaffolding file.
        Where each x corresponds to a current version number.
        Input as "xx_xx_xx"
    Number of web applications.

Outputs:
    Web applications, number depends on Input.
    Puts web2py.py in each web_app (not included in windows version).
    Puts scaffolding (current app version) into each web2py frame.
    Renames scaffolding application to 'MKE_Static_Name'.

Currently:
To Do:
Done:
'''

import os, sys, shutil

from the_decider import resolve_relative_path as resolve_path


def check_file_exist(path):
    """Check if the file at the given path exists."""
    if os.path.exists(path):
        pass
    else:
        sys.exit('File {} doesn\'t exist'.format(path))
    return None


def grab_out_paths(number_apps, app_path):
    """
    From the number of applications necessary, create a list of
    pathnames where we will create linux applications.
    """
    out_dir = resolve_path(__file__, app_path)
    project_part = 'P{}'
    os_name = "linux"  # fills the {os} slot in app_path
    out_paths = []
    for num in range(1, number_apps + 1):
        strin = project_part.format(str(num))
        print "{part}, preparing for generation.".format(part=strin)
        out_paths.append(out_dir.format(os=os_name, pat=strin))
    return out_paths


def grab_web2py_frame():
    """Grab the path of the web2py framework and check its existence."""
    webframe = resolve_path(__file__, '../apps/scaffolding/linux/web2py')
    webdotpy = resolve_path(__file__, '../apps/scaffolding/common/web2py.py')
    check_file_exist(webdotpy)
    check_file_exist(webframe)
    return webframe, webdotpy


def grab_scaffold_app(current_version):
    """Grab the path of our scaffolding and check its existence."""
    mkever = '../apps/scaffolding/version/MKE_v{}'.format(current_version)
    mkever = resolve_path(__file__, mkever)
    check_file_exist(mkever)
    return mkever


def copy_webframez(number_apps, app_path):
    """
    For each path where we intend to create a linux application, create a
    copy of the web2py framework and a modified copy of web2py.py.
    """
    webframe, webdotpy = grab_web2py_frame()
    out_paths = grab_out_paths(number_apps, app_path)
    for path in out_paths:
        shutil.copytree(webframe, os.path.join(path, 'web2py'))
        next_path = os.path.join(path, 'web2py')
        shutil.copy(webdotpy, next_path)
        print '  web2py frame copied to: {}'.format(path)
        print '  web2py.py copied to: {}'.format(next_path)
    return out_paths


def modify_out_paths(int_paths):
    """
    Modifies the out_paths from the locations of the web2py framework
    to where our applications will be generated.
    """
    mod_out = []
    addition = 'web2py/applications'
    for path in int_paths:
        new_path = os.path.join(path, addition)
        mod_out.append(new_path)
    return mod_out


def grab_filename_from_path(in_path):
    """Input a path, return last chunk."""
    import ntpath
    head, tail = ntpath.split(in_path)
    return tail or ntpath.basename(head)


def copy_app(version, out_paths):
    """
    Creates an application for every copy of the web2py framework,
    from scaffolding application.
    """
    scaff_app = grab_scaffold_app(version)
    filename = grab_filename_from_path(scaff_app)
    for path in out_paths:
        shutil.copytree(scaff_app, os.path.join(path, filename))
        old_name = os.path.join(path, filename)
        new_name = os.path.join(path, 'MKE_Static_Name')
        os.rename(old_name, new_name)
    return None


def deploy_scaffolding(version_now, num_apps, app_path):
    """
    Deploys the web2py framework and the current version of our scaffolding,
    as many times as is necessary.
    """
    print "\n  Creating Linux applications...\n" + "_"*79
    out_paths = copy_webframez(num_apps, app_path)
    new_paths = modify_out_paths(out_paths)
    copy_app(version_now, new_paths)
    print "_"*79
    return None


if __name__ == "__main__":
    NUM_APPS = 10
    VERSION = '00_01_02'
    APP_PATH = '../apps/web_apps/{os}/{pat}'
    deploy_scaffolding(VERSION, NUM_APPS, APP_PATH)
mbayon/TFG-MachineLearning
venv/lib/python3.6/site-packages/pandas/tests/io/test_html.py
Python
mit
33,092
0
from __future__ import print_function

import glob
import os
import re
import warnings

try:
    from importlib import import_module
except ImportError:
    import_module = __import__

from distutils.version import LooseVersion

import pytest

import numpy as np
from numpy.random import rand

from pandas import (DataFrame, MultiIndex, read_csv, Timestamp, Index,
                    date_range, Series)
from pandas.compat import (map, zip, StringIO, string_types, BytesIO,
                           is_platform_windows, PY3)
from pandas.io.common import URLError, urlopen, file_path_to_url
from pandas.io.html import read_html
from pandas._libs.parsers import ParserError

import pandas.util.testing as tm
from pandas.util.testing import makeCustomDataframe as mkdf, network


def _have_module(module_name):
    try:
        import_module(module_name)
        return True
    except ImportError:
        return False


def _skip_if_no(module_name):
    if not _have_module(module_name):
        pytest.skip("{0!r} not found".format(module_name))


def _skip_if_none_of(module_names):
    if isinstance(module_names, string_types):
        _skip_if_no(module_names)
        if module_names == 'bs4':
            import bs4
            if bs4.__version__ == LooseVersion('4.2.0'):
                pytest.skip("Bad version of bs4: 4.2.0")
    else:
        not_found = [module_name for module_name in module_names
                     if not _have_module(module_name)]
        if set(not_found) & set(module_names):
            pytest.skip("{0!r} not found".format(not_found))
        if 'bs4' in module_names:
            import bs4
            if bs4.__version__ == LooseVersion('4.2.0'):
                pytest.skip("Bad version of bs4: 4.2.0")


DATA_PATH = tm.get_data_path()


def assert_framelist_equal(list1, list2, *args, **kwargs):
    assert len(list1) == len(list2), ('lists are not of equal size '
                                      'len(list1) == {0}, '
                                      'len(list2) == {1}'.format(len(list1),
                                                                 len(list2)))
    msg = 'not all list elements are DataFrames'
    both_frames = all(map(lambda x, y: isinstance(x, DataFrame) and
                          isinstance(y, DataFrame), list1, list2))
    assert both_frames, msg
    for frame_i, frame_j in zip(list1, list2):
        tm.assert_frame_equal(frame_i, frame_j, *args, **kwargs)
        assert not frame_i.empty, 'frames are both empty'


def test_bs4_version_fails():
    _skip_if_none_of(('bs4', 'html5lib'))
    import bs4
    if bs4.__version__ == LooseVersion('4.2.0'):
        tm.assert_raises(AssertionError, read_html,
                         os.path.join(DATA_PATH, "spam.html"), flavor='bs4')


class ReadHtmlMixin(object):

    def read_html(self, *args, **kwargs):
        kwargs.setdefault('flavor', self.flavor)
        return read_html(*args, **kwargs)


class TestReadHtml(ReadHtmlMixin):
    flavor = 'bs4'
    spam_data = os.path.join(DATA_PATH, 'spam.html')
    spam_data_kwargs = {}
    if PY3:
        spam_data_kwargs['encoding'] = 'UTF-8'
    banklist_data = os.path.join(DATA_PATH, 'banklist.html')

    @classmethod
    def setup_class(cls):
        _skip_if_none_of(('bs4', 'html5lib'))

    def test_to_html_compat(self):
        df = mkdf(4, 3, data_gen_f=lambda *args: rand(), c_idx_names=False,
                  r_idx_names=False).applymap('{0:.3f}'.format).astype(float)
        out = df.to_html()
        res = self.read_html(out, attrs={'class': 'dataframe'},
                             index_col=0)[0]
        tm.assert_frame_equal(res, df)

    @network
    def test_banklist_url(self):
        url = 'http://www.fdic.gov/bank/individual/failed/banklist.html'
        df1 = self.read_html(url, 'First Federal Bank of Florida',
                             attrs={"id": 'table'})
        df2 = self.read_html(url, 'Metcalf Bank', attrs={'id': 'table'})
        assert_framelist_equal(df1, df2)

    @network
    def test_spam_url(self):
        url = ('http://ndb.nal.usda.gov/ndb/foods/show/1732?fg=&man=&'
               'lfacet=&format=&count=&max=25&offset=&sort=&qlookup=spam')
        df1 = self.read_html(url, '.*Water.*')
        df2 = self.read_html(url, 'Unit')
        assert_framelist_equal(df1, df2)

    @tm.slow
    def test_banklist(self):
        df1 = self.read_html(self.banklist_data, '.*Florida.*',
                             attrs={'id': 'table'})
        df2 = self.read_html(self.banklist_data, 'Metcalf Bank',
                             attrs={'id': 'table'})
        assert_framelist_equal(df1, df2)

    def test_spam_no_types(self):
        # infer_types removed in #10892
        df1 = self.read_html(self.spam_data, '.*Water.*')
        df2 = self.read_html(self.spam_data, 'Unit')
        assert_framelist_equal(df1, df2)
        assert df1[0].iloc[0, 0] == 'Proximates'
        assert df1[0].columns[0] == 'Nutrient'

    def test_spam_with_types(self):
        df1 = self.read_html(self.spam_data, '.*Water.*')
        df2 = self.read_html(self.spam_data, 'Unit')
        assert_framelist_equal(df1, df2)
        assert df1[0].iloc[0, 0] == 'Proximates'
        assert df1[0].columns[0] == 'Nutrient'

    def test_spam_no_match(self):
        dfs = self.read_html(self.spam_data)
        for df in dfs:
            assert isinstance(df, DataFrame)

    def test_banklist_no_match(self):
        dfs = self.read_html(self.banklist_data, attrs={'id': 'table'})
        for df in dfs:
            assert isinstance(df, DataFrame)

    def test_spam_header(self):
        df = self.read_html(self.spam_data, '.*Water.*', header=1)[0]
        assert df.columns[0] == 'Proximates'
        assert not df.empty

    def test_skiprows_int(self):
        df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=1)
        df2 = self.read_html(self.spam_data, 'Unit', skiprows=1)
        assert_framelist_equal(df1, df2)

    def test_skiprows_xrange(self):
        df1 = self.read_html(self.spam_data, '.*Water.*',
                             skiprows=range(2))[0]
        df2 = self.read_html(self.spam_data, 'Unit', skiprows=range(2))[0]
        tm.assert_frame_equal(df1, df2)

    def test_skiprows_list(self):
        df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=[1, 2])
        df2 = self.read_html(self.spam_data, 'Unit', skiprows=[2, 1])
        assert_framelist_equal(df1, df2)

    def test_skiprows_set(self):
        df1 = self.read_html(self.spam_data, '.*Water.*',
                             skiprows=set([1, 2]))
        df2 = self.read_html(self.spam_data, 'Unit', skiprows=set([2, 1]))
        assert_framelist_equal(df1, df2)

    def test_skiprows_slice(self):
        df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=1)
        df2 = self.read_html(self.spam_data, 'Unit', skiprows=1)
        assert_framelist_equal(df1, df2)

    def test_skiprows_slice_short(self):
        df1 = self.read_html(self.spam_data, '.*Water.*', skiprows=slice(2))
        df2 = self.read_html(self.spam_data, 'Unit', skiprows=slice(2))
        assert_framelist_equal(df1, df2)

    def test_skiprows_slice_long(self):
        df1 = self.read_html(self.spam_data, '.*Water.*',
                             skiprows=slice(2, 5))
        df2 = self.read_html(self.spam_data, 'Unit',
                             skiprows=slice(4, 1, -1))
        assert_framelist_equal(df1, df2)

    def test_skiprows_ndarray(self):
        df1 = self.read_html(self.spam_data, '.*Water.*',
                             skiprows=np.arange(2))
        df2 = self.read_html(self.spam_data, 'Unit', skiprows=np.arange(2))
        assert_framelist_equal(df1, df2)

    def test_skiprows_invalid(self):
        with tm.assert_raises_regex(TypeError, 'is not a valid type '
                                    'for skipping rows'):
            self.read_html(self.spam_data, '.*Water.*', skiprows='asdf')

    def test_index(self):
        df1 = self.read_html(self.spam_data, '.*Water.*', index_col=0)
        df2 = self.read_html(self.spam_data, 'Unit', index_col=0)
        assert_framelist_equal(df1, df2)

    def test_header_and_index_no_types(self):
        df1 = self.read_html(self.spam_data, '.
scream7/leetcode
algorithms/python/28.py
Python
apache-2.0
315
0
class Solution(object):
    def strStr(self, haystack, needle):
        """
        :type haystack: str
        :type needle: str
        :rtype: int
        """
        for i in range(len(haystack) - len(needle) + 1):
            if haystack[i: i + len(needle)] == needle:
                return i
        return -1
blitzagency/django-chatterbox
chatterbox/utils/youtube.py
Python
mit
4,331
0.000693
import logging

from ..models import Activity
from .date import activity_stream_date_to_datetime, datetime_to_string

log = logging.getLogger(__name__)


def activity_from_dict(data):
    log.debug("Converting YouTube dict to Activity Model")
    activity_dict = activity_dict_from_dict(data)
    return Activity.from_activity_dict(activity_dict)


def activity_dict_from_dict(blob):
    log.debug("Converting YouTube dict to activity dict: %s", blob)
    stream_object = {}
    stream_object["@context"] = "http://www.w3.org/ns/activitystreams"
    stream_object["@type"] = "Activity"

    date = blob.get("snippet").get("publishedAt")
    date = activity_stream_date_to_datetime(date)
    stream_object["published"] = datetime_to_string(date)

    stream_object["provider"] = {
        "@type": "Service",
        "displayName": "YouTube"
    }

    snippet = blob.get("snippet")
    stream_object["actor"] = {
        "@type": "Person",
        "@id": "https://www.youtube.com/user/{}".format(snippet.get("channelTitle")),
        "displayName": snippet.get("channelTitle"),
    }

    stream_object["object"] = {
        "@id": "https://www.youtube.com/watch?v={}".format(blob.get("id").get("videoId")),
        "@type": "Video",
        "displayName": snippet.get("title"),
        "url": [{
            "href": "https://www.youtube.com/watch?v={}".format(blob.get("id").get("videoId")),
            "@type": "Link"
        }],
        "content": snippet.get("description"),
        "youtube:etag": blob.get("etag"),
        "youtube:kind": blob.get("kind"),
        "youtube:id:kind": blob.get("id").get("kind"),
        "youtube:channelId": snippet.get("channelId"),
        "youtube:liveBroadcastContent": snippet.get("liveBroadcastContent"),
        "image": [
            {
                "@type": "Link",
                "href": snippet.get("thumbnails").get("default").get("url"),
                "mediaType": "image/jpeg",
                "youtube:resolution": "default"
            },
            {
                "@type": "Link",
                "href": snippet.get("thumbnails").get("medium").get("url"),
                "mediaType": "image/jpeg",
                "youtube:resolution": "medium"
            },
            {
                "@type": "Link",
                "href": snippet.get("thumbnails").get("high").get("url"),
                "mediaType": "image/jpeg",
                "youtube:resolution": "high"
            },
        ]
    }
    return stream_object


"""
{
    "@context": "http://www.w3.org/ns/activitystreams",
    "@type": "Activity",  ------ Abstract wrapper
    "published": "2015-02-10T15:04:55Z",
    "provider": {
        "@type": "Service",
        "displayName": "Twitter|FaceBook|Instagram|YouTube"
    },
    "actor": {
        "@type": "Person",
        "@id": "https://www.twitter.com/{{user.screen_name}}",
        "displayName": "Martin Smith",
        "url": "http://example.org/martin",
        "image": {
            "@type": "Link",
            "href": "http://example.org/martin/image.jpg",
            "mediaType": "image/jpeg"
        }
    },
    ------------------------------------------------------
    "object" : {
        "@id": "urn:example:blog:abc123/xyz",
        "@type": "Note",
        "url": "http://example.org/blog/2011/02/entry",
        "content": "This is a short note"
    },
    ------------------------------------------------------
    "object" : {
        "@id": "urn:example:blog:abc123/xyz",
        "@type": "Video",
        "displayName": "A Simple Video",
        "url": "http://example.org/video.mkv",
        "duration": "PT2H"
    },
    ------------------------------------------------------
    "object" : {
        "@id": "urn:example:blog:abc123/xyz",
        "@type": "Image",
        "displayName": "A Simple Image",
        "content": "any messages?",
        "url": [
            {
                "@type": "Link",
                "href": "http://example.org/image.jpeg",
                "mediaType": "image/jpeg"
            },
            {
                "@type": "Link",
                "href": "http://example.org/image.png",
                "mediaType": "image/png"
            }
        ]
    },
}
"""
tzechiop/PANYNJ-Regression-Analysis-for-Toll-Traffic-Elasticity
mergeResults.py
Python
mit
2,928
0.003415
# -*- coding: utf-8 -*-
"""
Created on Fri Oct  7 13:10:05 2016

@author: thasegawa
"""

import os
import pandas as pd

economic_list = list(pd.read_excel('data\\fields\\economicIndicators_Real.xlsx', header=None)[0])
#fuel_list = list(pd.read_excel('data\\fields\\fuel_binary.xlsx', header=None)[0]) + [None]
fuel_list = list(pd.read_excel('data\\fields\\fuel_binary.xlsx', header=None)[0])

# Iterate through each regression result and retrieve R^2 and coefficient
group_list = ['pathmid', 'pathnj', 'pathnyc', 'pathtotal', 'pathwtc']
path = 'data\\regress_out\\all_v2'
outcol_list = ['PATH Group',
               'R^2',
               'Elasticity Coefficient',
               'Economic Variable',
               'Economic Coefficient',
               'Fuel Variable',
               'Fuel Coefficient',
               'M1 Coefficient',
               'M2 Coefficient',
               'M3 Coefficient',
               'M4 Coefficient',
               'M5 Coefficient',
               'M6 Coefficient',
               'M7 Coefficient',
               'M8 Coefficient',
               'M9 Coefficient',
               'M10 Coefficient',
               'M11 Coefficient',
               'Recession_FRED Coefficient',
               'Sandy Coefficient',
               'Snow_Median Coefficient',
               'Intercept']
out_dict = {key: [] for key in outcol_list}
fname_list = os.listdir(path)

for index, group in enumerate(group_list):
    R2_list = []
    coef_list = []
    for fuel in fuel_list:
        for economic in economic_list:
            # note: the summary file name varies by group and economic
            # indicator only, not by fuel
            fname = 'regress_summary_{0}_{1}.txt'.format(group, economic)
            with open(os.path.join(path, fname)) as f:
                lines = f.readlines()
            for line in lines:
                if line[:9] == 'R-squared':
                    R2 = float(line.strip().split(' ')[-1])
                linesplit = line.split(' ')
                if (len(linesplit) > 2):
                    if (linesplit[1] == 'Fare-1Trip'):
                        coef = float(linesplit[2])
            if R2 is not None:
                R2_list.append(R2)
            else:
                R2_list.append(-999)
            if coef is not None:
                coef_list.append(coef)
            else:
                coef_list.append(-999)
    if index == 0:
        R2_out = pd.DataFrame({'Economic Indicator': economic_list,
                               group: R2_list})
        coef_out = pd.DataFrame({'Economic Indicator': economic_list,
                                 group: coef_list})
    else:
        R2_out[group] = R2_list
        coef_out[group] = coef_list

R2_out.to_excel('data\\regress_out\\regresssummary_R2.xlsx', index=False)
coef_out.to_excel('data\\regress_out\\regresssummary_coef.xlsx', index=False)
awsdocs/aws-doc-sdk-examples
lambda_functions/codecommit/MyCodeCommitFunction.py
Python
apache-2.0
2,083
0.005281
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.

# snippet-sourcedescription:[MyCodeCommitFunction.py demonstrates how to use an AWS Lambda function to return the URLs used for cloning an AWS CodeCommit repository to a CloudWatch log.]
# snippet-service:[codecommit]
# snippet-keyword:[Python]
# snippet-sourcesyntax:[python]
# snippet-keyword:[AWS CodeCommit]
# snippet-keyword:[Code Sample]
# snippet-keyword:[GetRepository]
# snippet-sourcetype:[full-example]
# snippet-sourceauthor:[AWS]
# snippet-sourcedate:[2016-03-07]
# snippet-start:[codecommit.python.MyCodeCommitFunction.complete]
import json
import boto3

codecommit = boto3.client('codecommit')


def lambda_handler(event, context):
    # Log the updated references from the event
    references = {reference['ref']
                  for reference in event['Records'][0]['codecommit']['references']}
    print("References: " + str(references))

    # Get the repository from the event and show its git clone URL
    repository = event['Records'][0]['eventSourceARN'].split(':')[5]
    try:
        response = codecommit.get_repository(repositoryName=repository)
        print("Clone URL: " + response['repositoryMetadata']['cloneUrlHttp'])
        return response['repositoryMetadata']['cloneUrlHttp']
    except Exception as e:
        print(e)
        print('Error getting repository {}. Make sure it exists and that your '
              'repository is in the same region as this function.'.format(repository))
        raise e
# snippet-end:[codecommit.python.MyCodeCommitFunction.complete]
huran2014/huran.github.io
wot_gateway/usr/lib/python2.7/xml/dom/expatbuilder.py
Python
gpl-2.0
36,382
0.00044
"""Facility to use the Expat parser to load a minidom instance from a string or file. This avoids all the overhead of SAX and pulldom to gain performance. """ # Warning! # # This module is tightly bound to the implementation details of the # minidom DOM and can't be used with other DOM implementations. This # is due, in part, to a lack of appropriate methods in the DOM (there is # no way to create Entity and Notation nodes via the DOM Level 2 # interface), and for performance. The later is the cause of some fairly # cryptic code. # # Performance hacks: # # - .character_data_handler() has an extra case in which continuing # data is appended to an existing Text node; this can be a # speedup since pyexpat can break up character data into multiple # callbacks even though we set the buffer_text attribute on the # parser. This also gives us the advantage that we don't need a # separate normalization pass. # # - Determining that a node exists is done using an identity comparison # with None rather than a truth test; this avoids searching for and # calling any methods on the node object if it exists. (A rather # nice speedup is achieved this way as well!) from xml.dom import xmlbuilder, minidom, Node from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE from xml.parsers import expat from xml.dom.minidom import _append_child, _set_attribute_node from xml.dom.NodeFilter import NodeFilter from xml.dom.minicompat import * TEXT_NODE = Node.TEXT_NODE CDATA_SECTION_NODE = Node.CDATA_SECTION_NODE DOCUMENT_NODE = Node.DOCUMENT_NODE FILTER_ACCEPT = xmlbuilder.DOMBuilderFilter.FILTER_ACCEPT FILTER_REJECT = xmlbuilder.DOMBuilderFilter.FILTER_REJECT FILTER_SKIP = xmlbuilder.DOMBuilderFilter.FILTER_SKIP FILTER_INTERRUPT = xmlbuilder.DOMBuilderFilter.FILTER_INTERRUPT theDOMImplementation = minidom.getDOMImplementation() # Expat typename -> TypeInfo _typeinfo_map = { "CDATA": minidom.TypeInfo(None, "cdata"), "ENUM": minidom.TypeInfo(None, "enumeration"), "ENTITY": minidom.TypeInfo(None, "entity"), "ENTITIES": minidom.TypeInfo(None, "entities"), "ID": minidom.TypeInfo(None, "id"), "IDREF": minidom.TypeInfo(None, "idref"), "IDREFS": minidom.TypeInfo(None, "idrefs"), "NMTOKEN": minidom.Type
Info(None, "nmtoken"), "NMTOKENS": minidom.TypeInfo(None, "nmtokens"), } class ElementInfo(object): __slots__ = '_attr_info', '_model', 'tagName' def __init__(self, tagName, model=None): self.tagName = tagName self._attr_info = [] self._model = model def __getstate__(self): return self._attr_info, self._model, sel
f.tagName def __setstate__(self, state): self._attr_info, self._model, self.tagName = state def getAttributeType(self, aname): for info in self._attr_info: if info[1] == aname: t = info[-2] if t[0] == "(": return _typeinfo_map["ENUM"] else: return _typeinfo_map[info[-2]] return minidom._no_type def getAttributeTypeNS(self, namespaceURI, localName): return minidom._no_type def isElementContent(self): if self._model: type = self._model[0] return type not in (expat.model.XML_CTYPE_ANY, expat.model.XML_CTYPE_MIXED) else: return False def isEmpty(self): if self._model: return self._model[0] == expat.model.XML_CTYPE_EMPTY else: return False def isId(self, aname): for info in self._attr_info: if info[1] == aname: return info[-2] == "ID" return False def isIdNS(self, euri, ename, auri, aname): # not sure this is meaningful return self.isId((auri, aname)) def _intern(builder, s): return builder._intern_setdefault(s, s) def _parse_ns_name(builder, name): assert ' ' in name parts = name.split(' ') intern = builder._intern_setdefault if len(parts) == 3: uri, localname, prefix = parts prefix = intern(prefix, prefix) qname = "%s:%s" % (prefix, localname) qname = intern(qname, qname) localname = intern(localname, localname) else: uri, localname = parts prefix = EMPTY_PREFIX qname = localname = intern(localname, localname) return intern(uri, uri), localname, prefix, qname class ExpatBuilder: """Document builder that uses Expat to build a ParsedXML.DOM document instance.""" def __init__(self, options=None): if options is None: options = xmlbuilder.Options() self._options = options if self._options.filter is not None: self._filter = FilterVisibilityController(self._options.filter) else: self._filter = None # This *really* doesn't do anything in this case, so # override it with something fast & minimal. self._finish_start_element = id self._parser = None self.reset() def createParser(self): """Create a new parser object.""" return expat.ParserCreate() def getParser(self): """Return the parser object, creating a new one if needed.""" if not self._parser: self._parser = self.createParser() self._intern_setdefault = self._parser.intern.setdefault self._parser.buffer_text = True self._parser.ordered_attributes = True self._parser.specified_attributes = True self.install(self._parser) return self._parser def reset(self): """Free all data structures used during DOM construction.""" self.document = theDOMImplementation.createDocument( EMPTY_NAMESPACE, None, None) self.curNode = self.document self._elem_info = self.document._elem_info self._cdata = False def install(self, parser): """Install the callbacks needed to build the DOM into the parser.""" # This creates circular references! 
parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler parser.StartElementHandler = self.first_element_handler parser.EndElementHandler = self.end_element_handler parser.ProcessingInstructionHandler = self.pi_handler if self._options.entities: parser.EntityDeclHandler = self.entity_decl_handler parser.NotationDeclHandler = self.notation_decl_handler if self._options.comments: parser.CommentHandler = self.comment_handler if self._options.cdata_sections: parser.StartCdataSectionHandler = self.start_cdata_section_handler parser.EndCdataSectionHandler = self.end_cdata_section_handler parser.CharacterDataHandler = self.character_data_handler_cdata else: parser.CharacterDataHandler = self.character_data_handler parser.ExternalEntityRefHandler = self.external_entity_ref_handler parser.XmlDeclHandler = self.xml_decl_handler parser.ElementDeclHandler = self.element_decl_handler parser.AttlistDeclHandler = self.attlist_decl_handler def parseFile(self, file): """Parse a document from a file object, returning the document node.""" parser = self.getParser() first_buffer = True try: while 1: buffer = file.read(16*1024) if not buffer: break parser.Parse(buffer, 0) if first_buffer and self.document.documentElement: self._setup_subset(buffer) first_buffer = False parser.Parse("", True) except ParseEscape: pass doc = self.document self.reset() self._parser = None return doc def parseString(self, string): """Parse a document from a string, returning the document node.""" parser = self.getParser() try: parser.Parse(string, T
c3nav/c3nav
src/c3nav/mapdata/migrations/0048_ramp.py
Python
apache-2.0
1,532
0.004569
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-17 19:24
from __future__ import unicode_literals

import c3nav.mapdata.fields
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('mapdata', '0047_remove_mapupdate_changed_geometries'),
    ]

    operations = [
        migrations.CreateModel(
            name='Ramp',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('minx', models.DecimalField(db_index=True, decimal_places=2, max_digits=6, verbose_name='min x coordinate')),
                ('miny', models.DecimalField(db_index=True, decimal_places=2, max_digits=6, verbose_name='min y coordinate')),
                ('maxx', models.DecimalField(db_index=True, decimal_places=2, max_digits=6, verbose_name='max x coordinate')),
                ('maxy', models.DecimalField(db_index=True, decimal_places=2, max_digits=6, verbose_name='max y coordinate')),
                ('geometry', c3nav.mapdata.fields.GeometryField(default=None, geomtype='polygon')),
                ('space', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ramps', to='mapdata.Space', verbose_name='space')),
            ],
            options={
                'verbose_name': 'Ramp',
                'verbose_name_plural': 'Ramps',
                'default_related_name': 'ramps',
            },
        ),
    ]
feroda/lessons-python4beginners
students/2016-09-04/simone-cosma/fibonacci.py
Python
agpl-3.0
1,035
0.016425
def _checkInput(index):
    if index < 0:
        raise ValueError("Negative index not supported [{}]".format(index))
    elif type(index) != int:
        raise TypeError("An integer is required [input type {}]".format(type(index).__name__))


def fib_from_string(index):
    _checkInput(index)
    serie = "0 1 1 2 3 5 8".replace(" ", "")
    return int(serie[index])


def fib_from_list(index):
    _checkInput(index)
    serie = [0, 1, 1, 2, 3, 5, 8]
    return serie[index]


def fib_from_algo(index):
    _checkInput(index)
    current_number = current_index = 0
    base = 1
    while current_index < index:
        old_base = current_number
        current_number = current_number + base
        base = old_base
        current_index += 1
    return current_number


def recursion(index):
    if index <= 1:
        return index
    return recursion(index - 1) + recursion(index - 2)


def fib_from_recursion_func(index):
    _checkInput(index)
    return recursion(index)


calculate = fib_from_recursion_func
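A quick usage sketch: the four implementations are interchangeable for indices 0 through 6, the bound that keeps `fib_from_string` and `fib_from_list` within their hard-coded series:

# All implementations agree on the first few Fibonacci numbers.
for i in range(7):
    assert fib_from_string(i) == fib_from_list(i) == fib_from_algo(i) == calculate(i)
print(calculate(6))  # 8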
ubccr/tacc_stats
analyze/process_pickles/miss_vs_stall.py
Python
lgpl-2.1
5,040
0.044048
#!/usr/bin/env python
import analyze_conf
import sys
import datetime, glob, job_stats, os, subprocess, time
import operator
import matplotlib

# Set the matplotlib output mode from config if it exists
if not 'matplotlib.pyplot' in sys.modules:
    try:
        matplotlib.use(analyze_conf.matplotlib_output_mode)
    except NameError:
        matplotlib.use('pdf')

import matplotlib.pyplot as plt
import numpy
import scipy, scipy.stats
import argparse
import tspl, tspl_utils, lariat_utils, plot
import math
import multiprocessing, functools, itertools
import cPickle as pickle


def do_work(file, mintime, wayness, lariat_dict):
    retval = (None, None, None, None, None)
    res = plot.get_data(file, mintime, wayness, lariat_dict)
    if (res is None):
        return retval
    (ts, ld, tmid, read_rate, write_rate, stall_rate, clock_rate, avx_rate,
     sse_rate, inst_rate, meta_rate, l1_rate, l2_rate, l3_rate, load_rate,
     read_frac, stall_frac) = res

    # return (scipy.stats.tmean(stall_frac),
    #         scipy.stats.tmean((load_rate - (l1_rate + l2_rate +
    #                                         l3_rate))/load_rate))

    mean_mem_rate = scipy.stats.tmean(read_rate + write_rate)*64.0
    ename = ld.exc.split('/')[-1]
    ename = tspl_utils.string_shorten(ld.comp_name(ename, ld.equiv_patterns), 8)
    if ename == 'unknown':
        return retval

    flag = False
    if mean_mem_rate < 75.*1000000000./16.:
        flag = True

    return (scipy.stats.tmean(stall_frac),
            scipy.stats.tmean((load_rate - (l1_rate))/load_rate),
            scipy.stats.tmean(clock_rate/inst_rate), ename, flag)


def main():
    parser = argparse.ArgumentParser(description='Look for imbalance between '
                                     'hosts for a pair of keys')
    parser.add_argument('filearg', help='File, directory, or quoted'
                        ' glob pattern', nargs='?', default='jobs')
    parser.add_argument('-p', help='Set number of processes',
                        nargs=1, type=int, default=[1])

    n = parser.parse_args()
    filelist = tspl_utils.getfilelist(n.filearg)
    procs = min(len(filelist), n.p[0])

    job = pickle.load(open(filelist[0]))
    jid = job.id
    epoch = job.end_time

    ld = lariat_utils.LariatData(jid, end_epoch=epoch, daysback=3,
                                 directory=analyze_conf.lariat_path)

    if procs < 1:
        print 'Must have at least one file'
        exit(1)

    pool = multiprocessing.Pool(processes=procs)

    partial_work = functools.partial(do_work, mintime=3600., wayness=16,
                                     lariat_dict=ld.ld)

    results = pool.map(partial_work, filelist)

    fig1, ax1 = plt.subplots(1, 1, figsize=(20, 8), dpi=80)
    fig2, ax2 = plt.subplots(1, 1, figsize=(20, 8), dpi=80)

    maxx = 0.
    for state in [True, False]:
        stalls = []
        misses = []
        cpis = []
        enames = []

        for (s, m, cpi, ename, flag) in results:
            if (s != None and m > 0. and m < 1.0 and flag == state):
                stalls.extend([s])
                misses.extend([m])
                cpis.extend([cpi])
                enames.extend([ename])

        markers = itertools.cycle(('o', 'x', '+', '^', 's', '8', 'p',
                                   'h', '*', 'D', '<', '>', 'v', 'd', '.'))
        colors = itertools.cycle(('b', 'g', 'r', 'c', 'm', 'k', 'y'))

        fmt = {}
        for e in enames:
            if not e in fmt:
                fmt[e] = markers.next() + colors.next()

        for (s, c, e) in zip(stalls, cpis, enames):
            # ax1.plot(numpy.log10(1.-(1.-s)), numpy.log10(c),
            maxx = max(maxx, 1./(1.-s))
            ax1.plot((1./(1.-s)), (c),
                     marker=fmt[e][0],
                     markeredgecolor=fmt[e][1],
                     linestyle='', markerfacecolor='None', label=e)
            ax1.hold = True
            ax2.plot((1./(1.-s)), (c),
                     marker=fmt[e][0],
                     markeredgecolor=fmt[e][1],
                     linestyle='', markerfacecolor='None', label=e)
            ax2.hold = True

    #ax.plot(numpy.log10(stalls), numpy.log10(cpis), fmt)
    #ax.plot(numpy.log10(1.0/(1.0-numpy.array(stalls))), numpy.log10(cpis), fmt)
    ax1.set_xscale('log')
    ax1.set_xlim(left=0.95, right=1.05*maxx)
    ax1.set_yscale('log')

    box = ax1.get_position()
    ax1.set_position([box.x0, box.y0, box.width * 0.45, box.height])
    box = ax2.get_position()
    ax2.set_position([box.x0, box.y0, box.width * 0.45, box.height])

    handles = []
    labels = []
    for h, l in zip(*ax1.get_legend_handles_labels()):
        if l in labels:
            continue
        else:
            handles.extend([h])
            labels.extend([l])

    ax1.legend(handles, labels, bbox_to_anchor=(1.05, 1),
               loc=2, borderaxespad=0., numpoints=1, ncol=4)
    ax1.set_xlabel('log(Cycles per Execution Cycle)')
    ax1.set_ylabel('log(CPI)')

    handles = []
    labels = []
    for h, l in zip(*ax2.get_legend_handles_labels()):
        if l in labels:
            continue
        else:
            handles.extend([h])
            labels.extend([l])

    ax2.legend(handles, labels, bbox_to_anchor=(1.05, 1),
               loc=2, borderaxespad=0., numpoints=1, ncol=4)
    ax2.set_xlabel('Cycles per Execution Cycle')
    ax2.set_ylabel('CPI')

    fname = 'miss_v_stall_log'
    fig1.savefig(fname)
    fname = 'miss_v_stall'
    fig2.savefig(fname)
    plt.close()


if __name__ == '__main__':
    main()
nomel/beaglebone
pru-gpio/templates.py
Python
unlicense
3,354
0.004472
def populate(template, values):
    # `template` is a dict with a "text" string containing <<tag>> markers and
    # a "parameters" tuple naming them; each tag gets replaced with the
    # matching entry from the `values` dictionary.
    # example:
    #   > template = {"text": "hello there <<your name>>!", "parameters": ("your name",)}
    #   > values = {"your name": "bukaroo banzai"}
    #   > populate(template, values)
    #   "hello there bukaroo banzai!"
    result = template["text"]
    name = "None"
    try:
        for name in template["parameters"]:
            result = result.replace("<<%s>>" % name, str(values[name]))
    except KeyError:
        print "Template value dictionary is missing the entry:", name
    return result


### dts file template
dtsContents = {
    "parameters": ("type", "part number", "header names", "hardware names", "fragments"),
    "text": """/*
 * Easy <<type>> mux control of <<header names>> (<<hardware names>>)
 */

/dts-v1/;
/plugin/;

/ {
    compatible = "ti,beaglebone", "ti,beaglebone-black";

    /* identification */
    part-number = "<<part number>>";
    /* version = "00A0"; */

    /* state the resources this cape uses */
    exclusive-use =
        /* the pin header uses */
        <<header names>>,
        /* the hardware IP uses */
        <<hardware names>>;

    <<fragments>>
};
"""
}

### fragment template
fragment = {
    "parameters": ("type", "index", "header name", "clean header name",
                   "state name", "offset and mux list"),
    "text": """
    /* <<state name>> state */
    fragment@<<index>> {
        target = <&am33xx_pinmux>;
        __overlay__ {
            <<type>>_<<clean header name>>_<<state name>>: pinmux_<<type>>_<<header name>>_<<state name>> {
                pinctrl-single,pins = <
                    <<offset and mux list>>
                >;
            };
        };
    };
"""
}

### pinctrlTemplate template
pinctrl = {
    "parameters": ("type", "index", "clean header name", "state name"),
    "text": """pinctrl-<<index>> = <&<<type>>_<<clean header name>>_<<state name>>>;"""
}

pinmuxHelper = {
    "parameters": ("type", "index", "header name", "state names list",
                   "pinctrl list", "gpio index"),
    "text": """
    fragment@<<index>> {
        target = <&ocp>;
        __overlay__ {
            <<type>>-<<header name>>_gpio<<gpio index>> {
                compatible = "bone-pinmux-helper";
                status = "okay";
                pinctrl-names = <<state names list>>;
                <<pinctrl list>>
            };
        };
    };
"""
}

ledHelper = {
    "parameters": ("index", "header name", "gpio bank + 1", "gpio pin",
                   "output pinctrl entry"),
    "text": """
    fragment@<<index>> {
        target = <&ocp>;
        __overlay__ {
            led_<<header name>>_helper {
                compatible = "gpio-leds";
                pinctrl-names = "default";
                <<output pinctrl entry>>
                leds-<<header name>> {
                    label = "leds:<<header name>>";
                    gpios = <&gpio<<gpio bank + 1>> <<gpio pin>> 0>;
                    linux,default-trigger = "none";
                    default-state = "off";
                };
            };
        };
    };
"""
}

prussHelper = {
    "parameters": ("status", "index"),
    "text": """
    fragment@<<index>> {
        target = <&pruss>;
        __overlay__ {
            status = "<<status>>";
        };
    };
"""
}
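A small usage sketch for `populate` with the `pinctrl` template above; the values dictionary is made up for illustration:

# Illustrative values only; any BeagleBone header name would do.
values = {
    "type": "gpio",
    "index": 0,
    "clean header name": "P9_12",
    "state name": "default",
}
print populate(pinctrl, values)
# pinctrl-0 = <&gpio_P9_12_default>;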
sidzan/netforce
netforce_mfg/netforce_mfg/models/bom_line.py
Python
mit
1,967
0.004575
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.

from netforce.model import Model, fields


class BomLine(Model):
    _name = "bom.line"
    _fields = {
        "bom_id": fields.Many2One("bom", "BoM", required=True, on_delete="cascade"),
        "product_id": fields.Many2One("product", "Product", required=True),
        "qty": fields.Decimal("Qty", required=True, scale=6),
        "uom_id": fields.Many2One("uom", "UoM", required=True),
        "location_id": fields.Many2One("stock.location", "RM Warehouse"),
        "container": fields.Selection([["sale", "From Sales Order"]], "RM Container"),
        "lot": fields.Selection([["production", "From Production Order"]], "RM Lot"),
        "issue_method": fields.Selection([["manual", "Manual"], ["backflush", "Backflush"]], "Issue Method"),
        "qty2": fields.Decimal("Qty2", scale=6),
        "notes": fields.Text("Notes"),
    }

BomLine.register()
dominickhera/PosaRepo
cis3250labs/parseTest.py
Python
apache-2.0
2,244
0.039661
#!/usr/bin/python
import re

userInput = raw_input("input equation\n")
numCount = 0
operandCount = 0
entryBracketCount = 0
exitBracketCount = 0
charCount = 0
endOfLine = len(userInput) - 1

for i in range(len(userInput)):
    if (re.search('[\s*a-z\s*A-Z]+', userInput[i])):
        charCount = charCount + 1
        print operandCount, " 1"
    elif (re.search('[\s*0-9]+', userInput[i])):
        numCount = numCount + 1
        print operandCount, " 2"
    elif (re.search('[\*]', userInput[i])):
        print 'TRUE'
        # operandCount = operandCount + 1
        # print operandCount, " 3.5"
    # elif (re.search('[\s*\+|\s*\-|\s*\/]+', userInput[i])):
    elif (re.search('[-+*/]+', userInput[i])):  # '-' first so it is a literal, not a range
        operandCount = operandCount + 1
        print operandCount, " 3"
        # if(re.search('[\s*\+|\s*\-|\s*\/]+', userInput[endOfLine])):
        if(re.search('[-+*/]+', userInput[endOfLine])):
            print "invalid expression"
            print "1"
            exit(0)
        else:
            if((re.search('[\s*a-zA-Z]+', userInput[i - 1])) or (re.search('[\s*\d]+', userInput[i - 1]))):
                continue
            else:
                print 'invalid expression'
                print '2'
                exit(0)
            if(re.search('[\s*\d]+', userInput[i - 1])):
                continue
            else:
                print 'invalid expression'
                print '3'
                exit(0)
            if(re.search('[\s*a-zA-Z]+', userInput[i + 1])):
                continue
            elif(re.search('[\s*\d]+', userInput[i + 1])):
                continue
            elif (re.search('[\(]+', userInput[i + 1])):
                continue
            elif (re.search('[\)]+', userInput[i + 1])):
                continue
            else:
                print 'invalid expression'
                print '4'
                exit(0)
    elif (re.search('[\(]+', userInput[i])):
        entryBracketCount = entryBracketCount + 1
        print operandCount, " 4"
    elif (re.search('[\)]+', userInput[i])):
        exitBracketCount = exitBracketCount + 1
        print operandCount, " 5"
        if(re.search('[\)]+', userInput[endOfLine])):
            continue
        else:
            if(re.search('[\(]+', userInput[i + 1])):
                print 'invalid expression'
                print '5'
                exit(0)
    print operandCount, " 6"

if (entryBracketCount != exitBracketCount):
    print "invalid expression"
    print '6'
    exit(0)
elif operandCount == 0:
    print operandCount
    print "invalid expression"
    print '7'
    exit(0)
elif ((numCount == 0) and (charCount == 0)):
    print "invalid expression"
    print '8'
    exit(0)
else:
    print "valid expression"
leppa/home-assistant
homeassistant/components/zigbee/switch.py
Python
apache-2.0
669
0.001495
"""Support for Zigbee switches.""" import voluptuous as vol from homeassistant.components.switch import SwitchDevice from . import PLATFORM_S
CHEMA, ZigBeeDigitalOut, ZigBeeDigitalOutConfig CONF_ON_STATE = "on_state" DEFAULT_ON_STATE = "high" STATES = ["high", "low"] PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Optional(CONF_ON_STATE): vol.In(STATES)}) def setup_platform(hass, conf
ig, add_entities, discovery_info=None): """Set up the Zigbee switch platform.""" add_entities([ZigBeeSwitch(hass, ZigBeeDigitalOutConfig(config))]) class ZigBeeSwitch(ZigBeeDigitalOut, SwitchDevice): """Representation of a Zigbee Digital Out device.""" pass
Apanatshka/C3P
c3p/tests/all_tests.py
Python
gpl-3.0
1,036
0.003861
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Python 3.2 code
#
# Copyright (c) 2012 Jeff Smits
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# C3P - C-compatible code preprocessor
# This commandline tool reads a file and expands macros.
#
# This file is a utility file and doesn't contain the whole tool.
# Also it does not run standalone.
#
# This file imports all the tests

from .acceptance_tests import Acc_test
from .unit_tests import *
vladpopovici/WSItk
WSItk/tools/wsi_bot_codebook3.py
Python
mit
7,170
0.007671
#!/usr/bin/env python2
#
# wsi_bot_codebook3
#
# Version 3 of codebook construction:
#
# - uses OpenCV for faster operation - but different local descriptors than in the 1st version;
# - uses annotation files for defining the regions from where the descriptors are to be
#   extracted
# - try to optimize the codebook with respect to some class labels

from __future__ import (absolute_import, division, print_function, unicode_literals)

__version__ = 0.1
__author__ = 'Vlad Popovici'

import os
import argparse as opt
import numpy as np
import numpy.linalg
from scipy.stats import ttest_ind

import skimage.draw
import skimage.io
from skimage.exposure import equalize_adapthist, rescale_intensity

import cv2
import cv2.xfeatures2d
from sklearn.cluster import MiniBatchKMeans
from sklearn.lda import LDA

from stain.he import rgb2he
from util.storage import ModelPersistence


def find_in_list(_value, _list):
    """
    Returns the indexes of all occurrences of value in a list.
    """
    return np.array([i for i, v in enumerate(_list) if v == _value], dtype=int)


def main():
    p = opt.ArgumentParser(description="""
        Extracts features from annotated regions and constructs a codebook of a given size.
        """)
    p.add_argument('in_file', action='store',
                   help='a file with image file, annotation file and label (0/1)')
    p.add_argument('out_file', action='store', help='resulting model file name')
    #p.add_argument('codebook_size', action='store', help='codebook size', type=int)
    p.add_argument('-t', '--threshold', action='store', type=int, default=5000,
                   help='Hessian threshold for SURF features.')
    p.add_argument('-s', '--standardize', action='store_true', default=False,
                   help='should the features be standardized before codebook construction?')
    p.add_argument('-v', '--verbose', action='store_true', help='verbose?')

    args = p.parse_args()
    th = args.threshold

    all_image_names, all_descriptors = [], []
    all_roi = []
    y = []
    unique_image_names = []

    with open(args.in_file, mode='r') as fin:
        for l in fin.readlines():
            l = l.strip()
            if len(l) == 0:
                break
            # file names: image and its annotation and label
            img_file, annot_file, lbl = [z_ for z_ in l.split()][0:3]
            y.append(int(lbl))

            if args.verbose:
                print("Image:", img_file)

            img = cv2.imread(img_file)
            coords = np.fromfile(annot_file, dtype=int, sep=' ')  # x y - values
            # integer division: with __future__ division, "/" would give a float shape
            coords = np.reshape(coords, (coords.size // 2, 2), order='C')
            # get the bounding box:
            xmin, ymin = coords.min(axis=0)
            xmax, ymax = coords.max(axis=0)

            if args.verbose:
                print("\t...H&E extraction")

            img = img[ymin:ymax+2, xmin:xmax+2, :]   # keep only the region of interest
            img_h, _ = rgb2he(img, normalize=True)   # get the H- component
            img_h = equalize_adapthist(img_h)
            img_h = rescale_intensity(img_h, out_range=(0, 255))

            # make sure the dtype is right for image and the mask: OpenCV is sensitive to data type
            img_h = img_h.astype(np.uint8)

            if args.verbose:
                print("\t...building mask")

            mask = np.zeros(img_h.shape, dtype=np.uint8)
            r, c = skimage.draw.polygon(coords[:, 1]-ymin, coords[:, 0]-xmin)  # adapt to new image...
            mask[r, c] = 1   # everything outside the region is black

            if args.verbose:
                print("\t...feature detection and computation")

            img_h *= mask

            feat = cv2.xfeatures2d.SURF_create(hessianThreshold=th)
            keyp, desc = feat.detectAndCompute(img_h, mask)

            if args.verbose:
                print("\t...", str(len(keyp)), "features extracted")

            all_descriptors.extend(desc)
            all_image_names.extend([img_file] * len(keyp))
            unique_image_names.append(img_file)
    # end for

    X = np.hstack(all_descriptors)
    X = np.reshape(X, (len(all_descriptors), all_descriptors[0].size), order='C')

    # identity shift/scale so the saved model is complete even without -s
    Xm = np.zeros(X.shape[1])
    Xs = np.ones(X.shape[1])
    if args.standardize:
        # make sure each variable (column) is mean-centered and has unit standard deviation
        Xm = np.mean(X, axis=0)
        Xs = np.std(X, axis=0)
        Xs[np.isclose(Xs, 1e-16)] = 1.0
        X = (X - Xm) / Xs

    y = np.array(y, dtype=int)

    rng = np.random.RandomState(0)
    acc = []   # will keep accuracy of the classifier
    vqs = []   # all quantizers, to find the best

    for k in np.arange(10, 121, 10):
        # Method:
        # -generate a codebook with k codewords
        # -re-code the data
        # -compute frequencies
        # -estimate classification on best 10 features

        if args.verbose:
            print("\nK-means clustering (k =", str(k), ")")
            print("\t...with", str(X.shape[0]), "points")

        # -codebook and re-coding
        vq = MiniBatchKMeans(n_clusters=k, random_state=rng, batch_size=500,
                             compute_labels=True, verbose=False)   # vector quantizer
        vq.fit(X)
        vqs.append(vq)

        # -codeword frequencies
        frq = np.zeros((len(unique_image_names), k))
        for i in range(vq.labels_.size):
            frq[unique_image_names.index(all_image_names[i]), vq.labels_[i]] += 1.0

        for i in range(len(unique_image_names)):
            if frq[i, :].sum() > 0:
                frq[i, :] /= frq[i, :].sum()

        if args.verbose:
            print("...\tfeature selection (t-test)")
        pv = np.ones(k)
        for i in range(k):
            _, pv[i] = ttest_ind(frq[y == 0, i], frq[y == 1, i])
        idx = np.argsort(pv)   # order of the p-values

        if args.verbose:
            print("\t...classification performance estimation")
        clsf = LDA(solver='lsqr', shrinkage='auto').fit(frq[:, idx[:10]], y)  # keep top 10 features
        acc.append(clsf.score(frq[:, idx[:10]], y))

    acc = np.array(acc)
    k = np.arange(10, 121, 10)[acc.argmax()]   # best k
    if args.verbose:
        print("\nOptimal codebook size:", str(k))

    # final codebook:
    vq = vqs[acc.argmax()]

    # compute the average distance and std.dev. of the points in each cluster
    # (separate loop variable j, so the selected k is preserved):
    avg_dist = np.zeros(k)
    sd_dist = np.zeros(k)
    for j in range(0, k):
        d = numpy.linalg.norm(X[vq.labels_ == j, :] - vq.cluster_centers_[j, :], axis=1)
        avg_dist[j] = d.mean()
        sd_dist[j] = d.std()

    with ModelPersistence(args.out_file, 'c', format='pickle') as d:
        d['codebook'] = vq
        d['shift'] = Xm
        d['scale'] = Xs
        d['standardize'] = args.standardize
        d['avg_dist_to_centroid'] = avg_dist
        d['stddev_dist_to_centroid'] = sd_dist

    return True


if __name__ == '__main__':
    main()
icereval/modular-file-renderer
mfr/ext/image/render.py
Python
apache-2.0
481
0
"""Ima
ge renderer module.""" from mfr.core import RenderResult def render_img_tag(fp, src=None, alt=''): """A simple image tag renderer. :param fp: File pointer :param src: Path to file :param alt: Alternate text for the image :return: RenderResult object containing the content html """ # Default src to the filename src = src or fp.name content = '<img src="{src}" alt="{alt}" />'.format(src=src, alt=alt) return RenderResult(content)
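# Illustrative usage: any object with a .name attribute can act as the file
# pointer (the fake file class below is hypothetical, and RenderResult is
# assumed to keep the content it is constructed with):
class _FakeFile(object):
    name = 'photo.png'

_result = render_img_tag(_FakeFile(), alt='A photo')
# _result wraps: <img src="photo.png" alt="A photo" />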
ZeitOnline/zeit.push
src/zeit/push/message.py
Python
bsd-3-clause
13,344
0
from zeit.cms.i18n import MessageFactory as _ import grokcore.component as grok import logging import zeit.cms.interfaces import zeit.objectlog.interfaces import zeit.push.interfaces import zope.cachedescriptors.property import zope.component log = logging.getLogger(__name__) class Message(grok.Adapter): grok.context(zeit.cms.interfaces.ICMSContent) grok.implements(zeit.push.interfaces.IMessage) grok.baseclass() get_text_from = NotImplemented def __init__(self, context): self.context = context self.config = {} def send(self): """Send push notification to external service. We *never* want to re-send a push notification on publish, even if the initial notification failed, since the information could be outdated. Therefore we must disable the notification before anything else. Re-sending can be done manually by re-enabling the service. """ self._disable_message_config() if not self.text: raise ValueError('No text configured') kw = {} kw.update(self.config) kw['message'] = self try: notifier = zope.component.getUtility( zeit.push.interfaces.IPushNotifier, name=self.type) notifier.send(self.text, self.url, **kw) self.log_success() log.info('Push notification for %s sent', self.type) except Exception, e: self.log_error(str(e)) log.error(u'Error during push to %s with config %s', self.type, self.config, exc_info=True) def _disable_message_config(self): push = zeit.push.interfaces.IPushMessages(self.context) push.set(self.config, enabled=False) @property def text(self): push = zeit.push.interfaces.IPushMessages(self.context) return getattr(push, self.get_text_from) @property def type(self): return self.__class__.__dict__['grokcore.component.directive.name'] @property def url(self): config = zope.app.appsetup.product.getProductConfiguration( 'zeit.push') return zeit.push.interfaces.IPushURL(self.context).replace( zeit.cms.interfaces.ID_NAMESPACE, config['push-target-url']) @zope.cachedescriptors.property.Lazy def object_log(self): return zeit.objectlog.interfaces.ILog(self.context) def log_success(self): self.object_log.log(_( 'Push notification for "${name}" sent.' ' (Message: "${message}", Details: ${details})', mapping={'name': self.type.capitalize(), 'message': self.text, 'details': self.log_message_details})) def log_error(self, reason): self.object_log.log(_( 'Error during push to ${name} ${details}: ${reason}', mapping={'name': self.type.capitalize(), 'details': self.log_message_details, 'reason': reason})) @property def log_message_details(self): return '-' @grok.adapter(zeit.cms.interfaces.ICMSContent) @grok.implementer(zeit.push.interfaces.IPushURL) def default_push_url(context): return context.uniqueId class AccountData(grok.Adapter): grok.context(zeit.cms.interfaces.ICMSContent) grok.implements(zeit.push.interfaces.IAccountData) def __init__(self, context): super(AccountData, self).__init__(context) self.__parent__ = context # make security work @property def push(self): return zeit.push.interfaces.IPushMessages(self.context) @property def facebook_main_enabled(self): source = zeit.push.interfaces.facebookAccountSource(None) service = self.push.get(type='facebook', account=source.MAIN_ACCOUNT) return service and service.get('enabled') @facebook_main_enabled.setter def facebook_main_enabled(self, value): source = zeit.push.interfaces.facebookAccountSource(None) self.push.set(dict( type='facebook', account=source.MAIN_ACCOUNT), enabled=value) # We cannot use the key ``text``, since the first positional parameter of # IPushNotifier.send() is also called text, which causes TypeError. 
@property def facebook_main_text(self): source = zeit.push.interfaces.facebookAccountSource(None) service = self.push.get(type='facebook', account=source.MAIN_ACCOUNT) return service and service.get('override_text') @facebook_main_text.setter def facebook_main_text(self, value): source = zeit.push.interfaces.facebookAccountSource(None) self.push.set(dict( type='facebook', account=source.MAIN_ACCOUNT), override_text=value
) @property def facebook_magazin_enabled(self): source = zeit.push.interfaces.facebookAccountSource(None) service = self.push.get( type='facebook', account=source.MAGAZIN_ACCOUNT) return service and service.get('enabled') @facebook_magazin_enabled.setter def facebook_magazin_enabled(self, value): source = zeit.push.interfaces.facebookAccountSourc
e(None) self.push.set(dict( type='facebook', account=source.MAGAZIN_ACCOUNT), enabled=value) @property def facebook_magazin_text(self): source = zeit.push.interfaces.facebookAccountSource(None) service = self.push.get( type='facebook', account=source.MAGAZIN_ACCOUNT) return service and service.get('override_text') @facebook_magazin_text.setter def facebook_magazin_text(self, value): source = zeit.push.interfaces.facebookAccountSource(None) self.push.set(dict( type='facebook', account=source.MAGAZIN_ACCOUNT), override_text=value) @property def facebook_campus_enabled(self): source = zeit.push.interfaces.facebookAccountSource(None) service = self.push.get(type='facebook', account=source.CAMPUS_ACCOUNT) return service and service.get('enabled') @facebook_campus_enabled.setter def facebook_campus_enabled(self, value): source = zeit.push.interfaces.facebookAccountSource(None) self.push.set(dict( type='facebook', account=source.CAMPUS_ACCOUNT), enabled=value) @property def facebook_campus_text(self): source = zeit.push.interfaces.facebookAccountSource(None) service = self.push.get( type='facebook', account=source.CAMPUS_ACCOUNT) return service and service.get('override_text') @facebook_campus_text.setter def facebook_campus_text(self, value): source = zeit.push.interfaces.facebookAccountSource(None) self.push.set(dict( type='facebook', account=source.CAMPUS_ACCOUNT), override_text=value) @property def twitter_main_enabled(self): source = zeit.push.interfaces.twitterAccountSource(None) service = self.push.get(type='twitter', account=source.MAIN_ACCOUNT) return service and service.get('enabled') @twitter_main_enabled.setter def twitter_main_enabled(self, value): source = zeit.push.interfaces.twitterAccountSource(None) self.push.set(dict( type='twitter', account=source.MAIN_ACCOUNT), enabled=value) @property def twitter_ressort_text(self): return self._nonmain_twitter_service.get('override_text') @twitter_ressort_text.setter def twitter_ressort_text(self, value): self.push.set( dict(type='twitter', variant='ressort'), override_text=value) @property def twitter_ressort(self): return self._nonmain_twitter_service.get('account') @twitter_ressort.setter def twitter_ressort(self, value): service = self._nonmain_twitter_service enabled = None # BBB `variant` was introduced in zeit.push-1.21 if service and 'variant' not in service: self.push.delete(service) enabled = service.get('enabled') self.push.set( dict(type='tw
wigginslab/lean-workbench
lean_workbench/scale/scale_resource.py
Python
mit
1,731
0.005777
import sys import os from scale_model import StartupDataModel, VCModel from flask.ext.restful import Resource, reqparse from flask import Flask, jsonify, request, make_response import os from database import db from flask.ext.security import current_user from json import dumps class Scale_DAO(object): def __init__(self): print 'making scale DAO' self.user_scale = StartupDataModel.query.filter_by(username=current_user.email).order_by(StartupDataModel.date.desc()).first() print self.user_scale class Scale_resource(Resource): def get
(self, **kwargs): """ TODO: get old data to render in form as default """ #check= request.args.get('check') if current_user.is_anonymous(): return jsonify(status=400) scale = Scale_DAO() if scale.user_scale: return make_response(dumps(scale.user_scale.as_dict())) else: return jsonify(scale_authed=False) d
ef post(self):
        """
        TODO: add update instead of just creating whole new record
        """
        if current_user.is_anonymous():
            return jsonify(msg="You are no longer logged in", status=400)
        try:
            data = request.json
            cb_url = data.get('crunchbase_url')
            al_url = data.get('angellist_url')
            description = data.get('description')
            new_data = StartupDataModel(username=current_user.email,
                crunchbase_url=cb_url,
                angellist_url=al_url,
                description=description)
            db.session.add(new_data)
            db.session.commit()
            return jsonify(status=200, msg="Data added successfully!")
        except Exception:
            # report the failure to the client
            return jsonify(msg="Error adding your data.")
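# A hedged wiring sketch (the app and route names are illustrative, not taken
# from this repo): registering the resource with Flask-RESTful.
from flask import Flask
from flask.ext.restful import Api

app = Flask(__name__)
api = Api(app)
api.add_resource(Scale_resource, '/api/scale')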
tangyaohua/dl4mt
session2/train_nmt.py
Python
bsd-3-clause
1,646
0.003645
im
port numpy # from nmt import train # from nmtlm import train from nmt import train def main(job_id, params): print params trainerr, validerr, testerr = train(saveto=params['model'][0], reload_=params['reload'][0],
dim_word=params['dim_word'][0], dim=params['dim'][0], n_words=params['n-words'][0], n_words_src=params['n-words'][0], decay_c=params['decay-c'][0], clip_c=params['clip-c'][0], lrate=params['learning-rate'][0], optimizer=params['optimizer'][0], maxlen=50, batch_size=16, valid_batch_size=16, validFreq=5000, dispFreq=10, saveFreq=5000, sampleFreq=10, use_dropout=params['use-dropout'][0]) return validerr if __name__ == '__main__': main(0, { 'model': ['model.npz'], 'dim_word': [384], 'dim': [512], 'n-words': [30000], 'optimizer': ['adam'], 'decay-c': [0.], 'clip-c': [10.], 'use-dropout': [False], 'learning-rate': [0.0001], 'reload': [False]})
e-gob/plataforma-kioscos-autoatencion
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/monitoring/bigpanda.py
Python
bsd-3-clause
5,763
0.002776
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: bigpanda author: "Hagai Kariti (@hkariti)" short_description: Notify BigPanda about deployments version_added: "1.8" description: - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls. options: component: description: - "The name of the component being deployed. Ex: billing" required: true aliases: ['name'] version: description: - The deployment version. required: true token: description: - API token. required: true state: description: - State of the deployment. required: true choices: ['started', 'finished', 'failed'] hosts: description: - Name of affected host name. Can be a list. required: false default: machine's hostname aliases: ['host'] env: description: - The environment name, typically 'production', 'staging', etc. required: false owner: description: - The person responsible for the deployment. required: false description: description: - Free text description of the deployment. required: false url: description: - Base URL of the API server. required: False default: https://api.bigpanda.io validate_certs: description: - If C(no), SSL certificates for the target url will not be validated. This should only be used on personally controlled sites using self-signed certificates. required: false default: 'yes' choices: ['yes', 'no'] # informational: requirements for nodes requirements: [ ] ''' EXAMPLES = ''' - bigpanda: component: myapp version: '1.3' token: '{{ bigpanda_token }}' state: started - bigpanda: component: myapp version: '1.3' token: '{{ bigpanda_token }}' state: finished # If outside servers aren't reachable from your machine, use delegate_to and override hosts: - bigpanda: component: myapp version: '1.3' token: '{{ bigpanda_token }}' hosts: '{{ ansible_hostname }}' state: started delegate_to: localhost register: deployment - bigpanda: component: '{{ deployment.component }}' version: '{{ deployment.version }}' token: '{{ deployment.token }}' state: finished delegate_to: localhost ''' # =========================================== # Module execution. 
# import json import socket import traceback from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_native from ansible.module_utils.urls import fetch_url def main(): module = AnsibleModule( argument_spec=dict( component=dict(required=True, aliases=['name']), version=dict(required=True), token=dict(required=True, no_log=True), state=dict(required=True, choices=['started', 'finished', 'failed']), hosts=dict(required=False, default=[socket.gethostname()], aliases=['host']), env=dict(required=False), owner=dict(required=False), description=dict(required=False), message=dict(required=False), source_system=dict(required=False, default='ansible'), validate_certs=dict(default='yes', type='bool'), url=dict(required=False, default='https://api.bigpanda.io'), ), supports_check_mode=True, check_invalid_arguments=False, ) token = module.params['token'] state = module.params['state'] url = module.params['url'] # Build the common request body body = dict() for k in ('component', 'version', 'hosts'): v = module.params[k] if v is not None: body[k] = v if not isinstance(body['hosts'], list): body['hosts'] = [body['hosts']] # Insert state-specific attributes to body if state == 'started': for k in ('source_system', 'env', 'owner', 'description'): v = module.params[k] if v is not None: body[k] = v request_url = url + '/data/events/deployments/start' else: mess
age = module.params['message'] if message is not None: body['errorMessage'] = message if state == 'finished': body['status'] = 'success' else: body['status'] = 'failure' request_url = url + '/data/events/deployments/end' # Build the deployment object we return deployment = dict(token=token, url=url) deployment.update(body)
if 'errorMessage' in deployment: message = deployment.pop('errorMessage') deployment['message'] = message # If we're in check mode, just exit pretending like we succeeded if module.check_mode: module.exit_json(changed=True, **deployment) # Send the data to bigpanda data = json.dumps(body) headers = {'Authorization':'Bearer %s' % token, 'Content-Type':'application/json'} try: response, info = fetch_url(module, request_url, data=data, headers=headers) if info['status'] == 200: module.exit_json(changed=True, **deployment) else: module.fail_json(msg=json.dumps(info)) except Exception as e: module.fail_json(msg=to_native(e), exception=traceback.format_exc()) if __name__ == '__main__': main()
jargij/led-pomper-sha2017
img_to_queue.py
Python
mit
3,094
0.005171
import socket import random from PIL import Image import json import sys, getopt import math import pika # Screen VARS offset_x = 80 offset_y = 24 screen_width = 240 screen_height = 240 # Internal options queueAddress = '' fileName = '' workers = 36 Matrix = [] def main(argv): global fileName, workers inputFile = '' try: opts, args = getopt.getopt(argv, "hi:w:", ["file=", "workers="]) except getopt.GetoptError: print('img_to_queue.py -i <inputfile> -w workers') sys.exit(2) for opt, arg in opts: if opt == '-h': print('img_to_queue.py -i <inputfile> -w workers') sys.exit() elif opt in ("-i", "--file"): fileName = arg print("File to process: " + fileName) elif opt in ("-w", "--workers"): workers = int(arg) if (math.sqrt(float(workers)) - int(math.sqrt(float(workers))) > 0):
print('The square root of the amount of workers is not a whole number. GTFO!')
                sys.exit()
            print("Amount of available workers: " + str(workers))
    pompImage()


def addPixelToWorkFile(x, y, r, g, b, index_x, index_y, Matrix):
    # print("Current index x:" + str(index_x) + " y: " + str(index_y))
    Matrix[index_x][index_y].append({'x': x, 'y': y, 'rgb': "%02X%02X%02X" % (r, g, b)})


def pompImage():
    print("Processing image to JSON")
    im = Image.open(fileName).convert('RGB')
    im.thumbnail((240, 240), Image.ANTIALIAS)
    _, _, width, height = im.getbbox()

    # start with x and y index 1
    slice_size = int(screen_width / int(math.sqrt(workers)))
    amount_of_keys = int(screen_width / slice_size)
    print(amount_of_keys)
    w, h = amount_of_keys, amount_of_keys
    Matrix = [[[] for x in range(w)] for y in range(h)]
    # workFile = [[0 for x in range(amount_of_keys)] for y in range(amount_of_keys)]
    for x in range(width):
        index_x = int((x / slice_size))
        for y in range(height):
            r, g, b = im.getpixel((x, y))
            index_y = int((y / slice_size))
            addPixelToWorkFile(x + offset_x, y + offset_y, r, g, b, index_x, index_y, Matrix)
            # print("Current index x:"+str(index_x)+" y: "+str(index_y)+" WORKER:"+str(index_y*index_x))
    sendToQueue(Matrix)


def sendToQueue(arrayOfWorkers):
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(host='localhost',
                                  credentials=pika.PlainCredentials(username='pomper',
                                                                    password='pomper')))
    channel = connection.channel()
    channel.queue_declare(queue='pomper', durable=False)
    channel.queue_purge(queue='pomper')
    for worker in arrayOfWorkers:
        for pixels in worker:
            channel.basic_publish(exchange='',
                                  routing_key='pomper',
                                  body=json.dumps(pixels))


if __name__ == "__main__":
    main(sys.argv[1:])
sileht/deb-openstack-nova
nova/db/sqlalchemy/migrate_repo/versions/035_secondary_dns.py
Python
apache-2.0
1,198
0
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright (c) 2011 OpenStack, LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, Table, MetaData, String def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine networks = Table('networks', meta, autoload=True) networks.c.dns.alter(name='dns1') dns2 = Column('dns2', String(255)) networks
.create_column(dns2) def downgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine networks = Table('networks', meta, autoload=True) networks.c.dns1.alter(name='dns') networks.drop_co
lumn('dns2')
linea-it/dri
api/comment/migrations/0006_auto_20191001_1943.py
Python
gpl-3.0
596
0.001678
# Generated by Django 2.1.5 on 2019-10-01 19:43 from django.db import migrations, models class M
igration(migrations.Migration): dependencies = [ ('comment', '0005_auto_20191001_1559'), ] operations = [ migrations.AlterField( model_name='dataset', name='dts_type', field=models.CharField(choices=[('0', 'User Comment'), ('1', 'Validation History'), ('2', 'Reported Issue')], default='0', help_text='Differentiate user comments from automatic validation or defect comments.', max_length=1, verbose_name='Type'), ),
]
hirobert/svgwrite
tests/test_clipping.py
Python
gpl-3.0
800
0.0075
#!/usr/bin/env python #coding:utf-8 # Author: mozman --<mozman@gmx.at> # Purpose: test mixin Clipping # Created: 31.10.2010 # Copyright (C) 2010, Manfred Moitzi # License: GPLv3 import unittest from svgwrite.mixins import Clipping from svgwrite.base import BaseElement
class SVGMock(BaseElement, Clipping): elementname = 'svg' class TestClipping(unittest.TestCase): def test_clip_rect_numbers(self): obj = SVGMock(debug=True) obj.clip_rect(1, 2, 3, 4) self.assertEqual(obj['clip
'], 'rect(1,2,3,4)') def test_clip_rect_auto(self): obj = SVGMock(debug=True) obj.clip_rect('auto', 'auto', 'auto', 'auto') self.assertEqual(obj['clip'], 'rect(auto,auto,auto,auto)') if __name__=='__main__': unittest.main()
yellcorp/dupescan
dupescan/fs/_walker.py
Python
mit
4,089
0.005625
from typing import Iterable, Callable, Optional, Any, List, Iterator from dupescan.fs._fileentry import FileEntry from dupescan.fs._root import Root from dupescan.types import AnyPath FSPredicate = Callable[[FileEntry], bool] ErrorHandler = Callable[[EnvironmentError], Any] def catch_filter(inner_filter: FSPredicate, error_handler_func: ErrorHandler) -> FSPredicate: # If no filter function provided, return one that includes everything. In # this case it will never raise an error, so error_handler_func doesn't get # a look-in here if inner_filter is None: def always_true(*args, **kwargs): return True return always_true # Otherwise if the filter function throws an EnvironmentError, pass it to # the
error_handler_func (if provided) and return false def wrapped_func(*args, **kwargs): try: return inner_filter(*args, **kwargs) except EnvironmentError as env_error: if error_handler_func is not None: error_handler_func(env_error) return False return wrapped_func def noerror(_): pass class Walker(object): def __init__( self, recursive: bool,
dir_object_filter: Optional[FSPredicate]=None, file_object_filter: Optional[FSPredicate]=None, onerror: Optional[ErrorHandler]=None ): self._recursive = bool(recursive) self._onerror = noerror if onerror is None else onerror self._dir_filter = catch_filter(dir_object_filter, self._onerror) self._file_filter = catch_filter(file_object_filter, self._onerror) def __call__(self, paths: Iterable[AnyPath]) -> Iterator[FileEntry]: for root_index, root_path in enumerate(paths): root_spec = Root(root_path, root_index) try: root_obj = FileEntry.from_path(root_path, root_spec) except EnvironmentError as env_error: self._onerror(env_error) continue if root_obj.is_dir and self._dir_filter(root_obj): if self._recursive: yield from self._recurse_dir(root_obj) else: yield root_obj elif root_obj.is_file and self._file_filter(root_obj): yield root_obj def _recurse_dir(self, root_obj: FileEntry): dir_obj_q: List[FileEntry] = [ root_obj ] next_dirs: List[FileEntry] = [ ] while len(dir_obj_q) > 0: dir_obj = dir_obj_q.pop() next_dirs.clear() try: for child_obj in dir_obj.dir_content(): try: if ( child_obj.is_dir and not child_obj.is_symlink and self._dir_filter(child_obj) ): next_dirs.append(child_obj) elif ( child_obj.is_file and self._file_filter(child_obj) ): yield child_obj except EnvironmentError as query_error: self._onerror(query_error) except EnvironmentError as env_error: self._onerror(env_error) dir_obj_q.extend(reversed(next_dirs)) def flat_iterator( paths: Iterable[AnyPath], dir_object_filter: Optional[FSPredicate]=None, file_object_filter: Optional[FSPredicate]=None, onerror: Optional[ErrorHandler]=None ) -> Iterator[FileEntry]: return Walker(False, dir_object_filter, file_object_filter, onerror)(paths) def recurse_iterator( paths: Iterable[AnyPath], dir_object_filter: Optional[FSPredicate]=None, file_object_filter: Optional[FSPredicate]=None, onerror: Optional[ErrorHandler]=None ) -> Iterator[FileEntry]: return Walker(True, dir_object_filter, file_object_filter, onerror)(paths)
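# A hedged usage sketch (assumes dupescan.fs re-exports recurse_iterator and
# that FileEntry exposes .name and .path; adjust to the real package surface):
from dupescan.fs import recurse_iterator

def _not_hidden(entry):
    return not entry.name.startswith('.')

for entry in recurse_iterator(['/data', '/backup'],
                              dir_object_filter=_not_hidden,
                              file_object_filter=_not_hidden,
                              onerror=print):
    print(entry.path)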
Vierkantor/PyServ
run.py
Python
gpl-3.0
67
0
#!/
usr/bin/env python3 from pyserv.databrowse import main main(
)
viswimmer1/PythonGenerator
data/python_files/30585323/ravebot.py
Python
gpl-2.0
7,909
0.013529
import os, sys up_path = os.path.abspath('..') sys.path.append(up_path) from numpy import * import matplotlib import matplotlib.pyplot as plt import matplotlib.patches as mpatches from matplotlib import rc from objects import SimObject from utils import scalar from covar import draw_ellipsoid, vec2cov, cov2vec,\ project_psd from kalman_filter import ekf_update from numpy.random import multivariate_normal as mvn import time from math import atan2, atan import robots from openravepy import * from transforms import unscented_transform from rave_draw import * #import openravepy as rave class RaveLocalizerBot(robots.Robot): NX = -1 NU = -1 def __init__(self, bot, obj): self.bot = bot self.NX = bot.NX + 3 #FIXME (hack for now) self.NU = bot.NU self.dt = bot.dt x = array(zeros((self.NX))) for t in range(bot.NX): x[t] = bot.x[t] x[bot.NX] = obj[0] x[bot.NX+1] = obj[1] x[bot.NX+2] = obj[2] self.EPS = bot.EPS robots.Robot.__init__(self, x, dt=self.dt) def dynamics(self, X, u): bot_up = self.bot.dynamics(X[0:self.bot.NX], u) return vstack((bot_up, X[self.bot.NX:])) def collision_penalty_trajectory(self, x, env): ret
urn 0 #Todo: FI
XME def camera_obj_state(self,x): #Returns the transform of the camera and object camera_transform = self.bot.camera_transform(x[0:self.bot.NX]) obj_pos = x[self.bot.NX:] z = mat(zeros((10,1))) z[0:7] = camera_transform z[7:10] = obj_pos return z """ def fov_state(self, x): xy = mat(self.bot.traj_pos(x)).T theta = self.bot.orientation(x) #print vstack((xy, theta, x[self.bot.NX:])) if isinstance(x, tuple) or len(x.shape) == 1: x = mat(x).T if isinstance(xy, tuple) or xy.shape[0] < xy.shape[1]: xy = mat(xy).T return vstack((xy, theta, x[self.bot.NX:])) """ def observe(self, scene, x=None): zs = self.bot.observe(scene, x[0:self.bot.NX]) return vstack((zs, robots.Robot.observe(self, scene, x))) def draw_trajectory(self, xs, mus=None, Sigmas=None, color=array((1.0, 0.0, 0.0, 0.2))): bnx = self.bot.NX self.bot.draw_trajectory(xs[0:bnx], mus[0:bnx], Sigmas[0:bnx, 0:bnx], color) def draw(self, X=None, color=array((1.0, 0.0, 0.0))): self.bot.draw(x[0:bnx], color) class BarretWAM(robots.Robot): # wrapper for openrave robots NX = 7 NU = 7 EPS = 1e-3 def __init__(self, ravebot, env, state_rep='angles', dt=-1): self.ravebot = ravebot self.env = env # used for drawing purposes self.state_rep = state_rep self.handles = [ ] # used for drawing purposes self.jointnames = ['Shoulder_Yaw', 'Shoulder_Pitch', 'Shoulder_Roll', 'Elbow', 'Wrist_Yaw', 'Wrist_Pitch', 'Wrist_Roll'] self.jointidxs = [ravebot.GetJoint(name).GetDOFIndex() for name in self.jointnames] self.ravebot_manip = self.ravebot.SetActiveManipulator('arm') self.lower_limits, self.upper_limits = self.ravebot.GetDOFLimits() tmp_lower_limits = [] tmp_upper_limits = [] for idx in self.jointidxs: tmp_lower_limits.append(self.lower_limits[idx]) tmp_upper_limits.append(self.upper_limits[idx]) self.lower_limits = mat(array(tmp_lower_limits)).T self.upper_limits = mat(array(tmp_upper_limits)).T self.ravebot.SetActiveDOFs(self.jointidxs) x = [0] * len(self.jointidxs) robots.Robot.__init__(self, x, dt=dt) self.index = BarretWAM.increment_index() def traj_pos(self, x=None): if x == None: x = self.x if self.state_rep == 'angles': return mat(self.forward_kinematics(x)[0:3,3]) else: #state representation = points pass def orientation(self, x=None): if x == None: x = self.x if self.state_rep == 'angles': return self.forward_kinematics(x)[0:3,0:3] else: pass def __str__(self): return 'ravebot[' + str(self.index) + ']' def dynamics(self, x, u): if self.state_rep == 'angles': thetas = x + u thetas = minimum(thetas, self.upper_limits) thetas = maximum(thetas, self.lower_limits) """ for i in range(thetas.shape[0]): if thetas[i] > self.upper_limits[i]: thetas[i] = self.upper_limits[i] elif thetas[i] < self.lower_limits[i]: thetas[i] = self.lower_limits[i] """ return thetas else: pass def camera_transform(self, x): camera_rel_transform = self.ravebot.GetAttachedSensor('camera').GetRelativeTransform() with self.env: self.ravebot.SetDOFValues(x, self.jointidxs) link_transform = mat(self.ravebot.GetLink('wam4').GetTransform()) camera_trans = link_transform * camera_rel_transform camera_quat = quatFromRotationMatrix(array(camera_trans[0:3,0:3])) camera_vec = mat(zeros((7,1))) camera_vec[0:3] = camera_trans[0:3,3] camera_vec[3:7] = mat(camera_quat).T return camera_vec def observe(self, scene, x=None): if x==None: x = self.x zs = robots.Robot.observe(self, scene, x) # also give joint angle observations #if zs.size > 0: # pass #zs = vstack((zs, mat('x[2]'))) #zs = vstack((zs, mat('x[3]'))) #else: # zs = mat('x[3]') return zs def forward_kinematics(self, thetas): with self.env: 
self.ravebot.SetDOFValues(thetas,self.jointidxs) return mat(self.ravebot_manip.GetEndEffectorTransform()) def inverse_kinematics(self, xyz): pass def draw_Cspace(self, X=None, color='blue'): pass def collision_penalty_trajectory(self, x, env): return 0 #Todo: FIXME def draw_trajectory(self, xs, mus=None, Sigmas=None, color=array((1.0, 0.0, 0.0, 0.2))): T = xs.shape[1] XYZ = mat(zeros((3,T))) for t in range(T): XYZ[:,t] = self.traj_pos(xs[:,t]) if mus != None and Sigmas != None: for t in range(T): mu_y, Sigma_y = unscented_transform(mus[:,t], Sigmas[:,:,t],\ lambda x: self.traj_pos(x)) # padding for positive definiteness Sigma_y = Sigma_y + 0.0001 * identity(3) self.handles.append(draw_ellipsoid(mu_y, Sigma_y, std_dev=2,\ env=self.env, colors=color)) #self.handles.append(self.env.drawlinestrip(points=array(((xyz[0], xyz[1], xyz[2]),(0.0, 0.0,0.0))), # linewidth=3.0)) self.handles.append(self.env.drawlinestrip(points=XYZ.T, linewidth=3.0, colors=color[0:3])) def draw(self, X=None, color=array((1.0, 0.0, 0.0))): if X == None: X = self.x xyz = self.traj_pos(X) with self.env: """ # works with only a few robots newrobot = RaveCreateRobot(self.env,self.ravebot.GetXMLId()) newrobot.Clone(self.ravebot,0) for link in newrobot.GetLinks(): for geom in link.GetGeometries(): geom.SetTransparency(0.6) self.env.Add(newrobot,True) newrobot.SetActiveDOFs(self.jointidxs) newrobot.SetDOFValues(X, self.jointidxs) self.handles.append(newrobot) """ self.handles.append(self.env.plot3(points=xyz, pointsize=1.0, colors=color))
yleo77/leetcode
To_Lower_Case/answer.py
Python
mit
372
0.002688
class Solution: def toLowerCase(self, str: str) -> str: rs = "" # 32 section = ord("a") - or
d("A")
for s in str: if ord(s) >= ord("A") and ord(s) <= ord("Z"): rs = rs + chr(ord(s) + section) else: rs = rs + s return rs sol = Solution() print(sol.toLowerCase("Hello"))
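# For comparison: the built-in str.lower() covers the same ASCII range (and
# Unicode besides), and joining a generator avoids quadratic concatenation:
print("Hello".lower())  # hello
print("".join(chr(ord(c) + 32) if "A" <= c <= "Z" else c for c in "Hello"))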
scholer/cadnano2.5
cadnano/controllers/__init__.py
Python
mit
298
0.003356
from .nucleicacidpartitemcontroller import NucleicAcidPartItemController from .
oligoitemcontroller import OligoItemController from .stranditemcontroller import StrandItemController from .viewroo
tcontroller import ViewRootController from .virtualhelixitemcontroller import VirtualHelixItemController
tensorflow/deepmath
deepmath/deephol/deephol_loop/report.py
Python
apache-2.0
8,295
0.005907
r""""DeepHOL large scale reporting in Apache Beam.""" from __future__ import absolute_import from __future__ import division # Import Type Annotations from __future__ import print_function import io import os import apache_beam as beam from apache_beam.metrics import Metrics import matplotlib.pyplot as plot import tensorflow as tf from tf import gfile from typing import List from typing import Text from google.protobuf import text_format from deepmath.deephol import deephol_pb2 from deepmath.deephol import io_util from deepmath.deephol.deephol_loop.missing import recordio from deepmath.deephol.deephol_loop.missing import runner from deepmath.deephol.utilities import deephol_stat_pb2 from deepmath.deephol.utilities import stats STATS_BASENAME = 'proof_stats' AGGREGATE_STAT_BASENAME = 'aggregate_stat' PROVEN_GOALS_BASENAME = 'proven_goals_fps' OPEN_GOALS_BASENAME = 'open_goals_fps' PROVEN_STATS_BASENAME = 'proven_stats' PRETTY_STATS_BASENAME = 'pretty_stats' CACTUS_PLOT_FILE_NAME = 'cactus.pdf' CACTUS_DATA_FILE_NAME = 'cactus.dat' class StatDoFn(beam.DoFn): """Beam DoFn for statistics generation.""" def __init__(self): self.processed_counter = Metrics.counter(self.__class__, 'processed') self.proven_counter = Metrics.counter(self.__class__, 'proven') self.attempted_counter = Metrics.counter(self.__class__, 'attempted') self.nodes_counter = Metrics.counter(self.__class__, 'nodes') def process(self, proof_log: deephol_pb2.ProofLog ) -> List[deephol_stat_pb2.ProofStat]: self.processed_counter.inc() s = stats.proof_log_stats(proof_log) self.proven_counter.inc(s.num_theorems_proved) self.attempted_counter.inc(s.num_theorems_attempted) self.nodes_counter.inc(s.num_nodes) return [s] class AggregateStatsFn(beam.CombineFn): """Beam CombineFn for statistics aggregation.""" def create_accumulator(self): return deephol_stat_pb2.ProofAggregateStat() def add_input(self, target, source): stats.merge_stat(target, source) return target def merge_accumulators(self, aggregate_stats): result = deephol_stat_pb2.ProofAggregateStat() for s in aggregate_stats: stats.merge_aggregate_stat(result, s) return result def extract_output(self, result): return result class UniqueFn(beam.CombineFn): """De-duping combinator for Beam.""" def create_accumulator(self): return set() def add_input(self, target, source): target.add(source) return target def merge_accumulators(self, sets): result = set() for s in sets: result.update(s) return result def extract_output(self, result): return '\n'.join([str(x) for x in result]) def proven_or_open(proof_stat): if proof_stat.num_theorems_proved > 0: yield beam.pvalue.TaggedOutput('proven', '%d' % proof_stat.theorem_fingerprint) else: yield beam.pvalue.TaggedOutput('open', '%d' % proof_stat.theorem_fingerprint) def make_proof_logs_collection(root, proof_logs: Text): return (root | 'Create' >> recordio.ReadFromRecordIO( proof_logs, beam.coders.ProtoCoder(deephol_pb2.ProofLog))) def reporting_pipeline(proof_logs_collection, stats_out: Text, aggregate_stats: Text, proven_goals: Text, open_goals: Text): """A pipeline reporting aggregate statistics and proved theorems. Args: proof_logs_collection: beam collection of proof logs. stats_out: Filename for outputting per proof statistics. aggregate_stats: Filename for storing aggregated statistics proven_goals: Filename for the fingerprint of proven goals. open_goals: Filename for the fingerprint of open goals. Returns: A beam pipeline for writing statistics. 
""" proof_stats = (proof_logs_collection | 'Stats' >> beam.ParDo(StatDoFn())) _ = proof_stats | 'WriteStats' >> recordio.WriteToRecordIO( file_path_prefix=stats_out, coder=beam.coders.ProtoCoder(deephol_stat_pb2.ProofStat)) _ = ( proof_stats | 'AggregateStats' >> beam.CombineGlobally(AggregateStatsFn()) | 'MapProtoToString' >> beam.Map(text_format.MessageToString) | 'WriteAggregates' >> beam.io.WriteToText(aggregate_stats, '.pbtxt')) results = proof_stats | ( 'ProvenOrOpen' >> beam.FlatMap(proven_or_open).with_outputs()) _ = ( results.proven | 'UniqueProven' >> beam.CombineGlobally(UniqueFn()) | 'WriteProven' >> beam.io.WriteToText(proven_goals, '.txt')) _ = ( results.open | 'UniqueOpen' >> beam.CombineGlobally(UniqueFn()) | 'WriteOpen' >> beam.io.WriteToText(open_goals, '.txt')) def file_lines_set(fname): with gfile.Open(fname) as f: return set([line.rstrip() for line in f]) class ReportingPipeline(object): """Top level class to manage a reporting pipeline.""" def __init__(self, out_dir: Text): self.out_dir = out_dir gfile.MakeDirs(out_dir) self.proof_stats_filename = os.path.join(out_dir, STATS_BASENAME) self.aggregate_stat_filename = os.path.join(out_dir, AGGREGATE_STAT_BASENAME) self.proven_goals_filename = os.path.join(out_dir, PROVEN_GOALS_BASENAME) self.open_goals_filename = os.path.join(out_dir, OPEN_GOALS_BASENAME) self.proven_stats_filename = os.path.join(out_dir, PROVEN_STATS_BASENAME) self.pretty_stats_filename = os.path.join(out_dir, PRETTY_STATS_BASENAME) self.cactus_plot_filename = os.path.join(out_dir, CACTUS_PLOT_FILE_NAME) self.cactus_data_filename = os.path.join(out_dir, CACTUS_DATA_FILE_NAME) def setup_pipeline(self, proof_logs_collection): reporting_pipeline(proof_logs_collection, self.proof_stats_filename, self.aggregate_stat_filename, self.proven_goals_filename, self.open_goals_filename) def write_final_stats(self): """Log and write final aggregated statistics to file system.""" fname = self.aggregate_stat_filename + '-00000-of-00001.pbtxt' aggregate_stat = io_util.load_text_proto( fname, deephol_stat_pb2.ProofAggregateStat, 'aggregate statistics') if aggregate_stat is None: tf.logging.warning('Could not read aggregate statistics "%s"', fname) return tf.logging.info('Stats:\n%s', stats.aggregate_stat_to_string(aggregate_stat))
open_goals =
file_lines_set(self.open_goals_filename + '-00000-of-00001.txt') proven_goals = file_lines_set(self.proven_goals_filename + '-00000-of-00001.txt') never_proven = open_goals - proven_goals num_open_goals = len(never_proven) num_proven_goals = len(proven_goals) tf.logging.info('Open goals: %d', num_open_goals) tf.logging.info('Proved goals: %d', num_proven_goals) perc_proven = 100.0 * num_proven_goals / float(num_open_goals + num_proven_goals) tf.logging.info('Percentage proven: %.2f', perc_proven) with gfile.Open(self.proven_stats_filename, 'w') as f: f.write('%d %d %.2f\n' % (num_open_goals, num_proven_goals, perc_proven)) with gfile.Open(self.pretty_stats_filename, 'w') as f: f.write('%s\n' % stats.detailed_statistics(aggregate_stat)) # Write cactus plot if aggregate_stat.proof_closed_after_millis: cactus_data = list(aggregate_stat.proof_closed_after_millis) cactus_data.sort() with gfile.Open(self.cactus_data_filename, 'w') as f: f.write('\n'.join(map(str, cactus_data))) fig = plot.figure() plot.xlabel('Number of proofs closed') plot.ylabel('Wall clock time in s') plot.plot([ms * .001 for ms in cactus_data]) # convert to seconds buf = io.BytesIO() fig.savefig(buf, format='pdf', bbox_inches='tight') with gfile.Open(self.cactus_plot_filename, 'wb') as f: f.write(buf.getvalue()) def run_pipeline(self, proof_logs: Text): def pipeline(root): proof_logs_collection = make_proof_logs_collection(root, proof_logs) self.setup_pipeline(proof_logs_collection) runner.
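# A hedged driver sketch; the paths are illustrative and the call pattern is
# inferred from the class above:
report = ReportingPipeline(out_dir='/tmp/deephol_report')
report.run_pipeline('proof_logs.recordio')
report.write_final_stats()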
shedskin/shedskin
tests/28.py
Python
gpl-3.0
210
0.009524
def propagate(la): #
la: [list(int)] print la, la
# [str], [str] propagate([1]) # [] propagate([2]) # []
maurobaraldi/brms
brms/_settings/production.py
Python
mit
491
0.002037
from b
rms.settings.base import * import dj_database_url DEBUG = False ALLOWED_HOSTS = ['.example.com'] # Use the cached template loader so template is compiled once and read from # memory instead of reading from disk on each load. TEMPLATES[0]['OPTIONS']['loaders'] = [ ('django.template.loaders.cached.Loader', [ 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]), ] DATABASES['default'] = dj_dat
abase_url.config()
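# dj_database_url.config() reads the DATABASE_URL environment variable, e.g.
#   DATABASE_URL=postgres://user:password@db.example.com:5432/brms
# An explicit fallback can be supplied instead (illustrative value):
#   DATABASES['default'] = dj_database_url.config(default='sqlite:///db.sqlite3')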
rodriguesrl/reddit-clone-udemy
posts/migrations/0002_auto_20170307_1920.py
Python
mit
419
0
# -*
- coding: utf-8 -*- # Generated by Django 1.10.5 on 2017-03-07 19:20 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('posts', '0001_initial'),
] operations = [ migrations.AlterField( model_name='post', name='url', field=models.URLField(), ), ]
UdK-VPT/Open_eQuarter
mole/extensions/eval_enev/oeq_AHDE.py
Python
gpl-2.0
1,413
0.008499
# -*- coding: utf-8 -*-
import os, math
from qgis.core import NULL
from mole import oeq_global
from mole.project import config
from mole.extensions import OeQExtension
from mole.stat_corr import rb_contemporary_base_uvalue_by_building_age_lookup


def calculation(self=None, parameters={}, feature=None):
    from math import floor, ceil
    from PyQt4.QtCore import QVariant
    ahde = NULL
    if not oeq_global.isnull([parameters['HLAE']]):
        # air-change heat loss for standard rooms: 40 kWh/m2a
        # (per Geiger, "Lüftung im Wohnungsbau"); 20% of the total area
        # is taken up by stairs and corridors, hence the 0.8 factor
        ahde = float(parameters['HLAE']) + 40.0 * 0.8
    return {'AHDE': {'type': QVariant.Double, 'value': ahde}}


extension = OeQExtension(
    extension_id=__name__,
    category='Evaluation',
    subcategory='Building',
    extension_name='AHD Building per Living Area EnEV',
    layer_name='Annual Heat Demand (per Living Area, EnEV)',
extension_filepath=os.path.join(__file__), colortable = os.path.join(os.path.splitext(__file__)[0] + '.qml'), field_id='AHDE', source_type='none', par_in=['HLAE'], sourcelayer_name=config.data_layer_name, targetlayer_name=config.data_layer_name, active=True, show_results=['AHDE'], description=u"Calculate EnEV Annual Heat Demand per Living Area", evaluation_method=calculation) extension.registerExtension(default=True)
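# Quick arithmetic check of the formula above (illustration only): for
# HLAE = 100 kWh/m2a, AHDE = 100.0 + 40.0 * 0.8 = 132.0 kWh/m2a, i.e. the
# 40 kWh/m2a air-change loss is weighted by 0.8 because 20% of the gross
# area (stairs, corridors) does not count as living area.
assert 100.0 + 40.0 * 0.8 == 132.0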
GbalsaC/bitnamiP
edx-val/edxval/serializers.py
Python
agpl-3.0
4,408
0.001361
""" Serializers for Video Abstraction Layer Serialization is usually sent through the VideoSerializer which uses the EncodedVideoSerializer which uses the profile_name as it's profile field. """ from rest_framework import serializers from django.core.exceptions import ValidationError from edxval.models import Profile, Video, EncodedVideo, Subtitle, CourseVideo class EncodedVideoSerializer(serializers.ModelSerializer): """ Serializer for EncodedVideo object. Uses the profile_name as it's profile value instead of a Profile object. """ profile = serializers.SlugRelatedField(slug_field="profile_name") class Meta: # pylint: disable=C1001, C0111 model = EncodedVideo fields = ( "created", "modified", "url", "file_size", "bitrate", "profile", ) def get_identity(self, data): """ This hook is required for bulk update. We need to override the default, to use the slug as the identity. """ return data.get('profile', None) class SubtitleSerializer(serializers.ModelSerializer): """ Serializer for Subtitle objects """ content_url = serializers.CharField(source='get_absolute_url', read_only=True) content = serializers.CharField(write_only=True) def validate_content(self, attrs, source): """ Validate that the subtitle is in the corre
ct format """ value = attrs[source] if attrs.get('fmt') ==
'sjson': import json try: loaded = json.loads(value) except ValueError: raise serializers.ValidationError("Not in JSON format") else: attrs[source] = json.dumps(loaded) return attrs class Meta: # pylint: disable=C1001, C0111 model = Subtitle lookup_field = "id" fields = ( "fmt", "language", "content_url", "content", ) class CourseSerializer(serializers.RelatedField): """ Field for CourseVideo """ def to_native(self, value): return value.course_id def from_native(self, data): if data: course_video = CourseVideo(course_id=data) course_video.full_clean(exclude=["video"]) return course_video class VideoSerializer(serializers.ModelSerializer): """ Serializer for Video object encoded_videos takes a list of dicts EncodedVideo data. """ encoded_videos = EncodedVideoSerializer(many=True, allow_add_remove=True) subtitles = SubtitleSerializer(many=True, allow_add_remove=True, required=False) courses = CourseSerializer(many=True, read_only=False) url = serializers.SerializerMethodField('get_url') class Meta: # pylint: disable=C1001, C0111 model = Video lookup_field = "edx_video_id" exclude = ('id',) def get_url(self, obj): """ Return relative url for the object """ return obj.get_absolute_url() def restore_fields(self, data, files): """ Overridden function used to check against duplicate profile names. Converts a dictionary of data into a dictionary of deserialized fields. Also checks if there are duplicate profile_name(s). If there is, the deserialization is rejected. """ reverted_data = {} if data is not None and not isinstance(data, dict): self._errors['non_field_errors'] = ['Invalid data'] return None try: profiles = [ev["profile"] for ev in data.get("encoded_videos", [])] if len(profiles) != len(set(profiles)): self._errors['non_field_errors'] = ['Invalid data: duplicate profiles'] except KeyError: raise ValidationError("profile required for deserializing") except TypeError: raise ValidationError("profile field needs to be a profile_name (str)") for field_name, field in self.fields.items(): field.initialize(parent=self, field_name=field_name) try: field.field_from_native(data, files, field_name, reverted_data) except ValidationError as err: self._errors[field_name] = list(err.messages) return reverted_data
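# An illustrative payload that restore_fields() rejects because two encoded
# videos share a profile (all field values here are made up):
_bad_payload = {
    'edx_video_id': 'demo-id',
    'encoded_videos': [
        {'profile': 'desktop', 'url': 'http://example.com/a.mp4'},
        {'profile': 'desktop', 'url': 'http://example.com/b.mp4'},
    ],
}
# restore_fields() would then set:
#   self._errors['non_field_errors'] = ['Invalid data: duplicate profiles']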
praekelt/jmbo-janrain
janrain/urls.py
Python
bsd-3-clause
70
0
fro
m django.conf.urls.defaults import patterns, url urlpattern
s = ()
jstammers/EDMSuite
EDMScripts/EDMLoop_neg_slope.py
Python
mit
14,423
0.026555
# Import a whole load of stuff from System.IO import * from System.Drawing import * from System.Runtime.Remoting import * from System.Threading import * from System.Windows.Forms import * from System.Xml.Serialization import * from System import * from Analysis.EDM import * from DAQ.Environment import * from EDMConfig import * def saveBlockConfig(path, config): fs = FileStream(path, FileMode.Create) s = XmlSerializer(BlockConfig) s.Serialize(fs,config) fs.Close() def loadBlockConfig(path): fs = FileStream(path, FileMode.Open) s = XmlSerializer(BlockConfig) bc = s.Deserialize(fs) fs.Close() return bc def writeLatestBlockNotificationFile(cluster, blockIndex): fs = FileStream(Environs.FileSystem.Paths["settingsPath"] + "\\BlockHead\\latestBlock.txt", FileMode.Create) sw = StreamWriter(fs) sw.WriteLine(cluster + "\t" + str(blockIndex)) sw.Close() fs.Close() def checkYAGAndFix(): interlockFailed = hc.YAGInterlockFailed; if (interlockFailed): bh.StopPattern(); bh.StartPattern(); def printWaveformCode(bc, name): print(name + ": " + str(bc.GetModulationByName(name).Waveform.Code) + " -- " + str(bc.GetModulationByName(name).Waveform.Inverted)) def prompt(text): sys.stdout.write(text) return sys.stdin.readline().strip() def measureParametersAndMakeBC(cluster, eState, bState, rfState, scramblerV, probePolAngle, pumpPolAngle): fileSystem = Environs.FileSystem print("Measuring parameters ...") bh.StopPattern() hc.UpdateRFPowerMonitor() hc.UpdateRFFrequencyMonitor() bh.StartPattern() hc.UpdateBCurrentMonitor() hc.UpdateVMonitor() hc.UpdateI2AOMFreqMonitor() print("V plus: " + str(hc.CPlusMonitorVoltage * hc.CPlusMonitorScale)) print("V minus: " + str(hc.CMinusMonitorVoltage * hc.CMinusMonitorScale)) print("Bias: " + str(hc.BiasCurrent)) print("B step: " + str(abs(hc.FlipStepCurrent))) print("DB step: " + str(abs(hc.CalStepCurrent))) # load a default BlockConfig and customise it appropriately settingsPath = fileSystem.Paths["settingsPath"] + "\\BlockHead\\" bc = loadBlockConfig(settingsPath + "default.xml") bc.Settings["cluster"] = cluster bc.Settings["eState"] = eState bc.Settings["bState"] = bState bc.Settings["rfState"] = rfState bc.Settings["phaseScramblerV"] = scramblerV bc.Settings["probePolarizerAngle"] = probePolAngle bc.Settings["pumpPolarizerAngle"] = pumpPolAngle bc.Settings["ePlus"] = hc.CPlusMonitorVoltage * hc.CPlusMonitorScale bc.Settings["eMinus"] = hc.CMinusMonitorVoltage * hc.CMinusMonitorScale bc.GetModulationByName("B").Centre = (hc.BiasCurrent)/1000 bc.GetModulationByName("B").Step = abs(hc.FlipStepCurrent)/1000 bc.GetModulationByName("DB").Step = abs(hc.CalStepCurrent)/1000 # these next 3, seemingly redundant, lines are to preserve backward compatibility bc.GetModulationByName("B").PhysicalCentre = (hc.BiasCurrent)/1000 bc.GetModulationByName("B").PhysicalStep = abs(hc.FlipStepCurrent)/1000 bc.GetModulationByName("DB").PhysicalStep = abs(hc.CalStepCurrent)/1000 bc.GetModulationByName("RF1A").Centre = hc.RF1AttCentre bc.GetModulationByName("RF1A").Step = hc.RF1AttStep bc.GetModulationByName("RF1A").PhysicalCentre = hc.RF1PowerCentre bc.GetModulationByName("RF1A").PhysicalStep = hc.RF1PowerStep bc.GetModulationByName("RF2A").Centre = hc.RF2AttCentre bc.GetModulationByName("RF2A").Step = hc.RF2AttStep bc.GetModulationByName("RF2A").PhysicalCentre = hc.RF2PowerCentre bc.GetModulationByName("RF2A").PhysicalStep = hc.RF2PowerStep bc.GetModulationByName("RF1F").Centre = hc.RF1FMCentre bc.GetModulationByName("RF1F").Step = hc.RF1FMStep bc.GetModulationByName("RF1F").PhysicalCentre = 
hc.RF1FrequencyCentre bc.GetModulationByName("RF1F").PhysicalStep = hc.RF1FrequencyStep bc.GetModulationByName("RF2F").Centre = hc.RF2FMCentre bc.GetModulationByName("RF2F").Step = hc.RF2FMStep bc.GetModulationByName("RF2F").PhysicalCentre = hc.RF2FrequencyCentre bc.GetModulationByName("RF2F").PhysicalStep = hc.RF2FrequencyStep bc.GetModulationByName("LF1").Centre = hc.FLPZTVoltage bc.GetModulationByName("LF1").Step = hc.FLPZTStep bc.GetModulationByName("LF1").PhysicalCentre = hc.I2LockAOMFrequencyCentre bc.GetModulationByName("LF1").PhysicalStep = hc.I2LockAOMFrequencyStep # generate the waveform codes print("Generating waveform codes ...") eWave = bc.GetModulationByName("E").Waveform eWave.Name = "E" lf1Wave = bc.GetModulationByName("LF1").Waveform lf1Wave.Name = "LF1" ws = WaveformSetGenerator.GenerateWaveforms( (eWave, lf1Wave), ("B","DB","PI","RF1A","RF2A","RF1F","RF2F") ) bc.GetModulationByName("B").Waveform = ws["B"] bc.GetModulationByName("DB").Waveform = ws["DB"] bc.GetModulationByName("PI").Waveform = ws["PI"] bc.GetModulationByName("RF1A").Waveform = ws["RF1A"] bc.GetModulationByName("RF2A").Waveform = ws["RF2A"] bc.GetModulationByName("RF1F").Waveform = ws["RF1F"] bc.GetModulationByName("RF2F").Waveform = ws["RF2F"] # change the inversions of the static codes E and LF1 bc.GetModulationByName("E").Waveform.Inverted = WaveformSetGenerator.RandomBool() bc.GetModulationByName("LF1").Waveform.Inverted = WaveformSetGenerator.RandomBool() # print the waveform codes # printWaveformCode(bc, "E") # printWaveformCode(bc, "B") # printWaveformCode(bc, "DB") # printWaveformCode(bc, "PI") # printWaveformCode(bc, "RF1A") # printWaveformCode(bc, "RF2A") # printWaveformCode(bc, "RF1F") # printWaveformCode(bc, "RF2F") # printWaveformCode(bc, "LF1") # store e-switch info in block config print("Storing E switch parameters ...") bc.Settings["eRampDownTime"] = hc.ERampDownTime bc.Settings["eRampDownDelay"] = hc.ERampDownDelay bc.Settings["eBleedTime"] = hc.EBleedTime bc.Settings["eSwitchTime"] = hc.ESwitchTime bc.Settings["eRampUpTime"] = hc.ERampUpTime bc.Settings["eRampUpDelay"] = hc.ERampUpDelay # this is for legacy analysis compatibility bc.Settings["eDischargeTime"] = hc.ERampDownTime + hc.ERampDownDelay bc.Settings["eChargeTime"] = hc.ERampUpTime + hc.ERampUpDelay # store the E switch asymmetry in the block bc.Settings["E0PlusBoost"] = hc.E0PlusBoost return bc # lock gains # microamps of current per volt of control input kSteppingBiasCurrentPerVolt = 1000.0 # max change in the b-bias voltage per block kBMaxChange = 0.05 # volts of rf*a input required per cal's worth of offset kRFAVoltsPerCal = 3.2 kRFAMaxChange = 0.1 # volts of rf*f input required per cal's worth of offset kRFFVoltsPerCal = 8 kRFFMaxChange = 0.1 def updateLocks(bState): pmtChannelValues = bh.DBlock.ChannelValues[0] # note the weird python syntax for a one element list sigIndex = pmtChannelValues.GetChannelIndex(("SIG",)) sigValue = pmtChannelValues.GetValue(sigIndex) bIndex = pmtChannelValues.GetChannelIndex(("B",)) bValue = pmtChannelValues.GetValue(bIndex) #bError = pmtChannelValues.GetError(bIndex) dbIndex = pmtChannelValues.GetChannelIndex(("DB",)) dbValue = pmtChannelValues.GetValue(dbIndex) #dbError = pmtChannelValues.GetError(dbIndex) rf1aIndex = pmtChannelValues.GetChannelIndex(("RF1A","DB")) rf1aValue = pmtChannelValues.GetValue(rf1aIndex) #rf1aError = pmtChannelValues.GetError(rf1aIndex) rf2aIndex = pmtChannelValues.GetChannelIndex(("RF2A","DB")) rf2aValue = pmtChannelValues.GetValue(rf2aIndex) #rf2aError = 
pmtChannelValues.GetError(rf2aIndex) rf1fIndex = pmtChannelValues.GetChannelIndex(("RF1F","DB")) rf1fValue = pmtChannelValues.GetValue(rf1fIndex) #rf1fError = pmtChannelValues.GetError(rf1fIndex) rf2fIndex = pmtChannelValues.GetChannelIndex(("RF2F","DB")) rf2fValue = pmtChannelValues.GetValue(rf2fIndex) #rf2fError = pmtChannelValues.GetError(rf2fIndex) lf1Index = pmtChannelValu
es.GetChannelIndex(("LF1",)) lf1Value = pmtChannelValues.GetValue(lf1Index) #lf1Error = pmtChannelValues.GetError(lf1Index) lf1dbIndex = pmtChannelValues.GetChannelIndex(("LF1","DB")
) lf1dbValue = pmtChannelValues.GetValue(lf1dbIndex) print "SIG: " + str(sigValue) print "B: " + str(bValue) + " DB: " + str(dbValue) print "RF1A: " + str(rf1aValue) + " RF2A: " + str(rf
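# A hedged sketch of how the gain constants above are typically applied:
# convert a measured offset (in units of the calibration channel, "cals")
# into a control-voltage correction and clamp it to the per-block limit.
# The helper name and the rf1aValue/dbValue ratio are illustrative, not
# taken from this script.
def clampedCorrection(offsetInCals, voltsPerCal, maxChange):
    correction = offsetInCals * voltsPerCal
    if correction > maxChange:
        correction = maxChange
    if correction < -maxChange:
        correction = -maxChange
    return correction

# e.g. clampedCorrection(rf1aValue / dbValue, kRFAVoltsPerCal, kRFAMaxChange)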
Lemma1/MAC-POSTS
doc_builder/sphinx-contrib/_template/setup.py
Python
mit
1,194
0.000838
# -*- coding: utf-8 -*- from setuptools import setup, find_packages long_desc = ''' This package contains the ${name} Sphinx extension. .. add description here .. ''' requires = ['Sphinx>=0.6'] setup( name='sphinxcontrib-${name}', version='0.1', url='http://bitbucket.org/birkenfeld/sphinx-contrib', download_url='http://pypi.python.org/pypi/sphinxcontrib-${name}', license='BSD',
aut
hor='${author}', author_email='${author_email}', description='Sphinx "${name}" extension', long_description=long_desc, zip_safe=False, classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', 'Environment :: Web Environment', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Framework :: Sphinx :: Extension', #'Framework :: Sphinx :: Theme', 'Topic :: Documentation', 'Topic :: Utilities', ], platforms='any', packages=find_packages(), include_package_data=True, install_requires=requires, namespace_packages=['sphinxcontrib'], )
varepsilon/clickmodels
clickmodels/inference.py
Python
bsd-3-clause
36,839
0.005076
from collections import defaultdict from datetime import datetime import gc import json import math import random import sys from .config_sample import MAX_ITERATIONS, DEBUG, PRETTY_LOG, MAX_DOCS_PER_QUERY, SERP_SIZE, TRANSFORM_LOG, QUERY_INDEPENDENT_PAGER, DEFAULT_REL class NotImplementedError(Exception): pass class ClickModel: """ An abstract click model interface. """ def __init__(self, ignoreIntents=True, ignoreLayout=True, config=None): self.config = config if config is not None else {} self.ignoreIntents = ignoreIntents self.ignoreLayout = ignoreLayout def train(self, sessions): """ Trains the model. """ pass def test(self, sessions, reportPositionPerplexity=True): """ Evaluates the prediciton power of the click model for a given sessions. Returns the log-likelihood, perplexity, position perplexity (perplexity for each rank a.k.a. position in a SERP) and separate perplexity values for clicks and non-clicks (skips). """ logLikelihood = 0.0 positionPerplexity = [0.0] * self.config.get('MAX_DOCS_PER_QUERY', MAX_DOCS_PER_QUERY) positionPerplexityClickSkip = [[0.0, 0.0] \ for i in xrange(self.config.get('MAX_DOCS_PER_QUERY', MAX_DOCS_PER_QUERY))] counts = [0] * self.config.get('MAX_DOCS_PER_QUERY', MAX_DOCS_PER_QUERY) countsClickSkip = [[0, 0] \ for i in xrange(self.config.get('MAX_DOCS_PER_QUERY', MAX_DOCS_PER_QUERY))] possibleIntents = [False] if self.ignoreIntents else [False, True] for s in sessions: iw = s.intentWeight intentWeight = {False: 1.0} if self.ignoreIntents else {False: 1 - iw, True: iw} clickProbs = self._get_click_probs(s, possibleIntents) N = min(len(s.clicks), self.config.get('MAX_DOCS_PER_QUERY', MAX_DOCS_PER_QUERY)) if self.config.get('DEBUG', DEBUG): assert N > 1 x = sum(clickProbs[i][N // 2] * intentWeight[i] for i in possibleIntents) / sum(clickProbs[i][N // 2 - 1] * intentWeight[i] for i in possibleIntents) s.clicks[N // 2] = 1 if s.clicks[N // 2] == 0 else 0 clickProbs2 = self._get_click_probs(s, possibleIntents) y = sum(clickProbs2[i][N // 2] * intentWeight[i] for i in possibleIntents) / sum(clickProbs2[i][N // 2 - 1] * intentWeight[i] for i in possibleIntents) assert abs(x + y - 1) < 0.01, (x, y) # Marginalize over possible intents: P(C_1, ..., C_N) = \sum_{i} P(C_1, ..., C_N | I=i) P(I=i) logLikelihood += math.log(sum(clickProbs[i][N - 1] * intentWeight[i] for i in possibleIntents)) / N correctedRank = 0 # we are going to skip clicks on fake pager urls for k, click in enumerate(s.clicks): click = 1 if click else 0 if s.extraclicks.get('TRANSFORMED', False) and \ (k + 1) % (self.config.get('SERP_SIZE', SERP_SIZE) + 1) == 0: if self.config.get('DEBUG', DEBUG): assert s.results[k] == 'PAGER' continue
# P(C_k | C_1, ..., C_{k-1}) = \sum_I P(C_1, ..., C_k | I) P(I) / \sum_I P(C_1, ..., C_{k-1} | I) P(I) curClick = dict((i, clickProbs[i][k]) for i in possibleIntents) prevClick = dict((i, clickProbs[i][k - 1]) for i in possibleIntents) if k > 0 else dict((i, 1.0) for i in possibleIntents) logProb = math.log(sum(curClick[i] * intentWeight[i] for i in pos
sibleIntents), 2) - math.log(sum(prevClick[i] * intentWeight[i] for i in possibleIntents), 2)
                positionPerplexity[correctedRank] += logProb
                positionPerplexityClickSkip[correctedRank][click] += logProb
                counts[correctedRank] += 1
                countsClickSkip[correctedRank][click] += 1
                correctedRank += 1
        positionPerplexity = [2 ** (-x / count if count else x) for (x, count) in zip(positionPerplexity, counts)]
        positionPerplexityClickSkip = [[2 ** (-x[click] / (count[click] if count[click] else 1) if count else x) \
                for (x, count) in zip(positionPerplexityClickSkip, countsClickSkip)] for click in xrange(2)]
        perplexity = sum(positionPerplexity) / len(positionPerplexity)

        if reportPositionPerplexity:
            return logLikelihood / len(sessions), perplexity, positionPerplexity, positionPerplexityClickSkip
        else:
            return logLikelihood / len(sessions), perplexity

    def _get_click_probs(self, s, possible_intents):
        """
            Returns click probabilities list for a given list of s.clicks.
            For each intent $i$ and each rank $k$ we have:
            click_probs[i][k-1] = P(C_1, ..., C_k | I=i)
        """
        click_probs = dict((i, [0.5 ** (k + 1) for k in xrange(len(s.clicks))]) for i in possible_intents)
        return click_probs

    def get_loglikelihood(self, sessions):
        """
            Returns the average log-likelihood of the current model
            for given sessions.
            This is a lightweight version of the self.test() method.
        """
        return sum(self.get_log_click_probs(s) for s in sessions) / len(sessions)

    def get_log_click_probs(self, session):
        """
            Returns an average log-likelihood for a given session,
            i.e. log-likelihood of all the click events, divided
            by the number of documents in the session.
        """
        possibleIntents = [False] if self.ignoreIntents else [False, True]
        intentWeight = {False: 1.0} if self.ignoreIntents else \
                {False: 1 - session.intentWeight, True: session.intentWeight}
        clickProbs = self._get_click_probs(session, possibleIntents)
        N = min(len(session.clicks), self.config.get('MAX_DOCS_PER_QUERY', MAX_DOCS_PER_QUERY))
        # Marginalize over possible intents: P(C_1, ..., C_N) = \sum_{i} P(C_1, ..., C_N | I=i) P(I=i)
        return math.log(sum(clickProbs[i][N - 1] * intentWeight[i] for i in possibleIntents)) / N

    def get_model_relevances(self, session, intent=False):
        """
            Returns estimated relevance of each document in a given session
            based on a trained click model.
        """
        raise NotImplementedError

    def predict_click_probs(self, session, intent=False):
        """
            Predicts click probabilities for a given session. Does not use session.clicks.
            This is a vector of P(C_k = 1 | E_k = 1) for different ranks $k$.
        """
        raise NotImplementedError

    def predict_stop_probs(self, session, intent=False):
        """
            Predicts stop probabilities (after click) for each document in a session.
            This is often referred to as satisfaction probability.
            This is a vector of P(S_k = 1 | C_k = 1) for different ranks $k$.
        """
        raise NotImplementedError

    def get_abandonment_prob(self, rank, intent=False, layout=None):
        """
            Predicts probability of stopping without click after examining document at rank `rank`.
        """
        return 0.0

    def generate_clicks(self, session):
        """
            Generates clicks for a given session,
            assuming cascade examination order.
        """
        clicks = [0] * len(session.results)
        # First, randomly select user intent.
        intent = False      # non-vertical intent by default
        if not self.ignoreIntents:
            random_intent_prob = random.uniform(0, 1)
            if random_intent_prob < session.intentWeight:
                intent = True
        predicted_click_probs = self.predict_click_probs(session, intent)
        predicted_stop_probs = self.predict_stop_probs(session, intent)
        for rank, result in enumerate(session.results):
            random_click_prob = random.uniform(0, 1)
            clicks[rank] = 1 if random_click_prob < predicted_click_probs[rank] else 0
            if clicks[rank] == 1:
                random_st
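ClickModel.test() above reports perplexity as 2 raised to the negative average log2-likelihood of the observed clicks and skips. A self-contained sketch of that formula, not part of the repository's API:

import math

def perplexity(probs):
    # probs[k] is the predicted probability of the observed click/skip at rank k
    n = len(probs)
    return 2 ** (-sum(math.log(p, 2) for p in probs) / n)

print(perplexity([0.5, 0.5, 0.5]))    # 2.0 -- a coin-flip model
print(perplexity([0.9, 0.8, 0.95]))   # ~1.13 -- a more confident model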
ywcui1990/nupic
src/nupic/data/stats.py
Python
agpl-3.0
6,351
0.013384
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import os import pickle from pkg_resources import resource_filename from nupic.regions.record_sensor import RecordSensor from nupic.data.file_record_stream import FileRecordStream """ Generate column statistics for a StandardSource. Each entry in statsInfo corresponds to one column, and contains a list of statistics that should be computed for that column. Known statistics are: for floating point or integer values: number -- min, max, mean for string or integer values: category -- list of all unique values and count The model for a stats object is that you call the constructor with the first value, and then add values with add(). (The alternative would be no args for the constructor, and all values would be added with add()). There are two reasons for this: - no initialization check required every time we add a value - getStats() can always return a valid result """ class NumberStatsCollector(object): validTypes = [int, float] def __init__(self): self.min = 0 self.max = 0 self.sum = 0 self.n = 0 self.initialized = False def _addFirst(self, value): if type(value) not in self.validTypes: raise RuntimeError("NumberStatsCollector -- value '%s' is not a valid type" % value) value = float(value) self.min = value self.max = value self.sum = value self.n = 1 self.initialized = True def add(self, value): if not self.initialized: self.
_addFirst(value) return value = float(value) if value < self.min: self.min = value if value > self.max: self.max = value self.sum += value self.n += 1 def getStats(self): return dict(min = self.min, max =
self.max,
                sum = self.sum,
                n = self.n,
                average = self.sum / self.n)


class CategoryStatsCollector(object):
  def __init__(self):
    self.categories = dict()

  def add(self, value):
    self.categories[value] = self.categories.get(value, 0) + 1

  def getStats(self):
    return dict(categories = self.categories)


def getStatsFilename(filename, statsInfo, filters=[]):
  if not os.path.isabs(filename):
    raise RuntimeError("Filename %s is not an absolute path" % filename)
  if not filename.endswith(".csv"):
    raise RuntimeError("generateStats only supports csv files: %s" % filename)
  d = os.path.dirname(filename)
  basename = os.path.basename(filename).replace("csv", "stats")
  sstring = "stats"
  for key in statsInfo:
    sstring += "_" + key
  if len(filters) > 0:
    sstring += "_filters"
  for filter in filters:
    sstring += "_" + filter.getShortName()
  statsFilename = os.path.join(d, sstring + "_" + basename)
  return statsFilename


def generateStats(filename, statsInfo, maxSamples = None, filters=[], cache=True):
  """Generate requested statistics for a dataset and cache to a file.
  If filename is None, then don't cache to a file"""

  # Sanity checking
  if not isinstance(statsInfo, dict):
    raise RuntimeError("statsInfo must be a dict -- "
                       "found '%s' instead" % type(statsInfo))

  filename = resource_filename("nupic.datafiles", filename)

  if cache:
    statsFilename = getStatsFilename(filename, statsInfo, filters)
    # Use cached stats if found AND if it has the right data
    if os.path.exists(statsFilename):
      try:
        r = pickle.load(open(statsFilename, "rb"))
      except:
        # Ok to ignore errors -- we will just re-generate the file
        print "Warning: unable to load stats for %s -- " \
              "will regenerate" % filename
        r = dict()
      requestedKeys = set([s for s in statsInfo])
      availableKeys = set(r.keys())
      unavailableKeys = requestedKeys.difference(availableKeys)
      if len(unavailableKeys) == 0:
        return r
      else:
        print "generateStats: re-generating stats file %s because " \
              "keys %s are not available" % \
              (filename, str(unavailableKeys))
        os.remove(statsFilename)

  print "Generating statistics for file '%s' with filters '%s'" % (filename, filters)
  sensor = RecordSensor()
  sensor.dataSource = FileRecordStream(filename)
  sensor.preEncodingFilters = filters

  # Convert collector description to collector object
  stats = []
  for field in statsInfo:
    # field = key from statsInfo
    if statsInfo[field] == "number":
      # This wants a field name e.g. consumption and the field type as the value
      statsInfo[field] = NumberStatsCollector()
    elif statsInfo[field] == "category":
      statsInfo[field] = CategoryStatsCollector()
    else:
      raise RuntimeError("Unknown stats type '%s' for field '%s'" % (statsInfo[field], field))

  # Now collect the stats
  if maxSamples is None:
    maxSamples = 500000
  for i in xrange(maxSamples):
    try:
      record = sensor.getNextRecord()
    except StopIteration:
      break
    for (name, collector) in statsInfo.items():
      collector.add(record[name])

  del sensor

  # Assemble the results and return
  r = dict()
  for (field, collector) in statsInfo.items():
    stats = collector.getStats()
    if field not in r:
      r[field] = stats
    else:
      r[field].update(stats)

  if cache:
    f = open(statsFilename, "wb")
    pickle.dump(r, f)
    f.close()
    # caller may need to know name of cached file
    r["_filename"] = statsFilename

  return r
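Both collectors above are dependency-free, so their contract is easy to exercise directly. A short usage sketch with hypothetical values, assuming the two classes are in scope:

nums = NumberStatsCollector()
for v in (3, 1, 4, 1, 5):
    nums.add(v)
print(nums.getStats())   # {'min': 1.0, 'max': 5.0, 'sum': 14.0, 'n': 5, 'average': 2.8}

cats = CategoryStatsCollector()
for v in ('GET', 'POST', 'GET'):
    cats.add(v)
print(cats.getStats())   # {'categories': {'GET': 2, 'POST': 1}}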
scattermagic/django-wizard-builder
wizard_builder/migrations/0009_pagebase_to_questionpage.py
Python
bsd-3-clause
1,476
0.001355
# -*- coding: utf-8 -*- # Generated by Django 1.11.1 on 2017-08-02 21:54 from __future__ import unicode_literals from django.db import migrations, models def copy_to_question_page(apps, schema_editor): current_database = schema_editor.connection.alias QuestionPage = apps.get_model('wizard_builder.QuestionPage') for page
in QuestionPage.objects.using(current_database): page.new_position = page.position page.new_section = page.section for site in page.sites.all(): page.new_sites.add(site) page.save() class Migration(migrations.Migration): dependencies = [ ('sites', '0002_alter_domain_unique'),
('wizard_builder', '0008_remove_textpage'), ] operations = [ migrations.AddField( model_name='questionpage', name='new_position', field=models.PositiveSmallIntegerField(default=0, verbose_name='position'), ), migrations.AddField( model_name='questionpage', name='new_section', field=models.IntegerField(choices=[(1, 'When'), (2, 'Where'), (3, 'What'), (4, 'Who')], default=1), ), migrations.AddField( model_name='questionpage', name='new_sites', field=models.ManyToManyField(to='sites.Site'), ), migrations.RunPython( copy_to_question_page, reverse_code=migrations.RunPython.noop, ), ]
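The migration above copies data forward and declares migrations.RunPython.noop as its reverse, so rolling back leaves the new_* columns populated until the AddField operations are themselves reversed. A hedged sketch of what an explicit reverse could look like instead; this function is hypothetical and not part of the migration above:

def clear_question_page(apps, schema_editor):
    # Hypothetical reverse for copy_to_question_page: reset the copied fields.
    current_database = schema_editor.connection.alias
    QuestionPage = apps.get_model('wizard_builder.QuestionPage')
    for page in QuestionPage.objects.using(current_database):
        page.new_position = 0
        page.new_section = 1
        page.new_sites.clear()
        page.save()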
chrys87/fenrir
src/fenrirscreenreader/screenDriver/ptyDriver.py
Python
lgpl-3.0
9,221
0.010845
#!/bin/python
# -*- coding: utf-8 -*-

# Fenrir TTY screen reader
# By Chrys, Storm Dragon, and contributors.

import os, struct, sys, pty, tty, termios, shlex, signal, pyte, time, fcntl, getpass
from select import select

from fenrirscreenreader.core import debug
from fenrirscreenreader.core.eventData import fenrirEventType
from fenrirscreenreader.core.screenDriver import screenDriver
from fenrirscreenreader.utils import screen_utils

class fenrirScreen(pyte.Screen):
    def set_margins(self, *args, **kwargs):
        kwargs.pop("private", None)
        super(fenrirScreen, self).set_margins(*args, **kwargs)

class Terminal:
    def __init__(self, columns, lines, p_in):
        self.text = ''
        self.attributes = None
        self.screen = fenrirScreen(columns, lines)
        self.screen.write_process_input = \
            lambda data: p_in.write(data.encode())
        self.stream = pyte.ByteStream()
        self.stream.attach(self.screen)

    def feed(self, data):
        self.stream.feed(data)

    def updateAttributes(self, initialize = False):
        buffer = self.screen.buffer
        lines = None
        if not initialize:
            lines = self.screen.dirty
        else:
            lines = range(self.screen.lines)
        self.attributes = [[list(attribute[1:]) + [False, 'default', 'default'] for attribute in line.values()] for line in buffer.values()]
        for y in lines:
            try:
                t = self.attributes[y]
            except:
                self.attributes.append([])
                self.attributes[y] = [list(attribute[1:]) + [False, 'default', 'default'] for attribute in (buffer[y].values())]
            if len(
self.attributes[y]) < self.screen.columns: diff = self.screen.columns - len(self.attributes[y]) self.attributes[y] += [['default', 'default', False, False, False, False, False, False, 'default', 'default']] * diff def resize(self, lines, columns): self.screen.resize(lines, co
lumns) self.setCursor() self.updateAttributes(True) def setCursor(self, x = -1, y = -1): xPos = x yPos = y if xPos == -1: xPos = self.screen.cursor.x if yPos == -1: yPos = self.screen.cursor.y self.screen.cursor.x = min(self.screen.cursor.x, self.screen.columns - 1) self.screen.cursor.y = min(self.screen.cursor.y, self.screen.lines - 1) def GetScreenContent(self): cursor = self.screen.cursor self.text = '\n'.join(self.screen.display) self.updateAttributes(self.attributes == None) self.screen.dirty.clear() return {"cursor": (cursor.x, cursor.y), 'lines': self.screen.lines, 'columns': self.screen.columns, "text": self.text, 'attributes': self.attributes.copy(), 'screen': 'pty', 'screenUpdateTime': time.time(), }.copy() class driver(screenDriver): def __init__(self): screenDriver.__init__(self) self.signalPipe = os.pipe() self.p_out = None self.terminal = None self.p_pid = -1 signal.signal(signal.SIGWINCH, self.handleSigwinch) def initialize(self, environment): self.env = environment self.command = self.env['runtime']['settingsManager'].getSetting('general','shell') self.shortcutType = self.env['runtime']['inputManager'].getShortcutType() self.env['runtime']['processManager'].addCustomEventThread(self.terminalEmulation) def getCurrScreen(self): self.env['screen']['oldTTY'] = 'pty' self.env['screen']['newTTY'] = 'pty' def injectTextToScreen(self, msgBytes, screen = None): if not screen: screen = self.p_out.fileno() if isinstance(msgBytes, str): msgBytes = bytes(msgBytes, 'UTF-8') os.write(screen, msgBytes) def getSessionInformation(self): self.env['screen']['autoIgnoreScreens'] = [] self.env['general']['prevUser'] = getpass.getuser() self.env['general']['currUser'] = getpass.getuser() def readAll(self, fd, timeout = 0.3, interruptFd = None, len = 65536): msgBytes = b'' fdList = [] fdList += [fd] if interruptFd: fdList += [interruptFd] starttime = time.time() while True: r = screen_utils.hasMoreWhat(fdList, 0.0001) # nothing more to read if not fd in r: break data = os.read(fd, len) if data == b'': raise EOFError msgBytes += data # exit on interrupt available if interruptFd in r: break # respect timeout but wait a little bit of time to see if something more is here if (time.time() - starttime) >= timeout: break return msgBytes def openTerminal(self, columns, lines, command): p_pid, master_fd = pty.fork() if p_pid == 0: # Child. argv = shlex.split(command) env = os.environ.copy() #values are VT100,xterm-256color,linux try: if env["TERM"] == '': env["TERM"] = 'linux' except: env["TERM"] = 'linux' os.execvpe(argv[0], argv, env) # File-like object for I/O with the child process aka command. 
p_out = os.fdopen(master_fd, "w+b", 0) return Terminal(columns, lines, p_out), p_pid, p_out def resizeTerminal(self,fd): s = struct.pack('HHHH', 0, 0, 0, 0) s = fcntl.ioctl(0, termios.TIOCGWINSZ, s) fcntl.ioctl(fd, termios.TIOCSWINSZ, s) lines, columns, _, _ = struct.unpack('hhhh', s) return lines, columns def getTerminalSize(self, fd): s = struct.pack('HHHH', 0, 0, 0, 0) lines, columns, _, _ = struct.unpack('HHHH', fcntl.ioctl(fd, termios.TIOCGWINSZ, s)) return lines, columns def handleSigwinch(self, *args): os.write(self.signalPipe[1], b'w') def terminalEmulation(self,active , eventQueue): try: old_attr = termios.tcgetattr(sys.stdin) tty.setraw(0) lines, columns = self.getTerminalSize(0) if self.command == '': self.command = screen_utils.getShell() self.terminal, self.p_pid, self.p_out = self.openTerminal(columns, lines, self.command) lines, columns = self.resizeTerminal(self.p_out) self.terminal.resize(lines, columns) fdList = [sys.stdin, self.p_out, self.signalPipe[0]] while active.value: r, _, _ = select(fdList, [], [], 1) # none if r == []: continue # signals if self.signalPipe[0] in r: os.read(self.signalPipe[0], 1) lines, columns = self.resizeTerminal(self.p_out) self.terminal.resize(lines, columns) # input if sys.stdin in r: try: msgBytes = self.readAll(sys.stdin.fileno(), len=4096) except (EOFError, OSError): eventQueue.put({"Type":fenrirEventType.StopMainLoop,"Data":None}) break if self.shortcutType == 'KEY': try: self.injectTextToScreen(msgBytes) except: eventQueue.put({"Type":fenrirEventType.StopMainLoop,"Data":None}) break else: eventQueue.put({"Type":fenrirEventType.ByteInput, "Data":msgBytes }) # output if self.p_out in r: try: msgBytes = self.readAll(self.p_out.fileno(), interruptFd=sys.stdin.fileno()) except (EOFError, OSError): eventQueue.put({"Type":fenrirEventType.StopMainLoop,"Data":Non
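getTerminalSize() and resizeTerminal() above rely on the TIOCGWINSZ ioctl, which fills a struct winsize of four unsigned shorts (rows, columns, x-pixels, y-pixels). A standalone sketch of the same pattern:

import fcntl
import struct
import sys
import termios

def terminal_size(fd):
    # Ask the kernel for the window size of the terminal behind fd.
    buf = struct.pack('HHHH', 0, 0, 0, 0)
    rows, cols, _, _ = struct.unpack('HHHH', fcntl.ioctl(fd, termios.TIOCGWINSZ, buf))
    return rows, cols

if __name__ == '__main__':
    print(terminal_size(sys.stdin.fileno()))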
ortoloco/jordbruksmark
jordbruksmark/migrations/0003_auto_20161217_2150.py
Python
gpl-3.0
472
0.002119
# -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2016-12-17 20:50 from __future__ import unicode_
literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('jordbruksmark', '0002_auto_20161217_2140'), ] operations = [ migrations.AlterModelOptions(
name='wochen_menge', options={'verbose_name': 'Wochen Menge', 'verbose_name_plural': 'Wochen Mengen'}, ), ]
stackforge/cloudbase-init
cloudbaseinit/metadata/services/nocloudservice.py
Python
apache-2.0
11,949
0
# Copyright 2020 Cloudbase Solutions Srl # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import netaddr from oslo_log import log as oslo_logging from cloudbaseinit import conf as cloudbaseinit_conf from cloudbaseinit import exception from cloudbaseinit.metadata.services import base from cloudbaseinit.metadata.services import baseconfigdrive from cloudbaseinit.models import network as network_model from cloudbaseinit.utils import debiface from cloudbaseinit.utils import network as network_utils from cloudbaseinit.utils import serialization CONF = cloudbaseinit_conf.CONF LOG = oslo_logging.getLogger(__name__) class NoCloudNetworkConfigV1Parser(object): NETWORK_LINK_TYPE_PHY = 'physical' NETWORK_LINK_TYPE_BOND = 'bond' NETWORK_LINK_TYPE_VLAN = 'vlan' NETWORK_SERVICE_NAMESERVER = 'nameserver' SUPPORTED_NETWORK_CONFIG_TYPES = [ NETWORK_LINK_TYPE_PHY, NETWORK_LINK_TYPE_BOND, NETWORK_LINK_TYPE_VLAN, NETWORK_SERVICE_NAMESERVER ] def _parse_subnets(self, subnets, link_name): networks = [] if not subnets or not isinstance(subnets, list): LOG.warning("Subnets '%s' is empty or not a list.", subnets) return networks for subnet in subnets: if not isinstance(subnet, dict): LOG.warning("Subnet '%s' is not a dictionary", subnet) continue if subnet.get("type") in ["dhcp", "dhcp6"]: continue routes = [] for route_data in subnet.get("routes", []): route_netmask = route_data.get("netmask") route_network = route_data.get("network") route_network_cidr = network_utils.ip_netmask_to_cidr( route_network, route_netmask) route_gateway = route_data.get("gateway") route = network_model.Route( network_cidr=route_network_cidr, gateway=route_gateway ) routes.append(route) address_cidr = subnet.get("address") netmask = subnet.get("netmask") if netmask: address_cidr = network_utils.ip_netmask_to_cidr( address_cidr, netmask) gateway = subnet.get("gateway") if gateway: # Map the gateway as a default route, depending on the # IP family / version (4 or 6) gateway_net_cidr = "0.0.0.0/0" if netaddr.valid_ipv6(gateway): gateway_net_cidr = "::/0" routes.append( network_model.Route( network_cidr=gateway_net_cidr, gateway=gateway ) ) networks.append(network_model.Network( link=link_name, address_cidr=address_cidr, dns_nameservers=subnet.get("dns_nameservers"), routes=routes )) return networks def _parse_physical_config_item(self, item): if not item.get('name'): LOG.warning("Physical NIC d
oes not have a name.") return link = network_model.Link( id=item.get('name'), name=item.get('name'), type=network_model.LINK_TYPE_PHYSICAL, enabled=True, mac_address=item.get('mac_address'), mtu=item.get('mtu'), bond=None, vlan_link=None, vlan_id=None ) return network_model.NetworkDetailsV2( links=[link],
networks=self._parse_subnets(item.get("subnets"), link.name), services=[] ) def _parse_bond_config_item(self, item): if not item.get('name'): LOG.warning("Bond does not have a name.") return bond_params = item.get('params') if not bond_params: LOG.warning("Bond does not have parameters") return bond_mode = bond_params.get('bond-mode') if bond_mode not in network_model.AVAILABLE_BOND_TYPES: raise exception.CloudbaseInitException( "Unsupported bond mode: %s" % bond_mode) bond_lacp_rate = None if bond_mode == network_model.BOND_TYPE_8023AD: bond_lacp_rate = bond_params.get('bond-lacp-rate') if (bond_lacp_rate and bond_lacp_rate not in network_model.AVAILABLE_BOND_LACP_RATES): raise exception.CloudbaseInitException( "Unsupported bond lacp rate: %s" % bond_lacp_rate) bond_xmit_hash_policy = bond_params.get('xmit_hash_policy') if (bond_xmit_hash_policy and bond_xmit_hash_policy not in network_model.AVAILABLE_BOND_LB_ALGORITHMS): raise exception.CloudbaseInitException( "Unsupported bond hash policy: %s" % bond_xmit_hash_policy) bond_interfaces = item.get('bond_interfaces') bond = network_model.Bond( members=bond_interfaces, type=bond_mode, lb_algorithm=bond_xmit_hash_policy, lacp_rate=bond_lacp_rate, ) link = network_model.Link( id=item.get('name'), name=item.get('name'), type=network_model.LINK_TYPE_BOND, enabled=True, mac_address=item.get('mac_address'), mtu=item.get('mtu'), bond=bond, vlan_link=None, vlan_id=None ) return network_model.NetworkDetailsV2( links=[link], networks=self._parse_subnets(item.get("subnets"), link.name), services=[] ) def _parse_vlan_config_item(self, item): if not item.get('name'): LOG.warning("VLAN NIC does not have a name.") return link = network_model.Link( id=item.get('name'), name=item.get('name'), type=network_model.LINK_TYPE_VLAN, enabled=True, mac_address=item.get('mac_address'), mtu=item.get('mtu'), bond=None, vlan_link=item.get('vlan_link'), vlan_id=item.get('vlan_id') ) return network_model.NetworkDetailsV2( links=[link], networks=self._parse_subnets(item.get("subnets"), link.name), services=[] ) def _parse_nameserver_config_item(self, item): return network_model.NetworkDetailsV2( links=[], networks=[], services=[network_model.NameServerService( addresses=item.get('address', []), search=item.get('search') )] ) def _get_network_config_parser(self, parser_type): parsers = { self.NETWORK_LINK_TYPE_PHY: self._parse_physical_config_item, self.NETWORK_LINK_TYPE_BOND: self._parse_bond_config_item, self.NETWORK_LINK_TYPE_VLAN: self._parse_vlan_config_item, self.NETWORK_SERVICE_NAMESERVER: self._parse_nameserver_config_item } parser = parsers.get(parser_type) if not parser: raise exception.CloudbaseInitException( "Network config parser '%s' does not exist", parser_type) return parser def parse(self, network_config): links = [] networks = [] services = [] if not network_config: L
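network_utils.ip_netmask_to_cidr() is imported rather than shown in this record; a stand-in with the same observable behaviour can be built on the stdlib ipaddress module. This is an assumption about the helper's contract, not the project's implementation:

import ipaddress

def ip_netmask_to_cidr(address, netmask):
    # Combine an address and a dotted netmask into CIDR notation.
    return str(ipaddress.ip_interface(u'%s/%s' % (address, netmask)))

print(ip_netmask_to_cidr('192.168.1.10', '255.255.255.0'))   # 192.168.1.10/24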
hlzz/dotfiles
graphics/cgal/Segment_Delaunay_graph_Linf_2/developer_scripts/lsprotate90.py
Python
bsd-3-clause
636
0.023585
#!/usr/bin/env pyt
hon import sys def inv(s): if s[0] == '-': return s[1:] elif s[0] == '+': return '-' + s[1:] else: # plain number return '-' + s if len(sys.argv) != 1: print 'Usage:', sys.argv[0] sys.exit(1) for line in sys.stdin: linesplit = line.strip().split() if len(linesplit) == 3: assert(linesplit[0] == 'p') print('p ' + inv(linesplit[2]) + ' ' + linesplit[1]) elif len(linesplit) == 5: assert(linesplit[0]
== 's') print('s ' + \ inv(linesplit[2]) + ' ' + linesplit[1] + ' ' + \ inv(linesplit[4]) + ' ' + linesplit[3] ) elif len(linesplit) == 0: print
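A worked example of the transform implemented above: the script rotates the plane 90 degrees counter-clockwise, mapping (x, y) to (-y, x), with inv() flipping the sign of a coordinate kept as text. The point line "p 1 -2" therefore becomes "p 2 1", and the segment line "s 0 0 3 4" becomes "s -0 0 -4 3" (inv('0') yields the literal string '-0', which numeric readers parse as zero).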
anparser/anparser
anparser/plugins/other_plugins/yara_parser.py
Python
gpl-3.0
3,312
0.001208
# -*- coding: utf-8 -*-
"""
anparser - an Open Source Android Artifact Parser
Copyright (C) 2015 Preston Miller

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""

__author__ = 'prmiller91'
__license__ = 'GPLv3'
__date__ = '20150129'
__version__ = '0.00'

from collections import OrderedDict
import logging
import yara
import pandas as pd

path = None
match = None
yara_list = []


def yara_parser(file_list, rules_path):
    """
    Parses files for Malware signatures with Yara

    :param file_list: List of all files
    :param rules_path: Path to custom Yara rules
    :return: DataFrame of matches
    """
    try:
        rules = yara.compile(rules_path)
    except (yara.libyara_wrapper.YaraSyntaxError, IOError) as exception:
        msg = 'Yara Rule Compilation Error: {0:s}'.format(rules_path + ' > ' + str(exception))
        print(msg)
        logging.error(msg)
        raise IOError

    for file_path in file_list:
        match = None
        try:
            match = rules.match(file_path)
        except yara.libyara_wrapper.YaraMatchError as exception:
            msg = 'Yara Match Error: {0:s}'.format(file_path + ' > ' + str(exception))
            logging.error(msg)
            pass

        if match:
            yara_processor(match, file_path)

    return pd.DataFrame(yara_list)


def yara_processor(match, path):
    """
    Processes Yara Match for Output

    :param match: A single yara match
    :param path: File path for match
    :return:
    """
    yara_data = OrderedDict()
    for key in match.keys():
        rule = match[key][0]['rule']
        matches = match[key][0]['matches']
        strings = match[key][0]['strings']
        meta = match[key][0]['meta']
        tags = match[key][0]['tags']

        for string in strings:
            yara_data['File Path'] = path
            yara_data['Rule'] = rule
            yara_data['Matches'] = str(matches)
            if meta != {}:
                try:
                    yara_data['Author'] = meta['author']
                except KeyError:
                    yara_data['Author'] = ''
                try:
                    yara_data['Description'] = meta['description']
                except KeyError:
                    yara_data['Description'] = ''
else: yara_data['Author'] = '' yara_data['Description'] = '' yara_data['Flag'] = string['flags'] yara_data['Identifier'] = string['identifier'] yara_data['Data'] = string['data'] yara_data['Offset'] = string['offset'] if tags == []: yara_data['Tags'] = '' else: yara_data['Tags'] = tags yara_list.append(yara_data) yara_data = Ord
eredDict()
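A hedged usage sketch for the module above; the rule path and evidence files are hypothetical, and the two functions are assumed to be in scope:

file_list = ['/evidence/app.apk', '/evidence/classes.dex']         # hypothetical inputs
matches_df = yara_parser(file_list, '/rules/android_malware.yar')  # hypothetical rules file
if not matches_df.empty:
    # Columns come from yara_processor() above.
    print(matches_df[['File Path', 'Rule', 'Identifier', 'Offset']])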
RetailMeNotSandbox/dart
src/python/dart/model/api_key.py
Python
mit
370
0.002703
from dart.model.ba
se imp
ort BaseModel, dictable @dictable class ApiKey(BaseModel): def __init__(self, id, user_id, api_key, api_secret): """ :type user_id: str :type api_key: str :type api_secret: str """ self.id = id self.user_id = user_id self.api_key = api_key self.api_secret = api_secret
benjaminhabbel/motion_recorder
old/button_loop.py
Python
gpl-3.0
770
0
import time import recordlib if __name__ == "__main__": recordlib.initialize() print("waiting for input") recordlib.logging.info("waiting for input") try: # define interrupt, get rising signal, debounce pin recordlib.GPIO.add_event_detect( recordlib.TASTER_1, recordlib.GPIO.RISING, callback=recordlib.start_recording,
bouncetime=1000 ) recordlib.GPIO.add_event_detect( recordlib.TASTER_2, recordlib.GPIO.RISING, callback=recordlib.stop_recording, bouncetime=1000 ) # keep script running while True: time
.sleep(0.5) finally: recordlib.GPIO.cleanup() print("\nQuit\n")
VDBWRAIR/bactpipeline
test/test_fix_fastq.py
Python
gpl-2.0
2,468
0.040519
from __future__ import print_function from imports import * import common class Base( common.Base ): pass class TestUnitMiSeqToNewbler( Base ): def _C( self, *args, **kwargs ): from bactpipeline.fix_fastq import miseq_to_newbler_id return miseq_to_newbler_id( *args, **kwargs ) def test_r1_correct( self ): r = self._C( 'abcd 1' ) eq_( 'abcd#0/1 (abcd 1)', r ) def test_r2_correct( self ): r = self._C( 'abcd 2' ) eq_( 'abcd#0/2 (abcd 2)', r ) class TestUnitModFqRead( Base ): def _C( self, *args, **kwargs ): from bactpipelin
e.fix_fastq import mod_fq_read return mod_fq_read( *args, **kwargs ) def test_mods_correctly( s
elf ): from bactpipeline.fix_fastq import miseq_to_newbler_id as mtni id = 'abcd 1' seq = 'ATGC' qual = 'IIII' r = self._C( id, seq, qual ) read = '{0}\n{1}\n+\n{2}\n'.format(mtni(id),seq,qual) eq_( read, r ) class TestUnitParseFq( Base ): def _C( self, *args, **kwargs ): from bactpipeline.fix_fastq import parse_fq return parse_fq( *args, **kwargs ) def fake_fq( self ): with open( 'fake.fq', 'w' ) as fh: for i in range( 1, 101 ): fh.write( '@abcd:{0} {1}\n'.format( i, (i%2)+1) ) fh.write( 'ACGT\n' ) fh.write( '+\n' ) fh.write( 'IIII\n' ) return 'fake.fq' def test_parses( self ): fq = self.fake_fq() r = self._C( fq ) for id, seq, qual in r: ids = id.split() x = ids[0].split(':') eq_( '@abcd', x[0] ) eq_( 'ACGT', seq ) eq_( 'IIII', qual ) class TestFunctional( Base ): def sample_files( self ): fixdir = join( dirname(__file__), 'fixtures', 'fix_fastq' ) return glob( join( fixdir, '*.fastq' ) ) def _C( self, *args, **kwargs ): script = 'fix_fastq' cmd = [script] if kwargs.get('outdir',False): cmd += ['-o', kwargs.get('outdir')] cmd += list(*args) print(cmd) return subprocess.call( cmd ) def test_runs_correctly( self ): fastqs = self.sample_files() r = self._C( fastqs ) eq_( 0, r ) ok_( exists( 'outdir' ), 'did not create outdir by default' ) fqs = os.listdir( 'outdir' ) eq_( set([]), set([basename(fq) for fq in fastqs]) - set(fqs) )
sdpython/ensae_teaching_cs
_unittests/ut_special/test_LONG_image2.py
Python
mit
2,586
0.001933
""" @brief test log(time=200s) """ import os import unittest import math import warnings from pyquickhelper.loghelper import fLOG from pyquickhelper.pycode import get_temp_folder, is_travis_or_appveyor from ensae_teaching_cs.special.image.image_synthese_base import Vecteur, Couleur, Source, Repere from ensae_teaching_cs.special.image.image_synthese_sphere import Sphere from ensae_teaching_cs.special.image.image_synthese_phong import ScenePhong from ensae_teaching_cs.special.image.image_synthese_facette import Rectangle from ensae_teaching_cs.special.image.image_synthese_facette_image import RectangleImage, SphereReflet class TestImageSyntheseImage(unittest.TestCase): def test_scene_image(self): fLOG( __file__, self._testMethodName, OutputPrint=__name__ == "__main__") temp = get_temp_folder(__file__, "temp_scene_bette") image = os.path.join(temp, "..", "data", "bette_davis.png") s = ScenePhong(Repere(), math.pi / 1.5, 400, 200) s.ajoute_source(Source(Vecteur(0, 8, 8), Couleur(0.4, 0.4, 0.4))) s.ajoute_source(Source(Vecteur(10, 0, 0), Couleur(0.4, 0.4, 0.4))) s.ajoute_source(Source(Vecteur(8, 8, 4.5), Couleur(0.4, 0.4, 0.4))) s.ajoute_objet(Sphere(Vecteur(3, -4, 7), 1, Couleur(1, 0, 0))) s.ajoute_objet(SphereReflet(Vecteur(0, -400, 12), 396, Couleur(0.5, 0.5, 0.5), 0.5)) s.ajoute_source(Source(Vecteur(7, 2, 8), Couleur(0.2, 0.2, 0.2))) s.ajoute_source(Source(Vecteur(12.5, 3, 5), Couleur(0.2, 0.2, 0.2))) s.ajoute_source(Source(Vecteur(-12.5, 1, 6), Couleur(0.2, 0.2, 0.2))) s.ajoute_objet(Rectangle(Vecteur(-12.4, 0.99, 5.9), Vecteur(-12.6, 0.99, 5.9),
Vecteur(-12.6, 0.99, 6.1), None, Couleur(0, 0, 0))) if is_travis_or_appveyor() == "travis": warnings.warn("pygame is not available") return import pygame
s.ajoute_objet(RectangleImage(Vecteur(8, -3.5, 9), Vecteur(2, -3.5, 8), Vecteur(2, 3.8, 8), None, image, invertx=True, pygame=pygame)) from ensae_teaching_cs.helpers.pygame_helper import wait_event screen = pygame.display.set_mode(s.dim) screen.fill((255, 255, 255)) s.construit_image(screen, pygame=pygame, fLOG=fLOG) pygame.image.save(screen, os.path.join(temp, "scene_bette.png")) if __name__ == "__main__": wait_event(pygame) if __name__ == "__main__": unittest.main()
bchiroma/dreamproject
dream/simulation/Frame.py
Python
gpl-3.0
1,515
0.014521
# =========================================================================== # Copyright 2013 University of Limerick # # This file is part of DREAM. # # DREAM is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # DREAM is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with DREAM. If not, see <h
ttp://www.gnu.org/licenses/>. # =========================================================================== ''' Created on 18 Feb 2013 @author:
George ''' ''' models a frame entity. This can flow through the system and carry parts ''' from simpy import Resource from Globals import G from Entity import Entity #The entity object class Frame(Entity): type="Frame" capacity=4 #the number of parts that the frame can take def __init__(self, id=None, name=None,**kw): Entity.__init__(self,id=id,name = name) self.Res=Resource(self.capacity) #dimension data self.width=2.0 self.height=2.0 self.lenght=2.0 def getFrameQueue(self): return self.Res.users
aarestad/gradschool-stuff
xml-class/python-xml/JobMarkupLanguage/xparser.py
Python
gpl-2.0
2,877
0.014599
# # This is a parser that generates the document tree for you. # # To use this parser, create an instance of XElementParser: # parser = saxexts.make_parser() # xp = XElementParser(parser) # # If you have defined classes in the current environment, you might want ot # pass this environment *to* the parser, so your classes will be created as # tree nodes instead of the default (base) XElement class instances: # # # def MyElementClass1(XElement): ... # def MyElementClass2(XElement): ... # ... # # parser = saxexts.make_parser() # xp = XElementParser(parser, vars()) # # Once your parser is constructed, you can parse one or more documents as # follows: # doc_list = ['f1','f2','f3'] # -or- # doc_list = ['url1','url2','url3'] # # for doc in doc_list: # doc_tree = xp.process(doc) # print doc_tree.toXML() import string import sys import types from xml.sax import saxexts from xml.sax import saxlib from xelement import XElement, XTreeHandler class XElementParser: def __init__(self, outer_env={}, parser=None): if parser == None: self.parser = saxexts.XMLValParserFactory.make_parser() else: self.parser = parser self.parser_error_handler = ErrorPrinter() self.parser.setErrorHandler(self.parser_error_handler) self.xth = XTreeHandler(IgnoreWhiteSpace='yes', RemoveWhiteSpace='yes', CreateElementMap='yes', RequireUserClasses='yes') for x in outer_env.keys(): if type(outer_env[x]) == types.ClassType or isinstance(x, object): self.xth.registerElementClass(outer_env[x], x) self.parser.setDocumentHandler(self.xth) def process(self, document_uri): Ok=None try: self.parser
_error_handler.reset() self.parser.parse(document_uri) if self.parser_error_handler.has_errors(): raise "va
lidation failed" return self.xth.getDocument().getChild() except IOError,e: print "\nI/O Error: " + document_uri + ": " + str(e) except saxlib.SAXException,e: print "\nParse Error: " + document_uri + ": " + str(e) class ErrorPrinter: "A simple class that just prints error messages to standard out." def __init__(self): self.error_count = 0 def reset(self): self.error_count = 0 def has_errors(self): return self.error_count def warning(self, exception): print "Warning: %s %s" % (str(exception), exception.getMessage()) sys.exit(1) def error(self, exception): self.error_count = self.error_count + 1 print "Error: %s %s" % (str(exception), exception.getMessage()) def fatalError(self, exception): self.error_count = self.error_count + 1 print "Fatal Error: %s %s" % (str(exception), exception.getMessage())
susurrant-audio/scdown
scdown/celeryconfig.py
Python
mit
747
0
import os import re BROKER_URL = os.getenv("CLOUDAMQP_URL", 'amqp://') # BROKER_POOL_LIMIT = None MONGOLAB_URI = None MONGOLAB_DB = None URI_WITH_AUTH = None mongolab = os.getenv("MONGOLAB_UR
I") if mongolab is not None: uri_pat = r"mongodb://([^:]+):([^@]+)@([^:]+):(\d+)/(.+)" user, passwd, host, port, db = re.match(uri_pat, mongolab).groups() uri = "mongodb://{}:{}".format(host, port) MONGOLAB_URI = uri MONGOLAB_DB = db # CELERY_RESULT_BACKEND = uri # CELERY_MONGODB_BACKEND_SETTINGS = { # 'database': db, # 'user': user, # 'password': passwd # } CELERY_RESULT_BACKEND = BROKER_URL CELERY_TASK_SERIALIZER = 'json' CELERY_RESULT
_SERIALIZER = 'json' CELERY_ACCEPT_CONTENT = ['json']
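A quick demonstration of the URI regex above on a made-up (non-secret) value:

import re

uri_pat = r"mongodb://([^:]+):([^@]+)@([^:]+):(\d+)/(.+)"
example = "mongodb://user:secret@ds012345.mongolab.com:12345/mydb"   # hypothetical
user, passwd, host, port, db = re.match(uri_pat, example).groups()
print(host, port, db)   # ds012345.mongolab.com 12345 mydb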
kewisch/bedrock
bedrock/firefox/views.py
Python
mpl-2.0
18,915
0.000212
# -*- coding: utf-8 -*- # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import json import re from django.conf import settings from django.db.models import Q from django.http import ( Http404, HttpResponsePermanentRedirect, HttpResponseRedirect) from django.shortcuts import get_object_or_404 from django.views.decorators.csrf import csrf_exempt, csrf_protect from django.views.decorators.vary import vary_on_headers from django.views.generic.base import TemplateView import basket from funfactory.urlresolvers import reverse from jingo_minify.helpers import BUILD_ID_JS, BUNDLE_HASHES from lib import l10n_utils from rna.models import Release from bedrock.firefox import version_re from bedrock.firefox.forms import SMSSendForm from bedrock.mozorg.context_processors import funnelcake_param from bedrock.mozorg.decorators import cache_control_expires from bedrock.mozorg.views import process_partnership_form from bedrock.mozorg.helpers.misc import releasenotes_url from bedrock.firefox.utils import is_current_or_newer from bedrock.firefox.firefox_details import firefox_details, mobile_details from lib.l10n_utils.dotlang import _ UA_REGEXP = re.compile(r"Firefox/(%s)" % version_re) LANG_FILES = ['firefox/partners/index'] LOCALE_FXOS_HEADLINES = { 'de': { 'title': u"Firefox OS ist richtungsweisend für die Zukunft des " u"mobilen Marktes", 'url': 'http://blog.mozilla.org/press-de/2014/02/23/' 'firefox-os-ist-richtungsweisend-fur-die-zukunft-des-mobilen-' 'marktes', }, 'en-GB': { 'title': u'Firefox OS Unleashes the Future of Mobile', 'url': 'http://blog.mozilla.org/press-uk/2014/02/23/' 'firefox-os-unleashes-the-future-of-mobile' }, 'en-US': { 'title': _('Firefox OS Unleashes the Future of Mobile'), 'url': 'https://blog.mozilla.org/press/2014/02/firefox-os-future-2/', }, 'es-AR': { 'title': u'Firefox OS te desvela el futuro de lo móvil', 'url': 'http://blog.mozilla.org/press-latam/2014/02/23/' 'firefox-os-te-desvela-el-futuro-de-lo-movil/', }, 'es-CL': { 'title': u'Firefox OS te desvela el futuro de lo móvil', 'url': 'http://blog.mozilla.org/press-latam/2014/02/23/' 'firefox-os-te-desvela-el-futuro-de-lo-movil/', }, 'es-ES': { 'title': u'Firefox OS te desvela el futuro de lo móvil', 'url': 'https://blog.mozilla.org/press/2014/02/firefox-os-future-2/', }, 'es-MX': { 'title': u'Firefox OS te desvela el futuro de lo móvil', 'url': 'http://blog.mozilla.org/press-latam/2014/02/23/' 'firefox-os-te-desvela-el-futuro-de-lo-movil/', }, 'fr': { 'title': u'Firefox OS chamboule le futur du mobile', 'url': 'http://blog.mozilla.org/press-fr/2014/02/23/' 'firefox-os-chamboule-le-futur-du-mobile', }, 'it': { 'title': u'Firefox OS svela il futuro del mobile', 'url': 'http://blog.mozilla.org/press-it/2014/02/23/' 'firefox-os-svela-il-futuro-del-mobile', }, 'pl': { 'title': u'Firefox OS uwalnia przyszłość technologii mobilnej', 'url': 'http://blog.mozilla.org/press-pl/2014/02/23/' 'firefox-os-uwalnia-przyszlosc-technologii-mobilnej', }, 'pt-BR': { 'title': u'Firefox OS apresenta o futuro dos dispositivos móveis', 'url': 'https://blog.mozilla.org/press-br/2014/02/23/' 'firefox-os-apresenta-o-futuro-dos-dispositivos-moveis/', }, } INSTALLER_CHANNElS = [ 'release', 'beta', 'aurora', # 'nightly', # soon ] def get_js_bundle_files(bundle): """ Return a JSON string of the list of file names for lazy loaded javascript. 
""" # mostly stolen from jingo_minify.helpers.js if settings.DEBUG: items = settings.MINIFY_BUNDLES['js'][bundle] else: build_id = BUILD_ID_JS bundle_full = "js:%s" % bundle if bundle_full in BUNDLE_HASHES: build_id = BUNDLE_HASHES[bundle_full] items = ("js/%s-min.js?build=%s" % (bundle, build_id,),) return json.dumps([settings.MEDIA_URL + i for i in items]) JS_COMMON = get_js_bundle_files('partners_common') JS_MOBILE = get_js_bundle_files('partners_mobile') JS_DESKTOP = get_js_bundle_files('partners_desktop') def get_latest_version(
product='firefox', channel='release'): if channel == 'organizations': channel = 'esr' if product == 'mobile': return mobile_details.latest_version(channel) else: return firefox_details.latest_version(channe
l) def installer_help(request): installer_lang = request.GET.get('installer_lang', None) installer_channel = request.GET.get('channel', None) context = { 'installer_lang': None, 'installer_channel': None, } if installer_lang and installer_lang in firefox_details.languages: context['installer_lang'] = installer_lang if installer_channel and installer_channel in INSTALLER_CHANNElS: context['installer_channel'] = installer_channel return l10n_utils.render(request, 'firefox/installer-help.html', context) @csrf_exempt def sms_send(request): form = SMSSendForm(request.POST or None) if request.method == 'POST' and form.is_valid(): try: basket.send_sms(form.cleaned_data['number'], 'SMS_Android', form.cleaned_data['optin']) except basket.BasketException: msg = form.error_class( [_('An error occurred in our system. ' 'Please try again later.')] ) form.errors['__all__'] = msg else: return HttpResponseRedirect( reverse('firefox.android.sms-thankyou')) return l10n_utils.render(request, 'firefox/android/sms-send.html', {'sms_form': form}) def windows_billboards(req): major_version = req.GET.get('majorVersion') minor_version = req.GET.get('minorVersion') if major_version and minor_version: major_version = float(major_version) minor_version = float(minor_version) if major_version == 5 and minor_version == 1: return l10n_utils.render(req, 'firefox/unsupported/winxp.html') return l10n_utils.render(req, 'firefox/unsupported/win2k.html') def fx_home_redirect(request): return HttpResponseRedirect(reverse('firefox.new')) def dnt(request): response = l10n_utils.render(request, 'firefox/dnt.html') response['Vary'] = 'DNT' return response def all_downloads(request, channel): if channel is None: channel = 'release' if channel == 'organizations': channel = 'esr' version = get_latest_version('firefox', channel) query = request.GET.get('q') channel_names = { 'release': _('Firefox'), 'beta': _('Firefox Beta'), 'aurora': _('Firefox Aurora'), 'esr': _('Firefox Extended Support Release'), } return l10n_utils.render(request, 'firefox/all.html', { 'full_builds': firefox_details.get_filtered_full_builds(version, query), 'test_builds': firefox_details.get_filtered_test_builds(version, query), 'query': query, 'channel': channel, 'channel_name': channel_names[channel], }) @csrf_protect def firefox_partners(request): # If the current locale isn't in our list, return the en-US value press_locale = request.locale if ( request.locale in LOCALE_FXOS_HEADLINES) else 'en-US' template_vars = { 'locale_headline_url': LOCALE_FXOS_HEADLINES[press_locale]['url'], 'locale_headline_title': LOCALE_FXOS_HEADLINES[press_locale]['title'], 'js_common': JS_COMMON, 'js_mobile': JS_MOBILE, 'js_desktop': JS_DESKTOP, } form_kwargs = { 'interest_set': 'fx', 'lead_source': 'www.mozill
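UA_REGEXP above interpolates version_re, which is imported from bedrock.firefox and not shown in this record; a sketch with a stand-in pattern (the real version_re may differ):

import re

version_re = r'\d+(?:\.\d+)*'   # assumption: stand-in for bedrock.firefox.version_re
UA_REGEXP = re.compile(r"Firefox/(%s)" % version_re)

ua = "Mozilla/5.0 (X11; Linux x86_64; rv:29.0) Gecko/20100101 Firefox/29.0"
match = UA_REGEXP.search(ua)
if match:
    print(match.group(1))   # 29.0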
diegodelemos/reana-job-controller
reana_job_controller/job_manager.py
Python
mit
4,466
0.000224
# -*- coding: utf-8 -*-
#
# This file is part of REANA.
# Copyright (C) 2019 CERN.
#
# REANA is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.

"""Job Manager."""

import json
import shlex

from flask import current_app
from reana_commons.utils import calculate_file_access_time
from reana_db.database import Session
from reana_db.models import Job as JobTable
from reana_db.models import JobCache, JobStatus, Workflow


class JobManager:
    """Job management interface."""

    def __init__(
        self,
        docker_img="",
        cmd=[],
        prettified_cmd="",
        env_vars={},
        workflow_uuid=None,
        workflow_workspace=None,
        job_name=None,
    ):
        """Instantiates basic job.

        :param docker_img: Docker image.
        :type docker_img: str
        :param cmd: Command to execute.
        :type cmd: list
        :param prettified_cmd: prettified version of command to execute.
        :type prettified_cmd: str
        :param env_vars: Environment variables.
        :type env_vars: dict
        :param workflow_uuid: Unique workflow id.
        :type workflow_uuid: str
        :param workflow_workspace: Absolute path to workspace
        :type workflow_workspace: str
        :param job_name: Name of the job.
        :type job_name: str
        """
        self.docker_img = docker_img or ""
        if isinstance(cmd, str):
            self.cmd = shlex.split(cmd)
        else:
            self.cmd = cmd or []
        self.prettified_cmd = prettified_cmd
        self.workflow_uuid = workflow_uuid
        self.workflow_workspace = workflow_workspace
        self.job_name = job_name
        self.env_vars = self._extend_env_vars(env_vars)

    def execution_hook(fn):
        """Add before execution hooks and DB operations."""

        def wrapper(inst, *args, **kwargs):
            inst.before_execution()
            backend_job_id = fn(inst, *args, **kwargs)
            inst.create_job_in_db(backend_job_id)
            inst.cache_job()
            return backend_job_id

        return wrapper

    def before_execution(self):
        """Before job submission hook."""
        pass

    def after_e
xecution(self): """After job submission hook.""" pass @execution_hook def execute(self): """Execute a job. :returns: Job ID. :rtype: str """ raise NotImplementedError def get_status(self): """Get job status. :returns: j
ob status. :rtype: str """ raise NotImplementedError def get_logs(self): """Get job log. :returns: stderr, stdout of a job. :rtype: dict """ raise NotImplementedError def stop(self): """Stop a job.""" raise NotImplementedError def create_job_in_db(self, backend_job_id): """Create job in db.""" job_db_entry = JobTable( backend_job_id=backend_job_id, workflow_uuid=self.workflow_uuid, status=JobStatus.created.name, compute_backend=self.compute_backend, cvmfs_mounts=self.cvmfs_mounts or "", shared_file_system=self.shared_file_system or False, docker_img=self.docker_img, cmd=json.dumps(self.cmd), env_vars=json.dumps(self.env_vars), deleted=False, job_name=self.job_name, prettified_cmd=self.prettified_cmd, ) Session.add(job_db_entry) Session.commit() self.job_id = str(job_db_entry.id_) def cache_job(self): """Cache a job.""" workflow = ( Session.query(Workflow).filter_by(id_=self.workflow_uuid).one_or_none() ) access_times = calculate_file_access_time(workflow.workspace_path) prepared_job_cache = JobCache() prepared_job_cache.job_id = self.job_id prepared_job_cache.access_times = access_times Session.add(prepared_job_cache) Session.commit() def update_job_status(self): """Update job status in DB.""" pass def _extend_env_vars(self, env_vars): """Extend environment variables with REANA specific ones.""" prefix = "REANA" env_vars[prefix + "_WORKSPACE"] = self.workflow_workspace env_vars[prefix + "_WORKFLOW_UUID"] = str(self.workflow_uuid) return env_vars
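execution_hook above is a decorator defined inside the class body, so it wraps execute() at class-definition time and threads the DB bookkeeping around the backend submission. A minimal standalone sketch of the same pattern; the class and return value are hypothetical:

class MiniJobManager:
    def execution_hook(fn):
        # Plain function during class-body execution; usable as a decorator below.
        def wrapper(inst, *args, **kwargs):
            inst.before_execution()
            backend_job_id = fn(inst, *args, **kwargs)
            inst.create_job_in_db(backend_job_id)
            return backend_job_id
        return wrapper

    def before_execution(self):
        print('preparing job')

    def create_job_in_db(self, backend_job_id):
        print('recording job %s' % backend_job_id)

    @execution_hook
    def execute(self):
        return 'backend-123'   # pretend backend submission

print(MiniJobManager().execute())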
homhei/glance
glance/db/js.py
Python
apache-2.0
1,449
0.014493
#!/usr/bin/env python #encode=utf-8 #vim: tabstop=4 shiftwidth=4 softtabstop=4 #Created on 2013-6-24 #Copyright 2013 nuoqingyun xuqifeng from bson.code import Code traffic_map = Code("function () {" "emit(this.domain, this.bytes);" "}") traffic_reduce = Code("function (key, values) {" " var sum = 0;" " var count = 0;" " values.forEach(function(byte){" " sum += byte;" " count ++;" "});" " return {'sum':sum, 'count':count};" "}") traffic_reduce1 = Code("function (keyDomain, valuesBytes) {" " return Array.sum(valuesBytes);" "}") traffic_map_test = Code("function () {" "emit(this.domain, {bytes:this.bytes, visit:1, hits:this.code});" "}") traffic_reduce_test = Code("function (key, values) {" " var sum = 0;" " var count = 0;" " var visits = 0;" " values.forEach(function(vals){" " sum += vals.bytes;"
" count += vals.hits;" " visits += vals.visit;"
"});" " return {bytes:sum, visit:visits, hits:count};" "}")
ksmit799/Toontown-Source
toontown/parties/PartyCatchActivityToonSD.py
Python
mit
9,299
0.003979
from pandac.PandaModules import Vec3 from direct.interval.IntervalGlobal import Sequence, Parallel, Wait, Func from direct.interval.IntervalGlobal import LerpScaleInterval from direct.interval.IntervalGlobal import WaitInterval, ActorInterval, FunctionInterval from direct.task.Task import Task from direct.directnotify import DirectNotifyGlobal from direct.fsm import StateData from toontown.minigame.OrthoWalk import OrthoWalk from toontown.minigame.MinigameRulesPanel import MinigameRulesPanel from toontown.parties import PartyGlobals from direct.fsm import ClassicFSM, State class PartyCatchActivityToonSD(StateData.StateData): notify = DirectNotifyGlobal.directNotify.newCategory('PartyCatchActivityToonSD') FallBackAnim = 'slip-backward' FallFwdAnim = 'slip-forward' CatchNeutralAnim = 'catch-neutral' CatchRunAnim = 'catch-run' EatNeutralAnim = 'catch-eatneutral' EatNRunAnim = 'catch-eatnrun' animList = [FallBackAnim, FallFwdAnim, CatchNeutralAnim, CatchRunAnim, EatNeutralAnim, EatNRunAnim] def __init__(self, avId, activity): PartyCatchActivityToonSD.notify.debug('init : avId = %s, activity = %s ' % (avId, activity)) self.avId = avId self.activity = activity self.isLocal = avId == base.localAvatar.doId self.toon = self.activity.getAvatar(self.avId) self.unexpectedExit = False self.fsm = ClassicFSM.ClassicFSM('CatchActivityAnimFSM-%s' % self.avId, [State.State('init', self.enterInit, self.exitInit, ['notPlaying', 'normal', 'rules']), State.State('notPlaying', self.enterNotPlaying, self.exitNotPlaying, ['normal', 'rules', 'cleanup']), State.State('rules', self.enterRules, self.exitRules, ['normal', 'cleanup']), State.State('normal', self.enterNormal, self.exitNormal, ['eatFruit', 'fallBack', 'fallForward', 'notPlaying']), State.State('eatFruit', self.enterEatFruit, self.exitEatFruit, ['normal', 'fallBack', 'fallForward', 'eatFruit', 'notPlaying']), State.State('fallBack', self.enterFallBack, self.exitFallBack, ['normal', 'notPlaying']), State.State('fallForward', self.enterFallForward, self.exitFallForward, ['normal', 'notPlaying']), State.State('cleanup', self.enterCleanup, self.exitCleanup, [])], 'init', 'cleanup') self.enteredAlready = False def load(self): self.setAnimState('off', 1.0) for anim in self.animList: self.toon.pose(anim, 0) def unload(self): del self.fsm def enter(self): if not self.enteredAlready: self.enteredAlready = True self.fsm.enterInitialState() self._exiting = False def exit(self, unexpectedExit = False): if self._exiting: return self._exiting = True self.unexpectedExit = unexpectedExit if not self.unexpectedExit: self.fsm.requestFinalState() del self._exiting def enterInit(self): self.notify.debug('enterInit') self.toon.startBlink() self.toon.stopLookAround() if self.isLocal: self.activity.initOrthoWalk() self.dropShadow = self.toon.dropShadow self.origDropShadowColor = self.dropShadow.getColor() c = self.origDropShadowColor alpha = 0.35 self.dropShadow.setColor(c[0], c[1], c[2], alpha) def exitInit(self): pass def enterNotPlaying(self): self.toon.stopBlink() self.toon.startLookAround() self.setAnimState('neutral', 1.0) if self.isLocal: self.activity.orthoWalk.stop() self.dropShadow.setColor(self.origDropShadowColor) def exitNotPlaying(self): self.dropShadow = self.toon.dropShadow self.origDropShadowColor = self.dropShadow.getColor() c = self.origDropShadowColor alpha = 0.35 self.dropShadow.setColor(c[0], c[1], c[2], alpha) def enterRules(self): if self.isLocal: self.notify.debug('enterNormal') self.setAnimState('Catching', 1.0) self.activity.orthoWalk.stop() 
self.accept(self.activity.rulesDoneEvent, self.handleRulesDone) self.rulesPanel = MinigameRulesPanel('PartyRulesPanel', self.activity.getTitle(), self.activity.getInstructions(), self.activity.rulesDoneEvent, PartyGlobals.DefaultRulesTimeout) base.setCellsAvailable(base.bottomCells + [base.leftCells[0], base.rightCells[1]], False) self.rulesPanel.load() self.rulesPanel.enter() else: self.fsm.request('normal') def handleRulesDone(self): self.fsm.request('normal') def exitRules(self): self.setAnimState('off', 1.0) self.ignore(self.activity.rulesDoneEvent) if hasattr(self, 'rulesPanel'): self.rulesPanel.exit() self.rulesPanel.unload() del self.rulesPanel base.setCellsAvailable(base.bottomCells + [base.leftCells[0], base.rightCells[1]], True) def enterNormal(self): self.notify.debug('enterNormal') self.setAnimState('Catching', 1.0) if self.isLocal: self.activity.orthoWalk.start() self.toon.lerpLookAt(Vec3.forward() + Vec3.up(), time=0.2, blink=0) def exitNormal(self): self.setAnimState('off', 1.0) if self.isLocal: self.activity.orthoWalk.stop() self.toon.lerpLookAt(Vec3.forward(), time=0.2, blink=0) def eatFruit(self, fruitModel, handNode): if self.fsm.getCurrentState().getName() == 'eatFruit': self.fsm.request('normal') self.fsm.request('eatFruit', [fruitModel, handNode]) def enterEatFruit(self, fruitModel, handNode): self.notify.debug('enterEatFruit') self.setAnimState('CatchEating', 1.0) if self.isLocal: self.activity.orthoWalk.start() self.fruitModel = fruitModel renderScale = fruitModel.getScale(render) fruitModel.reparentTo(handNode) fruitModel.setScale(render, renderScale) duration = self.toon.getDuration('catch-eatneutral') self.eatIval = Sequence(Parallel(WaitInterval(duration), Sequence(LerpScaleInterval(fruitModel, duration / 2.0, fruitModel.getScale() * 0.5, blendType='easeInOut'), Func(fruitModel.hide))), Func(self.fsm.request, 'normal'), name=self.toon.uniqueName('eatingIval')) self.eatIval.start() def exitEatFruit(self): self.eatIval.pause() del self.eatIval self.fruitModel.reparentTo(hidden) self.fruitModel.removeNode() del self.fruitModel self.setAnimState('off', 1.0) if self.isLocal: self.activity.orthoWalk.stop() def enterFallBack(self): self.notify.debug('enterFallBack') if self.isLocal: base.playSfx(self.activity.sndOof) duration = 1.0 animName = self.FallBackAnim startFrame = 12 totalFrames = self.toon.getNumFrames(animName) frames = totalFrames - 1 - startFrame frameRate = self.toon.getFrameRate(animName) newRate = frames / duration playRate = newRate / frameRate def resume(self = self): self.fsm.request('normal') self.fallBackIval = Sequence(ActorInterval(self.toon, animName, startTime=startFrame / newRate, endTime=totalFrames / newRate, playRate=playRate), FunctionInterval(resume)) self.fallBackIval.start()
def exitFallBack(self): self.fal
lBackIval.pause() del self.fallBackIval def enterFallForward(self): self.notify.debug('enterFallForward') if self.isLocal: base.playSfx(self.activity.sndOof) duration = 2.0 animName = self.FallFwdAnim startFrame = 12 totalFrames = self.toon.getNumFrames(animName) frames = totalFrames - 1 - startFrame pauseFrame = 19 frameRate = self.toon.getFrameRate(animName) newRate = frames / (duration * 0.5) playRate = newRate / frame
soad241/django-notification
notification/models.py
Python
mit
14,929
0.002612
import datetime
try:
    import cPickle as pickle
except ImportError:
    import pickle

from django.db import models
from django.db.models.query import QuerySet
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template import Context
from django.template.loader import render_to_string
from django.core.exceptions import ImproperlyConfigured

from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.contrib.auth.models import AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic

from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext, get_language, activate

from django.core.mail import EmailMultiAlternatives

QUEUE_ALL = getattr(settings, "NOTIFICATION_QUEUE_ALL", False)

class LanguageStoreNotAvailable(Exception):
    pass

class NoticeType(models.Model):

    label = models.CharField(_('label'), max_length=40)
    display = models.CharField(_('display'), max_length=50)
    description = models.CharField(_('description'), max_length=100)

    # by default only on for media with sensitivity less than or equal to this number
    default = models.IntegerField(_('default'))

    def __unicode__(self):
        return self.label

    class Meta:
        verbose_name = _("notice type")
        verbose_name_plural = _("notice types")


# if this gets updated, the create() method below needs to be as well...
NOTICE_MEDIA = (
    ("1", _("Email")),
)

# how spam-sensitive is the medium
NOTICE_MEDIA_DEFAULTS = {
    "1": 2 # email
}

class NoticeSetting(models.Model):
    """
    Indicates, for a given user, whether to send notifications
    of a given type to a given medium.
    """

    user = models.ForeignKey(User, verbose_name=_('user'))
    notice_type = models.ForeignKey(NoticeType, verbose_name=_('notice type'))
    medium = models.CharField(_('medium'), max_length=1, choices=NOTICE_MEDIA)
    send = models.BooleanField(_('send'))

    class Meta:
        verbose_name = _("notice setting")
        verbose_name_plural = _("notice settings")
        unique_together = ("user", "notice_type", "medium")

def get_notification_setting(user, notice_type, medium):
    try:
        return NoticeSetting.objects.get(
            user=user, notice_type=notice_type, medium=medium)
    except NoticeSetting.DoesNotExist:
        default = (NOTICE_MEDIA_DEFAULTS[medium] <= notice_type.default)
        # sometimes other thread already created this setting,
        setting, created = NoticeSetting.objects.get_or_create(
            user=user, notice_type=notice_type, medium=medium, send=default)
        setting.save()
        return setting

def should_send(user, notice_type, medium):
    if not user.is_active:
        return False
    return get_notification_setting(user, notice_type, medium).send

class NoticeManager(models.Manager):

    def notices_for(self, user, archived=False, unseen=None, on_site=None):
        """
        returns Notice objects for the given user.

        If archived=False, it only includes notices not archived.
        If archived=True, it returns all notices for that user.

        If unseen=None, it includes all notices.
        If unseen=True, return only unseen notices.
        If unseen=False, return only seen notices.
""" if archived: qs = self.filter(user=user) else: qs = self.filter(user=user, archived=archived) if unseen is not None: qs = qs.filter(unseen=unseen) if on_site is not None: qs = qs.filter(on_site=on_site) return qs def unseen_count_for(self, user, **kwargs): """ returns the number of unseen notices for the given user but does not mark them seen """ return self.notices_for(user, unseen=True, **kwargs).count() class Notice(models.Model): user = models.ForeignKey(User, verbose_name=_('user')) message = models.TextField(_('message')) notice_type = models.ForeignKey(NoticeType, verbose_name=_('notice type')) added = models.DateTimeField(_('added'), default=datetime.datetime.now) unseen = models.BooleanField(_('unseen'), default=True) archived = models.BooleanField(_('archived'), default=False) on_site = models.BooleanField(_('on site')) objects = NoticeManager() def __unicode__(self): return self.message def archive(self): self.archived = True self.save() def is_unseen(self): """ returns value of self.unseen but also changes it to false. Use this in a template to mark an unseen notice differently the first time it is shown. """ unseen = self.unseen if unseen: self.unseen = False self.save() return unseen class Meta: ordering = ["-added"] verbose_name = _("notice") verbose_name_plural = _("notices") def get_absolute_url(self): return ("notification_notice", [str(self.pk)])
get_absolute_url = models.permalink(get_absolute_
url) class NoticeQueueBatch(models.Model): """ A queued notice. Denormalized data for a notice. """ pickled_data = models.TextField() def create_notice_type(label, display, description, default=2, verbosity=1): """ Creates a new NoticeType. This is intended to be used by other apps as a post_syncdb manangement step. """ try: notice_type = NoticeType.objects.get(label=label) updated = False if display != notice_type.display: notice_type.display = display updated = True if description != notice_type.description: notice_type.description = description updated = True if default != notice_type.default: notice_type.default = default updated = True if updated: notice_type.save() if verbosity > 1: print "Updated %s NoticeType" % label except NoticeType.DoesNotExist: NoticeType(label=label, display=display, description=description, default=default).save() if verbosity > 1: print "Created %s NoticeType" % label def get_notification_language(user): """ Returns site-specific notification language for this user. Raises LanguageStoreNotAvailable if this site does not use translated notifications. """ if getattr(settings, 'NOTIFICATION_LANGUAGE_MODULE', False): try: app_label, model_name = settings.NOTIFICATION_LANGUAGE_MODULE.split('.') model = models.get_model(app_label, model_name) language_model = model._default_manager.get(user__id__exact=user.id) if hasattr(language_model, 'language'): return language_model.language except (ImportError, ImproperlyConfigured, model.DoesNotExist): raise LanguageStoreNotAvailable raise LanguageStoreNotAvailable def get_formatted_messages(formats, label, context): """ Returns a dictionary with the format identifier as the key. The values are are fully rendered templates with the given context. """ format_templates = {} for format in formats: # conditionally turn off autoescaping for .txt extensions in format if format.endswith(".txt"): context.autoescape = False else: context.autoescape = True format_templates[format] = render_to_string(( 'notification/%s/%s' % (label, format), 'notification/%s' % format), context_instance=context) return format_templates def send_now(users, label, extra_context=None, on_site=True): """ Creates a new notice. This is intended to be how other apps create new notices. notification.send(user, 'friends_invite_sent', { 'spam': 'eggs', 'foo': 'bar', ) You can pass in on_site=False to prevent the notice emitted from being displayed on the site. """ if extra_context is None: extra_contex
google-research/policy-learning-landscape
eager_pg/trajectory_batch_stats_test.py
Python
apache-2.0
5,906
0.003725
# coding=utf-8 # Copyright 2018 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you m
ay not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to i
n writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for eager_pg.trajectory_batch_stats. Note that the explicit .numpy() casting also implicitly checks that the methods all return tensors and not numpy arrays. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from eager_pg import trajectory_batch_stats import tensorflow as tf tbs = trajectory_batch_stats TEST_MASK = [[1, 1, 1, 1], [1, 1, 1, 0], [1, 1, 0, 0], [0, 1, 0, 0], [0, 1, 0, 0]] # pyformat: disable # Generally masks will be floats so we can easily multiply tensors. NP_TEST_MASK = np.array(TEST_MASK, dtype=np.float32) class TrajectoryBatchStatsTest(tf.test.TestCase, parameterized.TestCase): """Tests to ensure that statistics on batches of trajectory are correct.""" @property def expected_lengths(self): return tf.constant([3, 5, 2, 1], dtype=tf.float32) def test_get_trajectory_lengths(self): """Checks if the length of each trajectory in the batch is correct.""" # pylint: disable=invalid-name TF_TEST_MASK = tf.constant(NP_TEST_MASK) TF_TEST_MASK_TF_F64 = tf.cast(TF_TEST_MASK, tf.float64) NP_TEST_MASK_NP_F64 = NP_TEST_MASK.astype(np.float64) ALL_MASKS = [ TF_TEST_MASK, NP_TEST_MASK, TF_TEST_MASK_TF_F64, NP_TEST_MASK_NP_F64 ] # pylint: enable=invalid-name for mask in ALL_MASKS: computed_lengths = tbs.get_trajectory_lengths(mask) self.assertTrue(np.allclose(computed_lengths, self.expected_lengths)) def run_without_lengths(self, stats_function, args): """Helper function to run stats.""" return stats_function(*args) def run_with_lengths(self, stats_function, args): """Helper function to run stats with precomputed lengths.""" return stats_function(*args, trajectory_lengths=self.expected_lengths) @parameterized.named_parameters( dict( testcase_name='rewards', raw_batch=np.array([[1, 2, 3, 4]] * 5).astype(np.float32), statistic_function=tbs.reward_summaries, expected_results_with_traj={ 'mean_step_reward': (3. / 3 + 10. / 5 + 6. / 2 + 4. / 1) / 4.0, 'mean_trajectory_reward': (3. + 10. + 6. + 4.) / 4.0, 'stderr_trajectory_reward': np.sqrt(np.sum( (np.array([3., 10., 6., 4.]) - (3. + 10. + 6. + 4.) / 4.0)**2 / 3) / 4) }, expected_results_no_traj={ 'mean_trajectory_reward': (5 + 10 + 15 + 20) / 4.0, 'mean_step_reward': (1 + 2 + 3 + 4) / 4.0 }), dict( testcase_name='entropies', raw_batch=np.array([[1, 2, 3, 4]] * 5).astype(np.float32), statistic_function=tbs.entropy_summaries, expected_results_with_traj={ 'mean_step_entropy': (3. / 3 + 10. / 5 + 6. / 2 + 4. / 1) / 4.0, 'mean_trajectory_entropy': (3. + 10. + 6. + 4.) / 4.0 }), ) def test_calculations(self, raw_batch, statistic_function, expected_results_with_traj, expected_results_no_traj=None): # pylint: disable=g-doc-args """Test calculations of statistc_name on raw_batch using statistic_function. 
""" stats = [] stats.append( self.run_with_lengths(statistic_function, (raw_batch, NP_TEST_MASK))) stats.append( self.run_without_lengths(statistic_function, (raw_batch, NP_TEST_MASK))) for stat in stats: for expected_key in expected_results_with_traj.keys(): self.assertAllClose(stat[expected_key].numpy(), expected_results_with_traj[expected_key]) if expected_results_no_traj is not None: stat = self.run_without_lengths(statistic_function, (raw_batch,)) for expected_key in expected_results_no_traj.keys(): self.assertAllClose(stat[expected_key].numpy(), expected_results_no_traj[expected_key]) def test_reward_calculations_errors(self): """Ensures that the reward calculations return the correct errors.""" rewards_as_list = [[1, 2, 3, 4]] * 5 self.assertRaises(TypeError, tbs.reward_summaries, rewards_as_list, None) rewards_as_numpy = np.array(rewards_as_list) rewards_as_numpy_wrong_shape = np.expand_dims(rewards_as_numpy, 1) self.assertRaises(ValueError, tbs.reward_summaries, rewards_as_numpy_wrong_shape, None) # TODO(zaf): Find a way to @parameterized this? def test_returns_calculations(self): test_returns = np.array([[0.125, 1.875, 0.25, 1.5], [0.25, 1.75, 0.5, 1.0], [0.5, 1.5, 1.0, 0.0]]) stats = tbs.return_summaries(test_returns) expected_mean_return = (0.125 + 1.875 + 0.25 + 1.5) / 4.0 self.assertEqual(stats['mean_trajectory_return'].numpy(), expected_mean_return) pop_variance = np.sum((test_returns[0] - expected_mean_return)**2 / 3) standard_error = np.sqrt(pop_variance) / np.sqrt(4) self.assertTrue( np.allclose(stats['stderr_trajectory_return'].numpy(), standard_error)) if __name__ == '__main__': tf.enable_eager_execution() tf.test.main()
nexdatas/configtool
test/DefinitionDlg_test.py
Python
gpl-3.0
85,762
0
#!/usr/bin/env python # This file is part of nexdatas - Tango Server for NeXus data writer # # Copyright (C) 2012-2017 DESY, Jan Kotanski <jkotan@mail.desy.de> # # nexdatas is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # nexdatas is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with nexdatas. If not, see <http://www.gnu.org/licenses/>. # \package test nexdatas # \file DefinitionDlgTest.py # unittests for field Tags running Tango Server # import unittest import os import sys import random import struct import binascii import time from PyQt5.QtTest import QTest from PyQt5.QtWidgets import (QApplication, QMessageBox, QTableWidgetItem, QPushButton) from PyQt5.QtCore import Qt, QTimer from PyQt5.QtXml import QDomDocument from nxsconfigtool.DefinitionDlg import DefinitionDlg from nxsconfigtool.ComponentModel import ComponentModel from nxsconfigtool.AttributeDlg import AttributeDlg from nxsconfigtool.NodeDlg import NodeDlg # from nxsconfigtool.ui.ui_definitiondlg import Ui_DefinitionDlg from nxsconfigtool.DomTools import DomTools # Qt-application app = None if sys.version_info > (3,): unicode = str long = int # if 64-bit machione IS64BIT = (struct.calcsize("P") == 8) class TestView(object): def __init__(self, model): self.testIndex = None self.testModel = model self.stack = [] def currentIndex(self): return self.testIndex def model(self): return self.testModel def expand(self, index): self.stack.append("expand") self.stack.append(index) # test fixture class DefinitionDlgTest(unittest.TestCase): # constructor # \param methodName name of the test method def __init__(self, methodName): unittest.TestCase.__init__(self, methodName) self._bint = "int64" if IS64BIT else "int32" self._buint = "uint64" if IS64BIT else "uint32" self._bfloat = "float64" if IS64BIT else "float32" # MessageBox text self.text = None # MessageBox title self.title = None # attribute name self.aname = "myname" # attribute value self.avalue = "myentry" # action status self.performed = False try: self.__seed = long(binascii.hexlify(os.urandom(16)), 16) except NotImplementedError: self.__seed = long(time.time() * 256) self.__rnd = random.Random(self.__seed) # test starter # \brief Common set up def setUp(self): print("\nsetting up...") print("SEED = %s" % self.__seed) # test closer # \brief Common tear down def tearDown(self): print("tearing down ...") def checkMessageBox(self): # self.assertEqual(QApplication.activeWindow(), None) mb = QApplication.activeModalWidget() self.assertTrue(isinstance(mb, QMessageBox)) # print mb.text() self.text = mb.text() self.title = mb.windowTitle() mb.close() def rmAttributeWidget(self): # aw = QApplication.activeWindow() mb = QApplication.activeModalWidget() # print "CLASS", mb # print "CLASS2", aw self.assertTrue(isinstance(mb, QMessageBox)) self.text = mb.text() self.title = mb.windowTitle() QTest.mouseClick(mb.button(QMessageBox.Yes), Qt.LeftButton) def rmAttributeWidgetClose(self): # aw = QApplication.activeWindow() mb = QApplication.activeModalWidget() self.assertTrue(isinstance(mb, QMessageBox)) self.text = mb.text() self.title = mb.windowTitle() 
QTest.mouseClick(mb.button(QMessageBox.No), Qt.LeftButton) def attributeWidget(self): # aw = QApplication.activeWindow() mb = QApplication.activeModalWidget() self.assertTrue(isinstance(mb, AttributeDlg)) QTest.keyClicks(mb.ui.nameLineEdit, self.aname) self.assertEqual(mb.ui.nameLineEdit.text(), se
lf.aname) QTest.keyClicks(mb.ui.valueLineEdit, self.avalue) self.assertEqual(mb.ui.valueLineEdit.text(), self.avalue) mb.accept() def attributeWidgetClose(self): # aw = QApplication.activeWindow() mb = QApplication.activeModalWidget() self.assertTrue(isinstance(mb, AttributeDlg)) QTest.keyClicks(mb.ui.nameLineEdit, self.aname) self.assertEqual(mb.ui.nameLineEdit.text(), self.ana
me) QTest.keyClicks(mb.ui.valueLineEdit, self.avalue) self.assertEqual(mb.ui.valueLineEdit.text(), self.avalue) # mb.close() mb.reject() # mb.accept() # constructor test # \brief It tests default settings def test_constructor(self): fun = sys._getframe().f_code.co_name print("Run: %s.%s() " % (self.__class__.__name__, fun)) form = DefinitionDlg() form.show() self.assertEqual(form.name, '') self.assertEqual(form.content, '') self.assertEqual(form.doc, '') self.assertEqual(form.attributes, {}) self.assertEqual(form.node, None) self.assertEqual(form.root, None) self.assertEqual(form.view, None) self.assertEqual(form.subItems, ["group", "field", "attribute", "link", "component", "doc", "symbols"]) self.assertEqual(form.ui.__class__.__name__, "Ui_DefinitionDlg") self.assertTrue(isinstance(form, NodeDlg)) self.assertEqual(form.externalApply, None) self.assertEqual(form.externalDSLink, None) self.assertEqual(form.replaceText, super(DefinitionDlg, form).replaceText) self.assertEqual(form.removeElement, super(DefinitionDlg, form).removeElement) self.assertEqual(form.replaceElement, super(DefinitionDlg, form).replaceElement) self.assertEqual(form.appendElement, super(DefinitionDlg, form).appendElement) self.assertEqual(form.reset, super(DefinitionDlg, form).reset) # constructor test # \brief It tests default settings def test_constructor_accept(self): fun = sys._getframe().f_code.co_name print("Run: %s.%s() " % (self.__class__.__name__, fun)) form = DefinitionDlg() form.show() self.assertEqual(form.name, '') self.assertEqual(form.content, '') self.assertEqual(form.doc, '') self.assertEqual(form.attributes, {}) self.assertEqual(form.subItems, ["group", "field", "attribute", "link", "component", "doc", "symbols"]) self.assertEqual(form.ui.__class__.__name__, "Ui_DefinitionDlg") form.createGUI() self.assertTrue(not form.ui.nameLineEdit.text()) self.assertTrue(not form.ui.contentTextEdit.toPlainText()) self.assertTrue(form.ui.applyPushButton.isEnabled()) self.assertTrue(form.ui.resetPushButton.isEnabled()) name = "myname" content = "$components.default" QTest.keyClicks(form.ui.nameLineEdit, name) self.assertEqual(form.ui.nameLineEdit.text(), name) QTest.keyClicks(form.ui.contentTextEdit, content) self.assertEqual(form.ui.contentTextEdit.toPlainText(), content) self.assertTrue(bool(form.ui.nameLineEdit.text())) self.assertTrue(bool(form.ui.contentTextEdit.toPlainText())) QTest.mouseClick(form.ui.applyPushButton, Qt.LeftButton) # form.apply() # self.assertEqual(form.name, name) self.assertEqual(form.result(), 0) # constructor test
dionbosschieter/numatuned
numatuned.py
Python
mit
167
0
#!/usr/bin/env python3
import sys
import numatuned

dryrun = False
if len(sys.argv) > 1:
    if sys.argv[1] == '-n':
        dryrun = True

numatuned.fire(60, dryrun)
sethuiyer/mlhub
Deep Sentiment Analysis/build_sentiment_model.py
Python
mit
1,105
0.000905
import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
from tflearn.datasets import imdb

# IMDB Dataset loading
train, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000,
                                valid_portion=0.1)
trainX, trainY = train
testX, testY = test

# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=100, value=0.)
testX = pad_sequences(testX, maxlen=100, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY, nb_classes=2)
testY = to_categorical(testY, nb_classes=2)

# Network building
net = tflearn.input_data([None, 100])
net = tflearn.embedding(net, input_dim=10000, output_dim=128)
net = tflearn.lstm(net, 128, dropout=0.8)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy')

# Training
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
          batch_size=32)
model.save('sentiment.tflearn')
djaodjin/djaodjin-signup
signup/docs.py
Python
bsd-2-clause
2,317
0.001295
# Copyright (c) 2020, Djaodjin Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#pylint:disable=unused-argument,unused-import

try:
    from drf_yasg.openapi import Response as OpenAPIResponse
    from drf_yasg.utils import no_body, swagger_auto_schema
except ImportError:
    from functools import wraps

    from .compat import available_attrs

    class no_body(object): #pylint:disable=invalid-name
        pass

    def swagger_auto_schema(function=None, **kwargs):
        """
        Dummy decorator when drf_yasg is not present.
        """
        def decorator(view_func):
            @wraps(view_func, assigned=available_attrs(view_func))
            def _wrapped_view(request, *args, **kwargs):
                return view_func(request, *args, **kwargs)
            return _wrapped_view
        if function:
            return decorator(function)
        return decorator

    class OpenAPIResponse(object):
        """
        Dummy response object to document API.
        """

        def __init__(self, *args, **kwargs):
            pass
alex/readthedocs.org
deploy/fabfile.py
Python
mit
940
0.004255
from fabric.api import env, local, run, sudo

env.user = 'root'
env.hosts = ['204.232.205.6']
env.code_dir = '/home/docs/sites/readthedocs.org/checkouts/readthedocs.org'
env.virtualenv = '/home/docs/sites/readthedocs.org'
env.rundir = '/home/docs/sites/readthedocs.org/run'
env.chef_executable = '/var/lib/gems/1.8/bin/chef-solo'


def install_chef():
    sudo('apt-get update', pty=True)
    sudo('apt-get install -y git-core rubygems ruby ruby-dev', pty=True)
    sudo('gem install chef --no-ri --no-rdoc', pty=True)


def sync_config():
    local('rsync -av . %s@%s:/etc/chef' % (env.user, env.hosts[0]))


def update():
    sync_config()
    sudo('cd /etc/chef && %s' % env.chef_executable, pty=True)


def reload():
    "Reload the server."
    env.user = "docs"
    run("kill -HUP `cat %s/gunicorn.pid`" % env.rundir, pty=True)


def restart():
    "Restart (or just start) the server"
    sudo('restart readthedocs-gunicorn', pty=True)
Tanych/CodeTracking
121-Best-Time-to-Buy-and-Sell-Stock/solution.py
Python
mit
316
0.025316
class Solution(object):
    def maxProfit(self, prices):
        """
        :type prices: List[int]
        :rtype: int
        """
        low = 1 << 31
        profit = 0
        for p in prices:
            if p < low:
                low = p
            if p - low > profit:
                profit = p - low
        return profit
willowd878/nca47
nca47/db/sqlalchemy/models/dns.py
Python
apache-2.0
1,680
0
import sqlalchemy as sa
from oslo_db.sqlalchemy import types as db_types

from nca47.db.sqlalchemy.models import base as model_base
from nca47.objects import attributes as attr

HasTenant = model_base.HasTenant
HasId = model_base.HasId
HasStatus = model_base.HasStatus
HasOperationMode = model_base.HasOperationMode


class DnsServer(model_base.BASE, HasId, HasOperationMode):
    """Represents a dns server."""

    name = sa.Column(sa.String(attr.NAME_MAX_LEN))


class Zone(model_base.BASE, HasId, HasOperationMode):
    """Represents a dns zone."""

    __tablename__ = 'dns_zone_info'

    zone_name = sa.Column(sa.String(attr.NAME_MAX_LEN))
    tenant_id = sa.Column(sa.String(attr.NAME_MAX_LEN))
    zone_id = sa.Column(sa.String(attr.NAME_MAX_LEN))
    vres_id = sa.Column(sa.String(attr.NAME_MAX_LEN))
    masters = sa.Column(db_types.JsonEncodedList)
    slaves = sa.Column(db_types.JsonEncodedList)
    renewal = sa.Column(sa.String(attr.NAME_MAX_LEN))
    default_ttl = sa.Column(sa.String(attr.NAME_MAX_LEN))
    owners = sa.Column(db_types.JsonEncodedList)
    ad_controller = sa.Column(sa.String(attr.NAME_MAX_LEN))
    comment = sa.Column(sa.String(attr.NAME_MAX_LEN))


class ZoneRecord(model_base.BASE, HasId, HasOperationMode):
    """Represents a dns zone."""

    __tablename__ = 'dns_rrs_info'

    zone_id = sa.Column(sa.String(attr.UUID_LEN))
    rrs_id = sa.Column(sa.String(attr.NAME_MAX_LEN))
    rrs_name = sa.Column(sa.String(attr.NAME_MAX_LEN))
    type = sa.Column(sa.String(attr.NAME_MAX_LEN))
    klass = sa.Column(sa.String(attr.NAME_MAX_LEN))
    ttl = sa.Column(sa.String(attr.NAME_MAX_LEN))
    rdata = sa.Column(sa.String(attr.NAME_MAX_LEN))
marwano/utile
testsuite/test_xml.py
Python
bsd-3-clause
709
0
from utile import pretty_xml, xml_to_dict, element_to_dict
from testsuite.support import etree, TestCase
import unittest

XML_DATA = "<html><body><h1>test1</h1><h2>test2</h2></body></html>"

XML_PRETTY = """\
<html>
  <body>
    <h1>test1</h1>
    <h2>test2</h2>
  </body>
</html>
"""

XML_DICT = {'body': {'h2': 'test2', 'h1': 'test1'}}


@unittest.skipUnless(etree, 'lxml not installed')
class XMLTestCase(TestCase):
    def test_pretty_xml(self):
        self.assertEqual(pretty_xml(XML_DATA), XML_PRETTY)

    def test_element_to_dict(self):
        self.assertEqual(element_to_dict(etree.XML(XML_DATA)), XML_DICT)

    def test_xml_to_dict(self):
        self.assertEqual(xml_to_dict(XML_DATA), XML_DICT)
t-brandt/acorns-adi
utils/config.py
Python
bsd-2-clause
2,628
0.002664
#!/usr/bin/env python
#
# Original filename: config.py
#
# Author: Tim Brandt
# Email: tbrandt@astro.princeton.edu
# Date: August 2011
#
# Summary: Set configuration parameters to sensible values.
#

import re
from subprocess import *
import multiprocessing
import numpy as np


def config(nframes, framesize):

    ###################################################################
    # Fetch the total amount of physical system memory in bytes.
    # This is the second entry on the second line of the standard
    # output of the 'free' command.
    ###################################################################

    print "\nGetting system parameters, setting pipeline execution parameters..."
    osver = Popen(["uname", "-a"], stdout=PIPE).stdout.read()

    if osver.startswith("Linux"):
        print "You are running Linux."
    elif osver.startswith("Darwin"):
        print "You are running Mac OS-X."
    else:
        print "Your operating system is not recognized."

    if osver.startswith("Linux"):
        mem = Popen(["free", "-b"], stdout=PIPE).stdout.read()
        mem = int(mem.split('\n')[1].split()[1])
    elif osver.startswith("Darwin"):
        mem = Popen(["vm_stat"], stdout=PIPE).stdout.read().split('\n')
        blocksize = re.search('.*size of ([0-9]+) bytes.*', mem[0]).group(1)
        totmem = 0.
        for line in mem:
            if np.any(["Pages free:" in line, "Pages active:" in line,
                       "Pages inactive:" in line, "Pages speculative:" in line,
                       "Pages wired down:" in line]):
                totmem += float(line.split(':')[1]) * float(blocksize)
        mem = int(totmem)

    ncpus = multiprocessing.cpu_count()
    hostname = Popen("hostname", stdout=PIPE).stdout.read().split()[0]

    print "\n You are running on " + hostname + "."
    print " You have " + str(mem / 2**20) + " megabytes of memory and " + \
          str(ncpus) + " threads available."

    datasize = framesize * nframes * 4
    print " The dataset consists of " + str(nframes) + " frames, " + \
          str(datasize * 100 / mem) + "% of your physical RAM."

    storeall = False
    if datasize * 100 / mem < 20:
        storeall = True
        print " --> You have enough RAM to store all data."
        print "     The pipeline will not need to write all intermediate files."
    else:
        print " --> You do not have enough RAM to store all data."
        print "     The pipeline will need to write all intermediate files"
        print "     and do the reduction in pieces."

    return mem, ncpus, storeall
romain-dartigues/ansible
lib/ansible/plugins/httpapi/nxos.py
Python
gpl-3.0
5,290
0.001323
# (c) 2018 Red Hat Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type import json from ansible.module_utils._text import to_text from ansible.module_utils.connection import ConnectionError from ansible.module_utils.network.common.utils import to_list from ansible.plugins.httpapi import HttpApiBase from ansible.utils.display import Display display = Display() class HttpApi(HttpApiBase): def _run_queue(self, queue, output): if self._become: display.vvvv('firing event: on_become') queue.insert(0, 'enable') request = request_builder(queue, output) headers = {'Content-Type': 'application/json'} response, response_data = self.connection.send('/ins', request, headers=headers, method='POST') try: response_data = json.loads(to_text(response_data.getvalue())) except ValueError: raise ConnectionError('Response was not valid JSON, got {0}'.format( to_text(response_data.getvalue()) )) results = handle_response(response_data) if self._become: results = results[1:] return results def send_request(self, data, **message_kwargs): output = None queue = list() responses = list() for item in to_list(data): cmd_output = message_kwargs.get('output', 'text') if isinstance(item, dict): command = item['command'] if 'output' in item: cmd_output = item['output'] else: command = item # Emulate '| json' from CLI if command.endswith('| json'): command = command.rsplit('|', 1)[0] cmd_output = 'json' if output and output != cmd_output: responses.extend(self._run_queue(queue, output)) queue = list() output = cmd_output queue.append(command) if queue: responses.extend(self._run_queue(queue, output)) if len(responses) == 1: return responses[0] return responses def edit_config(self, candidate=None, commit=True, replace=None, comment=None): resp = list() operations = self.connection.get_device_operations() self.connection.check_edit_config_capability(operations, candidate, commit, replace, comment) if replace: device_info = self.connection.get_device_info() if '9K' not in device_info.get('network_os_platform', ''): raise ConnectionError(msg=u'replace is supported only on Nexus 9K devices') candidate = 'config replace {0}'.format(replace) responses = self.send_request(candidate, output='config') for response in to_list(responses): if response != '{}': resp.append(response) if not resp: resp = [''] return resp def run_commands(self, commands, check_rc=True): """Runs list of commands on remote device and returns results """ try: out = self.send_request(commands) except ConnectionError as exc: if check_rc is True: raise out = to_text(exc) out = to_list(out) if not out[0]: return out for index, response in enumerate(out): if response[0] == '{': out[index] = json.loads(response) return out def handle_response(response): res
ults = [] if response['ins_api'].get('outputs'): for output in to_list(response['ins_api']
['outputs']['output']): if output['code'] != '200': raise ConnectionError('%s: %s' % (output['input'], output['msg'])) elif 'body' in output: result = output['body'] if isinstance(result, dict): result = json.dumps(result) results.append(result.strip()) return results def request_builder(commands, output, version='1.0', chunk='0', sid=None): """Encodes a NXAPI JSON request message """ output_to_command_type = { 'text': 'cli_show_ascii', 'json': 'cli_show', 'bash': 'bash', 'config': 'cli_conf' } maybe_output = commands[0].split('|')[-1].strip() if maybe_output in output_to_command_type: command_type = output_to_command_type[maybe_output] commands = [command.split('|')[0].strip() for command in commands] else: try: command_type = output_to_command_type[output] except KeyError: msg = 'invalid format, received %s, expected one of %s' % \ (output, ','.join(output_to_command_type.keys())) raise ConnectionError(msg) if isinstance(commands, (list, set, tuple)): commands = ' ;'.join(commands) msg = { 'version': version, 'type': command_type, 'chunk': chunk, 'sid': sid, 'input': commands, 'output_format': 'json' } return json.dumps(dict(ins_api=msg))
thunsaker/cloudpebble
ide/utils/sdk/manifest.py
Python
mit
11,537
0.001127
import json import re import uuid from django.utils.translation import ugettext as _ from ide.utils.project import APPINFO_MANIFEST, PACKAGE_MANIFEST, InvalidProjectArchiveException __author__ = 'katharine' def manifest_name_for_project(project): if project.is_standard_project_type and project.sdk_version == '3': return PACKAGE_MANIFEST else: return APPINFO_MANIFEST def generate_manifest(project, resources): if project.is_standard_project_type: if project.sdk_version == '2': return generate_v2_manifest(project, resources) else: return generate_v3_manifest(project, resources) elif project.project_type == 'pebblejs': return generate_pebblejs_manifest(project, resources) elif project.project_type == 'simplyjs': return generate_simplyjs_manifest(project) else: raise Exception(_("Unknown project type %s") % project.project_type) def generate_v2_manifest(project, resources): return dict_to_pretty_json(generate_v2_manifest_dict(project, resources)) def generate_v3_manifest(project, resources): return dict_to_pretty_json(generate_v3_manifest_dict(project, resources)) def generate_v2_manifest_dict(project, resources): manifest = { 'uuid': str(project.app_uuid), 'shortName': project.app_short_name, 'longName': project.app_long_name, 'companyName': project.app_company_name, 'versionLabel': project.app_version_label, 'versionCode': 1, 'watchapp': { 'watchface': project.app_is_watchface }, 'appKeys': json.loads(project.app_keys), 'resources': generate_resource_dict(project, resources), 'projectType': 'native', 'sdkVersion': "2", } if project.app_capabilities: manifest['capabilities'] = project.app_capabilities.split(',') if project.app_is_shown_on_communication: manifest['watchapp']['onlyShownOnCommunication'] = project.app_is_shown_on_communication return manifest def generate_v3_manifest_dict(project, resources): manifest = { 'name': project.npm_name, 'author': project.app_company_name, 'version': project.semver, 'keywords': project.keywords, 'dependencies': project.get_dependencies(), 'pebble': { 'sdkVersion': project.sdk_version, 'watchapp': { 'watchface': project.app_is_watchface }, 'messageKeys': json.loads(project.app_keys), 'resources': generate_
resource_dict(project, resources), 'projectType': project.project_type } } if project.app_capabilities: manifest['pebble']['capabilities'] = project.app_capabilities.split(',') if project.project_type == 'package': manifest['files'] = ['dist.zip'] else: manifest['pebble']['uuid'] = str(project.app_uuid) manifest['pebble']['enableMultiJS']
= project.app_modern_multi_js manifest['pebble']['displayName'] = project.app_long_name if project.app_is_hidden: manifest['pebble']['watchapp']['hiddenApp'] = project.app_is_hidden if project.app_platforms: manifest['pebble']['targetPlatforms'] = project.app_platform_list return manifest def generate_manifest_dict(project, resources): if project.is_standard_project_type: if project.sdk_version == '2': return generate_v2_manifest_dict(project, resources) else: return generate_v3_manifest_dict(project, resources) elif project.project_type == 'simplyjs': return generate_simplyjs_manifest_dict(project) elif project.project_type == 'pebblejs': return generate_pebblejs_manifest_dict(project, resources) else: raise Exception(_("Unknown project type %s") % project.project_type) def dict_to_pretty_json(d): return json.dumps(d, indent=4, separators=(',', ': '), sort_keys=True) + "\n" def generate_resource_dict(project, resources): if project.is_standard_project_type: return generate_native_resource_dict(project, resources) elif project.project_type == 'simplyjs': return generate_simplyjs_resource_dict() elif project.project_type == 'pebblejs': return generate_pebblejs_resource_dict(resources) else: raise Exception(_("Unknown project type %s") % project.project_type) def generate_native_resource_dict(project, resources): resource_map = {'media': []} for resource in resources: for resource_id in resource.get_identifiers(): d = { 'type': resource.kind, 'file': resource.root_path, 'name': resource_id.resource_id, } if resource_id.character_regex: d['characterRegex'] = resource_id.character_regex if resource_id.tracking: d['trackingAdjust'] = resource_id.tracking if resource_id.memory_format: d['memoryFormat'] = resource_id.memory_format if resource_id.storage_format: d['storageFormat'] = resource_id.storage_format if resource_id.space_optimisation: d['spaceOptimization'] = resource_id.space_optimisation if resource.is_menu_icon: d['menuIcon'] = True if resource_id.compatibility is not None: d['compatibility'] = resource_id.compatibility if project.sdk_version == '3' and resource_id.target_platforms: d['targetPlatforms'] = json.loads(resource_id.target_platforms) resource_map['media'].append(d) return resource_map def generate_simplyjs_resource_dict(): return { "media": [ { "menuIcon": True, "type": "png", "name": "IMAGE_MENU_ICON", "file": "images/menu_icon.png" }, { "type": "png", "name": "IMAGE_LOGO_SPLASH", "file": "images/logo_splash.png" }, { "type": "font", "name": "MONO_FONT_14", "file": "fonts/UbuntuMono-Regular.ttf" } ] } def generate_pebblejs_resource_dict(resources): media = [ { "menuIcon": True, # This must be the first entry; we adjust it later. 
"type": "bitmap", "name": "IMAGE_MENU_ICON", "file": "images/menu_icon.png" }, { "type": "bitmap", "name": "IMAGE_LOGO_SPLASH", "file": "images/logo_splash.png" }, { "type": "bitmap", "name": "IMAGE_TILE_SPLASH", "file": "images/tile_splash.png" }, { "type": "font", "name": "MONO_FONT_14", "file": "fonts/UbuntuMono-Regular.ttf" } ] for resource in resources: if resource.kind not in ('bitmap', 'png'): continue d = { 'type': resource.kind, 'file': resource.root_path, 'name': re.sub(r'[^A-Z0-9_]', '_', resource.root_path.upper()), } if resource.is_menu_icon: d['menuIcon'] = True del media[0]['menuIcon'] media.append(d) return { 'media': media } def generate_simplyjs_manifest(project): return dict_to_pretty_json(generate_simplyjs_manifest_dict(project)) def generate_simplyjs_manifest_dict(project): manifest = { "uuid": project.app_uuid, "shortName": project.app_short_name, "longName": project.app_long_name, "companyName": project.app_company_name, "versionLabel": project.app_version_label, "versionCode": 1, "capabilities": project.app_capabilities.split(','), "watchapp": { "watchface": project.app_is_watchface }, "appKeys": {}, "resources": generate_simplyjs_resource_dict(), "projectType": "simplyjs" } return manifest def generate_pebblejs_manifest(project, resour
maxwward/SCOPEBak
askbot/migrations/0021_auto__add_field_comment_score.py
Python
gpl-3.0
25,917
0.008604
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Comment.score' db.add_column(u'comment', 'score', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False) def backwards(self, orm): # Deleting field 'Comment.score' db.delete_column(u'comment', 'score') models = { 'askbot.activity': { 'Meta': {'object_name': 'Activity', 'db_table': "u'activity'"}, 'active_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'activity_type': ('django.db.models.fields.SmallIntegerField', [], {}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_auditted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'receiving_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'received_activity'", 'to': "orm['auth.User']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}) }, 'askbot.anonymou
sproblem': { 'Meta': {'object_name': 'AnonymousProblem'}, 'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ip_addr': ('django.db.models.fields.IPAddressField', [], {'m
ax_length': '15'}), 'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'anonymous_problems'", 'to': "orm['askbot.Exercise']"}), 'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}), 'text': ('django.db.models.fields.TextField', [], {}), 'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}) }, 'askbot.anonymousexercise': { 'Meta': {'object_name': 'AnonymousExercise'}, 'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}), 'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}), 'summary': ('django.db.models.fields.CharField', [], {'max_length': '180'}), 'tagnames': ('django.db.models.fields.CharField', [], {'max_length': '125'}), 'text': ('django.db.models.fields.TextField', [], {}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}), 'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}) }, 'askbot.problem': { 'Meta': {'object_name': 'Problem', 'db_table': "u'problem'"}, 'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'accepted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'added_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'problems'", 'to': "orm['auth.User']"}), 'comment_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'deleted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'deleted_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'deleted_problems'", 'null': 'True', 'to': "orm['auth.User']"}), 'html': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'last_edited_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'last_edited_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'last_edited_problems'", 'null': 'True', 'to': "orm['auth.User']"}), 'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'locked_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'locked_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locked_problems'", 'null': 'True', 'to': "orm['auth.User']"}), 'offensive_flag_count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}), 'exercise': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'problems'", 'to': "orm['askbot.Exercise']"}), 'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'text': ('django.db.models.fields.TextField', [], {'null': 'True'}), 'vote_down_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'vote_up_count': 
('django.db.models.fields.IntegerField', [], {'default': '0'}), 'wiki': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'wikified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}) }, 'askbot.problemrevision': { 'Meta': {'object_name': 'ProblemRevision', 'db_table': "u'problem_revision'"}, 'problem': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['askbot.Problem']"}), 'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'problemrevisions'", 'to': "orm['auth.User']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'revised_at': ('django.db.models.fields.DateTimeField', [], {}), 'revision': ('django.db.models.fields.PositiveIntegerField', [], {}), 'summary': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}), 'text': ('django.db.models.fields.TextField', [], {}) }, 'askbot.award': { 'Meta': {'object_name': 'Award', 'db_table': "u'award'"}, 'awarded_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'badge': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_badge'", 'to': "orm['askbot.Badge']"}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'notified': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': "orm['auth.User']"}) }, 'askbot.badge': { 'Meta': {'unique_together': "(('name', 'type'),)", 'object_name': 'Badge', 'db_table': "u'badge'"}, 'awarded_count': ('django.db.m
LivingOn/xbmc-script.youtube2kodi
resources/lib/MediaTypes.py
Python
gpl-2.0
5,517
0.005982
# -*- coding=utf8 -*- #****************************************************************************** # MediaTypes.py #------------------------------------------------------------------------------ # # Copyright (c) 2015 LivingOn <LivingOn@xmail.net> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of th
e License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FIT
NESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. #****************************************************************************** import os from resources.lib.SxxExxKennung import SxxExxKennung from resources.lib.YoutubePlaylist import YoutubePlaylist class MediaType(object): _PLUGIN = "plugin://plugin.video.youtube/play/?video_id=%s" _all_strm_files = [] _all_strm_folder = [] def create_strm_files(self): raise NotImplemented @classmethod def activate_all_streams(cls, series_library, movies_library): for strmfile in _get_inactive_strms_in(series_library): _activate_stream_in(strmfile) for strmfile in _get_inactive_strms_in(movies_library): _activate_stream_in(strmfile) _remove_inactive_file_from(series_library) _remove_inactive_file_from(movies_library) @classmethod def exists_inactive_streams(cls, series_library, movies_library): inactive_series = os.path.exists("%sinactive" % series_library) inactive_movies = os.path.exists("%sinactive" % movies_library) return inactive_series or inactive_movies @classmethod def all_strm_folder(cls): all_folder = [] [all_folder.append(i) for i in cls._all_strm_folder if not i in all_folder] return all_folder @classmethod def clear_all_strm_folder(cls): cls._all_strm_folder = [] class NoMediaFile(MediaType): def create_strm_files(self): pass class SingleMediaFile(MediaType): def __init__(self, librarypath, title, videoid, season=None): self._librarypath = librarypath self._title = title self._videoid = videoid self._season = season def create_strm_files(self): folder = _create_strm_folder(self._librarypath, self._title) self._all_strm_folder.append(folder) title = self._season if self._season else self._title strmfile = "%s/%s.strm" % (folder, title) if _write_strm_file(strmfile, MediaType._PLUGIN % self._videoid): _append_to_inactive_file(strmfile, self._librarypath) class PlaylistFile(MediaType): def __init__(self, libraypath, title, playlistid): self._librarypath = libraypath self._title = title self._playlistid = playlistid def create_strm_files(self): folder = _create_strm_folder(self._librarypath, self._title) self._all_strm_folder.append(folder) for (title, videoid) in YoutubePlaylist.parse(self._playlistid): serie = SxxExxKennung.parse(title) if serie: strmfile = "%s/%s.strm" % (folder, serie) if _write_strm_file(strmfile, MediaType._PLUGIN % videoid): _append_to_inactive_file(strmfile, self._librarypath) def _create_strm_folder(librarypath, title): folder = "%s%s" % (librarypath, title) try: os.mkdir(folder) except OSError: pass return folder def _write_strm_file(strmfile, content): result = False entryline = "%s\n" % content if _is_not_in_strm_file(strmfile, entryline): entryline = "#%s" % entryline try: open(strmfile, "a+").write(entryline) result = True except IOError: pass return result def _append_to_inactive_file(strmfile, librarypath): inactive_file = "%sinactive" % librarypath entryline = "%s\n" % strmfile try: open(inactive_file, "a+").write(entryline) except IOError: pass def _get_inactive_strms_in(library): content = [] inactive_file = "%sinactive" % library try: content = open(inactive_file, "rU").readlines() except IOError: pass result = [] [result.append(i.strip("\n")) for i in content if not i in result] return result def _is_not_in_strm_file(strmfile, entryline): content = [] try: content = open(strmfile, 
"rU").readlines() except IOError: pass return not entryline in content def _activate_stream_in(strmfile): try: content = open(strmfile, "rU").readlines() new_content = [] for line in content: if line.startswith("#plugin:"): new_content.append(line[1:]) else: new_content.append(line) open(strmfile, "w").writelines(new_content) except IOError: pass def _remove_inactive_file_from(library): try: os.remove("%sinactive" % library) except OSError: pass
SUSE/ceph-deploy
ceph_deploy/lib/__init__.py
Python
mit
817
0
""" This module is meant for vendorizing Python libraries. Most libraries will need to have some ``
sys.path`` alterations done unless they are doing relative imports. Do **not** add anything to this module that does not represent a vendorized library. Vendored libraries should go into the ``vendor`` directory and imported from there. This is so we allow libraries that are installed normally to be imported if the vendored module is not available. Th
e import dance here is done so that all other imports throught ceph-deploy are kept the same regardless of where the module comes from. The expected way to import remoto would look like this:: from ceph_deploy.lib import remoto """ try: # vendored from .vendor import remoto except ImportError: # normally installed import remoto # noqa
LLNL/spack
var/spack/repos/builtin/packages/r-pbdzmq/package.py
Python
lgpl-2.1
1,667
0.002999
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class RPbdzmq(RPackage):
    """Programming with Big Data -- Interface to 'ZeroMQ'

    'ZeroMQ' is a well-known library for high-performance asynchronous
    messaging in scalable, distributed applications. This package provides
    high level R wrapper functions to easily utilize 'ZeroMQ'. We mainly
    focus on interactive client/server programming frameworks. For
    convenience, a minimal 'ZeroMQ' library (4.1.0 rc1) is shipped with
    'pbdZMQ', which can be used if no system installation of 'ZeroMQ' is
    available. A few wrapper functions compatible with 'rzmq' are also
    provided."""

    homepage = "http://r-pbd.org/"
    url      = "https://cloud.r-project.org/src/contrib/pbdZMQ_0.2-4.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/pbdZMQ"

    version('0.3-4', sha256='07794bd6858e093f8b6b879ddd5ab0195449b47a41b70cab2f60603f0a53b129')
    version('0.3-3', sha256='ae26c13400e2acfb6463ff9b67156847a22ec79f3b53baf65119efaba1636eca')
    version('0.3-2', sha256='ece2a2881c662f77126e4801ba4e01c991331842b0d636ce5a2b591b9de3fc37')
    version('0.2-4', sha256='bfacac88b0d4156c70cf63fc4cb9969a950693996901a4fa3dcd59949ec065f6')

    depends_on('r@3.0.0:', type=('build', 'run'))
    depends_on('r@3.2.0:', when='@0.2-6:', type=('build', 'run'))
    depends_on('r@3.5.0:', when='@0.3-4:', type=('build', 'run'))
    depends_on('r-r6', when='@:0.2-6', type=('build', 'run'))
    depends_on('libzmq@4.0.4:')
sorenh/cc
vendor/boto/boto/sns/__init__.py
Python
apache-2.0
13,553
0.003394
# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from boto.connection import AWSQueryConnection from boto.sdb.regioninfo import SDBRegionInfo import boto try: import json except ImportError: import simplejson as json #boto.set_stream_logger('sns') class SNSConnection(AWSQueryConnection): DefaultRegionName = 'us-east-1' DefaultRegionEndpoint = 'sns.us-east-1.amazonaws.com' APIVersion = '2010-03-31' SignatureVersion = '2' def __init__(self, aws_access_key_id=None, aws_secret_access_key=None, is_secure=True, port=None, proxy=None, proxy_port=None, proxy_user=None, proxy_pass=None, debug=0, https_connection_factory=None, region=None, path='/', converter=None): if not region: region = SDBRegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint) self.region = region AWSQueryConnection.__init__(self, aws_ac
cess_key_id, aws_secret_access_key, is_secure, port, proxy, proxy_port, proxy_user, proxy_pass, self.region.endpoint, debug, https_connection_factory
, path) def get_all_topics(self, next_token=None): """ :type next_token: string :param next_token: Token returned by the previous call to this method. """ params = {'ContentType' : 'JSON'} if next_token: params['NextToken'] = next_token response = self.make_request('ListTopics', params, '/', 'GET') body = response.read() if response.status == 200: return json.loads(body) else: boto.log.error('%s %s' % (response.status, response.reason)) boto.log.error('%s' % body) raise self.ResponseError(response.status, response.reason, body) def get_topic_attributes(self, topic): """ Get attributes of a Topic :type topic: string :param topic: The ARN of the topic. """ params = {'ContentType' : 'JSON', 'TopicArn' : topic} response = self.make_request('GetTopicAttributes', params, '/', 'GET') body = response.read() if response.status == 200: return json.loads(body) else: boto.log.error('%s %s' % (response.status, response.reason)) boto.log.error('%s' % body) raise self.ResponseError(response.status, response.reason, body) def add_permission(self, topic, label, account_ids, actions): """ Adds a statement to a topic's access control policy, granting access for the specified AWS accounts to the specified actions. :type topic: string :param topic: The ARN of the topic. :type label: string :param label: A unique identifier for the new policy statement. :type account_ids: list of strings :param account_ids: The AWS account ids of the users who will be give access to the specified actions. :type actions: list of strings :param actions: The actions you want to allow for each of the specified principal(s). """ params = {'ContentType' : 'JSON', 'TopicArn' : topic, 'Label' : label} self.build_list_params(params, account_ids, 'AWSAccountId') self.build_list_params(params, actions, 'ActionName') response = self.make_request('AddPermission', params, '/', 'GET') body = response.read() if response.status == 200: return json.loads(body) else: boto.log.error('%s %s' % (response.status, response.reason)) boto.log.error('%s' % body) raise self.ResponseError(response.status, response.reason, body) def remove_permission(self, topic, label): """ Removes a statement from a topic's access control policy. :type topic: string :param topic: The ARN of the topic. :type label: string :param label: A unique identifier for the policy statement to be removed. """ params = {'ContentType' : 'JSON', 'TopicArn' : topic, 'Label' : label} response = self.make_request('RemovePermission', params, '/', 'GET') body = response.read() if response.status == 200: return json.loads(body) else: boto.log.error('%s %s' % (response.status, response.reason)) boto.log.error('%s' % body) raise self.ResponseError(response.status, response.reason, body) def create_topic(self, topic): """ Create a new Topic. :type topic: string :param topic: The name of the new topic. 
""" params = {'ContentType' : 'JSON', 'Name' : topic} response = self.make_request('CreateTopic', params, '/', 'GET') body = response.read() if response.status == 200: return json.loads(body) else: boto.log.error('%s %s' % (response.status, response.reason)) boto.log.error('%s' % body) raise self.ResponseError(response.status, response.reason, body) def delete_topic(self, topic): """ Delete an existing topic :type topic: string :param topic: The ARN of the topic """ params = {'ContentType' : 'JSON', 'TopicArn' : topic} response = self.make_request('DeleteTopic', params, '/', 'GET') body = response.read() if response.status == 200: return json.loads(body) else: boto.log.error('%s %s' % (response.status, response.reason)) boto.log.error('%s' % body) raise self.ResponseError(response.status, response.reason, body) def publish(self, topic, message, subject=None): """ Get properties of a Topic :type topic: string :param topic: The ARN of the new topic. :type message: string :param message: The message you want to send to the topic. Messages must be UTF-8 encoded strings and be at most 4KB in size. :type subject: string :param subject: Optional parameter to be used as the "Subject" line of the email notifications. """ params = {'ContentType' : 'JSON', 'TopicArn' : topic, 'Message' : message} if subject: params['Subject'] = subject response = self.make_request('Publish', params, '/', 'GET') body = response.read() if response.status == 200: return json.loads(body) else: boto.log.error('%s %s' % (response.status, response.reason)) boto.log.error('%s' % body) raise self.ResponseError(response.status, response.reason, body)
mph55/lanstation13
tools/bot/vgstation/common/config.py
Python
gpl-3.0
1,605
0.011838
''' Created on Jul 28, 2013 @author: Rob ''' import os, yaml config = { 'names': [ 'NT', 'VGTestServer' ], 'servers':{ 'irc.server.tld': { 'port':6667, 'password':None, 'channels':{ '#vgstation': { 'nudges':True, 'status':True } } } }, 'plugins': { 'redmine': { 'url': '', 'apikey':'' }, 'nudge': { 'hostname': '', 'port': 45678, 'key': 'passwordgoeshere' } } } def ReadFromDisk(): global config config_file = 'config.yml' if not os.path.isfile(config_file
): with open(config_file, 'w') as cw: yaml.dump(config, cw, default_flow_style=False)
with open(config_file, 'r') as cr: config = yaml.safe_load(cr) # if config['database']['username'] == '' or config['database']['password'] == '' or config['database']['schema'] == '': # print('!!! Default config.yml detected. Please edit it before continuing.') # sys.exit(1) def get(key,default=None): global config try: parts = key.split('.') value = config[parts[0]] if len(parts) == 1: return value for part in parts[1:]: value = value[part] return value except KeyError: return default
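A quick illustration of the dotted-key lookup that get() implements, using values from the default config above (illustrative only):

ReadFromDisk()                               # writes config.yml on first run, then loads it
port = get('plugins.nudge.port')             # -> 45678
timeout = get('plugins.nudge.timeout', 30)   # missing leaf raises KeyError internally -> returns 30
names = get('names')                         # top-level keys work as well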
vially/googlemusic-xbmc
resources/Lib/gmusicapi/clients/__init__.py
Python
gpl-3.0
323
0.01548
# -*- coding: utf-8 -*- #from __future__ import print_function, division, absolute_import
, unicode_literals #from gmusicapi.clients.webclient import Webclient #from gmusicapi.clients.musicmanager import Musicmanager from gmusicapi.clients.
mobileclient import Mobileclient #(Webclient, Musicmanager, Mobileclient) # noqa
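Since everything except the Mobileclient import is commented out here, a hedged sketch of the one client this build exposes. The credentials are placeholders, and login()/FROM_MAC_ADDRESS reflect the gmusicapi API of this era; both are assumptions about the vendored version.

from gmusicapi.clients import Mobileclient

api = Mobileclient()
# FROM_MAC_ADDRESS asks gmusicapi to derive a device id from the host MAC address.
if api.login('user@example.com', 'app-password', Mobileclient.FROM_MAC_ADDRESS):
    songs = api.get_all_songs()   # presumably what the XBMC plugin builds its library from
    api.logout()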
NendoTaka/CodeForReference
Codingame/Python/Clash/SortHighLowReverse.py
Python
mit
112
0.017857
l = [] for x in range(int(input())): l.append(int(input())) l.sort() print(' '.join(str(x) for x in l[
::-1]))
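The same logic on a fixed list, as a worked example (stdin replaced by a literal so the behaviour is visible):

# With input count 4 and values 3, 10, 7, 3:
l = [3, 10, 7, 3]
l.sort()                                   # ascending: [3, 3, 7, 10]
print(' '.join(str(x) for x in l[::-1]))   # reversed slice prints "10 7 3 3"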
globocom/database-as-a-service
dbaas/maintenance/migrations/0051_auto__add_removeinstancedatabase.py
Python
bsd-3-clause
78,775
0.007553
# -*- coding: utf-8 -*- from south.utils import datetime_utils as datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'RemoveInstanceDatabase' db.create_table(u'maintenance_removeinstancedatabase', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('current_step', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0)), ('status', self.gf('django.db.models.fields.IntegerField')(default=0)), ('started_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)), ('finished_at', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)), ('can_do_retry', self.gf('django.db.models.fields.BooleanField')(default=True)), ('task_schedule', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name=u'maintenance_removeinstancedatabase_related', null=True, to=orm['maintenance.TaskSchedule'])), ('task', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'remove_instances_database_manager', to=orm['notification.TaskHistory'])), ('database', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'remove_instances_database_manager', to=orm['logical.Database'])), ('instance', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'remove_instances_database_manager', to=orm['physical.Instance'])), )) db.send_create_signal(u'maintenance', ['RemoveInstanceDatabase']) def backwards(self, orm): # Deleting model 'RemoveInstanceDatabase' db.delete_table(u'maintenance_removeinstancedatabase') models = { u'account.organization': { 'Meta': {'object_name': 'Organization'}, 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'external': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'grafana_datasource': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}), 'grafana_endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'grafana_hostgroup': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}), 'grafana_orgid': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'account.team': { 'Meta': {'ordering': "[u'name']", 'object_name': 'Team'}, 'contacts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'database_alocation_limit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'team_organization'", 
'on_delete': 'models.PROTECT', 'to': u"orm['account.Organization']"}), 'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'}) }, u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'backup.backupgroup': { 'Meta': {'object_name': 'BackupGroup'}, 'created_at': ('djan
go.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}) }, u'backup.snapshot': { 'Meta': {'object_name': 'Snapshot'}, 'cre
ated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'database_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}), 'end_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'backup_environment'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Environment']"}), 'error': ('django.db.models.fields.CharField', [],
IAlwaysBeCoding/mrq
tests/tasks/mongodb.py
Python
mit
381
0
from mrq.task import Task from mrq.context import connections class MongoTimeout(Task): def run(self, params): res = connections.mongodb_jobs.eval(""" function() { var a; for (i=0;i<10000000;i++) { 
for (y=0;y<10000000;y++) { a = Math.max(y); } } return a; } """) return res
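For context, a hedged sketch of exercising this task in-process. It assumes an initialized mrq context so that connections.mongodb_jobs points at a live MongoDB; in normal use the task would be queued to a worker instead of called directly.

task = MongoTimeout()
result = task.run({})   # spins in server-side JS; the surrounding test presumably expects a timeout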
OCA/vertical-abbey
mass/base_config_settings.py
Python
agpl-3.0
793
0
# -*- coding: utf-8 -*- # Copyright 2017-2019 Barroux Abbey (www.barroux.org) # Copyright 2017-2019 Akretion France (www.akretion.com) # @author: Alexis de Lattre <alexis.delattre@akretion.com> # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl). from odoo import models, fields class Ba
seConfigSettings(models.TransientModel): _inherit = 'base.config.settings' mass_validation_account_id = fields.Many2one( related='company_id.mass_validation_account_id') mass_validation_analytic_account_id = fields.Many2one( related='company_id.mass_validation_analytic_account_id') mass_validation_journal_id = fields.Many2one
( related='company_id.mass_validation_journal_id') mass_post_move = fields.Boolean(related='company_id.mass_post_move')
mkalte666/Dragonflame
IrcClient.py
Python
mit
3,540
0.05113
# coding=utf-8 import socket import thread import time import Queue import re import random class IrcClient: def __init__(self, host, port, nick, realname, printAll=True, isMibbitBot=False): self.nick = nick self.realname = realname self.host = host self.port = port self.sock = socket.socket() self.RecvQueue = Queue.Queue() self.SendQueue = Queue.Queue() self.printAll = printAll self.EventHandlers = [] self.ignoredNicks = [] self.channels = [] self.sock.connect((host,port)) thread.start_new_thread(self.ReceiveWorker, ()) thread.start_new_thread(self.SendWorker, ()) thread.start_new_thread(self.EventWorker, ()) self.RegisterEventHandler(self.PingEventHandler) self.WaitForSilence() self.Send("USER "+self.nick+" 0 * :"+self.realname) self.Send("NICK "+self.nick) self.WaitForSilence() def ReceiveWorker(self): recvbuffer = "" c = "" while True: c = self.sock.recv(1) if c=='\n': if self.printAll == True: print("RECV: "+recvbuffer) self.RecvQueue.put(recvbuffer) recvbuffer = "" else: recvbuffer += c def SendWorker(self): while True: toSend = self.SendQueue.get() if self.printAll == True: print("SEND: "+toSend) self.sock.send(toSend) def EventWorker(self): while True: recvItem = self.RecvQueue.get() prefix = "" command = "" params = "" trailing = "" expression = re.compile(ur':([\w!.@-]*) {0,1}([A-Za-z0-9]*) {0,1}([\w# ]*) {0,1}:{0,1}(.*)') match = re.search(expression, recvItem) if match != None: prefix = match.group(1) command = match.group(2) params = match.group(3) trailing = match.group(4) for func in self.EventHandlers: try: func(self, recvItem, prefix, command, params, trailing) except: print("WARNING: Error in handler function!") pass def WaitForSilence(self, maxIterations=10, delay=0.2): time.sleep(delay) while self.RecvQueue.empty() != True: time.sleep(delay) maxIterations -= 1 if maxIterations <= 0: break pass def RegisterEventHandler(self, func): self.EventHandlers.append(func) def RemoveEventHandler(self, func): try: self.EventHandlers.remove(func) except: print("WARNING: tried to remove unknown handler!") pass def Send(self, cmd): self.SendQueue.put(cmd+'\n') def PingEventHandler(self, client, event, prefix, command, params, trailing): if event[:4] == "PING":
self.Send("PONG"+event[4:]) def SendMessage(self, destination, message): self.Send("PRIVMSG "+destination+" :"+message) def BroadcastMessage(self, message): for channel in self.channels: self.SendMessage(channel, message) def SetNick(self, nickn
ame): self.Send("NICK "+nickname) def JoinChannel(self, channelname, channelpassword=""): self.Send("JOIN "+channelname+" "+channelpassword) self.channels.append(channelname) def LeaveChannel(self, channelname): self.Send("PART "+channelname) try: self.channels.remove(channelname) except: print("WARNING: Tried to leave channel "+channelname+", but you aren't in that channel!") pass def AddIgnore(self, name): self.ignoredNicks.append(name) def RemoveIgnore(self, name): try: self.ignoredNicks.remove(name) except: print("WARNING: You didn't ignore "+name+" in the first place!") pass def IsIgnored(self, name): if name in self.ignoredNicks: return True else: return False def Identify(self, password): self.SendMessage("nickserv", "identify "+password)
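A short usage sketch for the client above. Host, channel, and nick are placeholders; the handler signature matches what EventWorker passes to registered functions.

client = IrcClient('irc.example.org', 6667, 'MyBot', 'My Real Name')
client.JoinChannel('#example')
client.BroadcastMessage('hello from IrcClient')

def echo(client, event, prefix, command, params, trailing):
    # Echo channel messages back, skipping ignored nicks (prefix is nick!user@host).
    if command == 'PRIVMSG' and not client.IsIgnored(prefix.split('!')[0]):
        client.SendMessage(params.strip(), trailing)

client.RegisterEventHandler(echo)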
benian/aecg100
setup.py
Python
mit
418
0.023923
#!/usr/bin/env python im
port setuptools if __name__ == "__main__": setuptools.setup( name="aecg100", version="1.1.0.18", author="WHALETEQ Co., LTD", description="WHALETEQ Co., LTD AECG100 Linux SDK
", url="https://www.whaleteq.com/en/Support/Download/7/Linux%20SDK", include_package_data=True, package_data={ '': ['sdk/*.so', 'sdk/*.h', 'sample/python/*.txt'] }, )
lhupfeldt/jenkinsflow
test/prefix_test.py
Python
bsd-3-clause
1,934
0.003619
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT # All rights reserved. This work is under a BSD license, see LICENSE.TXT. from jenkinsflow.flow import serial from .framework import ap
i_select prefixed_jobs = """ serial flow: [ job: 'top_quick1' serial flow: [ job: 'top_x_quick2-1' ] serial flow: [ job: 'top_x_quick2-2' ] serial flow: [ job: 'top_x_quick2-3' ] job: 'top_quick3' parallel flow: ( serial flow: [ job: 'top_y
_z_quick4a' ] serial flow: [ job: 'quick4b' ] job: 'top_y_quick5' ) ] """ def test_prefix(api_type, capsys): with api_select.api(__file__, api_type) as api: def job(name): api.job(name, max_fails=0, expect_invocations=0, expect_order=None, params=None) api.flow_job() job('quick1') index = 0 for index in 1, 2, 3: job('x_quick2-' + str(index)) job('quick3') job('y_z_quick4') job('y_quick5') with serial(api, timeout=70, report_interval=3, job_name_prefix='top_', just_dump=True) as ctrl1: ctrl1.invoke('quick1') for index in 1, 2, 3: with ctrl1.serial(timeout=20, report_interval=3, job_name_prefix='x_') as ctrl2: ctrl2.invoke('quick2-' + str(index)) ctrl1.invoke('quick3') with ctrl1.parallel(timeout=40, report_interval=3, job_name_prefix='y_') as ctrl2: with ctrl2.serial(timeout=40, report_interval=3, job_name_prefix='z_') as ctrl3a: ctrl3a.invoke('quick4a') # Reset prefix with ctrl2.serial(timeout=40, report_interval=3, job_name_prefix=None) as ctrl3b: ctrl3b.invoke('quick4b') ctrl2.invoke('quick5') sout, _ = capsys.readouterr() assert prefixed_jobs.strip() in sout
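For readers of the expected dump above, how the nested job_name_prefix values compose (illustrative summary of the test's own calls and expectations):

#   serial(..., job_name_prefix='top_')     : invoke('quick1')   -> 'top_quick1'
#   nested serial with job_name_prefix='x_' : invoke('quick2-1') -> 'top_x_quick2-1'
#   nested 'y_' then 'z_'                   : invoke('quick4a')  -> 'top_y_z_quick4a'
#   nested job_name_prefix=None             : resets, so invoke('quick4b') -> 'quick4b'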
Jimdo/unattended-upgrades
test/test_logdir.py
Python
gpl-2.0
1,396
0.002149
#!/usr/bin/python import apt_pkg import logging import os import mock import sys import tempfile import unittest sys.path.insert(0, "..") from
unattended_upgrade import _setup_logging class MockOptions: dry_run = False debug = False class TestLogdir(unittest.TestCase): def setUp(self): self.tempdir = tempfile.mkdtemp() apt_pkg.init() self.mock_options = MockOptions() def test_logdir(self): # test log logdir = os.pat
h.join(self.tempdir, "mylog") apt_pkg.config.set("Unattended-Upgrade::LogDir", logdir) logging.root.handlers = [] _setup_logging(self.mock_options) self.assertTrue(os.path.exists(logdir)) def test_logdir_deprecated(self): # test if the deprecated APT::UnattendedUpgrades dir is not used # if the new Unattended-Upgrade::LogDir is given logdir = os.path.join(self.tempdir, "mylog-use") logdir2 = os.path.join(self.tempdir, "mylog-dontuse") apt_pkg.config.set("Unattended-Upgrade::LogDir", logdir) apt_pkg.config.set("APT::UnattendedUpgrades::LogDir", logdir2) logging.root.handlers = [] _setup_logging(self.mock_options) self.assertTrue(os.path.exists(logdir)) self.assertFalse(os.path.exists(logdir2)) if __name__ == "__main__": logging.basicConfig(level=logging.DEBUG) unittest.main()
NendoTaka/CodeForReference
Python/Sort/CountingSort.py
Python
mit
511
0.007828
def countingsort(sortablelist): maxval = max(sortablelist) m = maxval + 1 count = [0] * m # init with zeros for a in sortablelist: count[a] += 1
# count occurrences i = 0 for a in range(m): # emit for c in range(count[a]): # - emit 'count[a]' copies of 'a' sortablelist[i] = a i += 1 def main(): import random a = [random.randint(0, 1000) for i in range(100)] countingsort(a) print(a) main()
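A worked example of the sort above, plus the complexity it trades on: O(n + k) time and O(k) extra space, where k is the maximum value, so it suits small integer ranges.

sample = [3, 1, 3, 0]
countingsort(sample)           # tally pass leaves count == [1, 1, 0, 2]
assert sample == [0, 1, 3, 3]  # emit pass rewrites the list in place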
soerendip42/rdkit
Code/GraphMol/ReducedGraphs/Wrap/testReducedGraphs.py
Python
bsd-3-clause
1,470
0.014286
# $Id$ # from rdkit import Chem from rdkit.Chem import rdReducedGraphs as rdRG from rdkit import RDConfig import numpy import unittest class TestCase(unittest.TestCase) : def setUp(self): pass def test1(self): m = Chem.MolFromSmiles('OCCc1ccccc1') mrg = rdRG.GenerateMolExtendedReducedGraph(m) mrg.UpdatePropertyCache(False) self.failUnlessEqual('[*]cCCO',Chem.MolToSmiles(mrg)) m = Chem.MolFromSmiles('OCCC1CCCCC1') mrg = rdRG.GenerateMolExtendedReducedGraph(m) mrg.UpdatePropertyCache(False) self.failUnlessEqual('[*]CCCO',Chem.MolToSmiles(mrg)) def test2(self): m = Chem.MolFromSmiles('OCCc1ccccc1') mrg = rdRG.GenerateMolExtended
ReducedGraph(m) mrg.UpdatePropertyCache(False) self.failUnlessEq
ual('[*]cCCO',Chem.MolToSmiles(mrg)) fp1 = rdRG.GenerateErGFingerprintForReducedGraph(mrg) fp2 = rdRG.GetErGFingerprint(m) md = max(abs(fp1-fp2)) self.failUnless(md<1e-4) def test3(self): m = Chem.MolFromSmiles('OCCc1ccccc1') fp1 = rdRG.GetErGFingerprint(m) m = Chem.MolFromSmiles('OCCC1CC=CC=C1') fp2 = rdRG.GetErGFingerprint(m) md = max(abs(fp1-fp2)) self.failUnlessAlmostEqual(0.0,md,4) def test4(self): m = Chem.MolFromSmiles('OCCc1ccccc1') fp1 = rdRG.GetErGFingerprint(m) fp2 = rdRG.GetErGFingerprint(m,fuzzIncrement=0.1) md = max(abs(fp1-fp2)) self.failUnlessAlmostEqual(0.2,md,4) if __name__ == '__main__': unittest.main()
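A minimal sketch that mirrors the calls exercised by the tests above (same molecule as test1, so the expected SMILES is taken from the test itself):

from rdkit import Chem
from rdkit.Chem import rdReducedGraphs as rdRG

mol = Chem.MolFromSmiles('OCCc1ccccc1')
fp = rdRG.GetErGFingerprint(mol)               # numpy array ErG descriptor
rg = rdRG.GenerateMolExtendedReducedGraph(mol)
rg.UpdatePropertyCache(False)                  # required before MolToSmiles, per test1
print(Chem.MolToSmiles(rg))                    # '[*]cCCO' according to test1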
iw3hxn/LibrERP
stock_inventory_export/__openerp__.py
Python
agpl-3.0
541
0
# -*- encoding: utf-8 -
*- { 'name': 'Export Inventory Costs', 'version': '3.0.0.0', 'category': "Warehouse Management", 'description': """ Export Inventory Costs """, 'author': 'Didotech SRL', 'website': 'http://www.didotech.com', 'lice
nse': 'AGPL-3', "depends": [ 'base', 'stock', ], "data": [ 'wizard/wizard_inventory_costs_view.xml', 'views/stock_view.xml' ], "demo": [], "active": False, "installable": True, "application": True, }
graik/biskit
archive_biskit2/Biskit/AmberEntropyMaster.py
Python
gpl-3.0
25,638
0.017396
## numpy-oldnumeric calls replaced by custom script; 09/06/2016 ## Automatically adapted for numpy-oldnumeric Mar 26, 2007 by alter_code1.py ## ## Biskit, a toolkit for the manipulation of macromolecular structures ## Copyright (C) 2004-2018 Raik Gruenberg & Johan Leckner ## ## This program is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 3 of the ## License, or any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You find a copy of the GNU General Public License in the file ## license.txt along with this program; if not, write to the Free ## Software Foundation, Inc., 675 Mass Ave, Cambridge,
MA 02139, USA. ## ## """ Parallelized AmberEntropist calculation. """ import os.path, copy import Biskit.oldnumeric as N0 import Biskit.tools as T import Biskit.settings as settings import Biskit.mathUtils as MU from Biskit.PVM.TrackingJobMaster import TrackingJobMaster from Biskit.PVM.hosts import cpus_all, nice_dic from Biskit import PDBModel, PDBProfiles
, EHandler, StdLog from Biskit.Dock import Complex slave_path = T.projectRoot()+"/Biskit/AmberEntropySlave.py" class AmberEntropyMaster(TrackingJobMaster): """ Run many AmberEntropist calculations on many nodes. The Master has a standard set of 13 protocols to run on rec, lig, and com trajectories, as well as on every single member trajectory - in total 113. It accepts one variable parameter, e.g. s(tart). Each protocol is then run for all values of the variable parameter. A protocol is simply a set of options that are passed on to the AmberEntropist (which is run from within AmberEntropySlave). Comparing the different protocols allows one to more or less separate random from real correlations, rigid body from intermolecular vibrations, etc. Results are put into a tree-shaped dictionary of dictionaries. The first dimension/key is the member index -- None for the complete ensemble trajectory, 0 for the first member, etc. The second dimension/key is the name of the protocol, e.g. 'com_split' for the complex trajectory with separately fitted receptor and ligand. The last dimension contains the different values obtained from the ptraj run, e.g. 'S_total' points to the total entropy in cal/mol/K, 'contributions' to the entropy contribution of each mode, 'T' to the assumed temperature, 'vibes' gives the number of vibrations with too low frequencies (according to ptraj). All these are lists of values - one for each value of the variable option. Example:: * r[None]['fcom']['S_vibes'][0] -> float first vibr. Entropy of free fake complex for complete ensemble * r[0]['com']['S_total'] -> [ float, float, .. ] the total entropies of the complex calculated for the first ensemble member and the different values of the variable option """ def __init__(self, rec=None, lig=None, com=None, out=None, cr=None, var='s', vrange=[0], jack=0, zfilter=None, clean=0, all=1, exrec=[], exlig=[], excom=[], hosts=cpus_all, niceness=nice_dic, w=0, a=1, debug=0, restart=0, **kw ): """ @param rec: free rec trajectory [required] @type rec: str @param lig: free lig trajectory [required] @type lig: str @param com: complex trajectory [required] @type com: str @param out: file name for pickled result [required] @type out: str @param cr: chains of receptor in complex trajectory [n_chains rec] @type cr: [int] @param var: name of variable option [ s ] @type var: str @param vrange: set of values used for variable option OR 'start:stop:step', string convertible to range() input @type vrange: [any] @param jack: set up leave-one-trajectory-out jackknife test (default: 0) (replaces var with 'ex1' and vrange with range(1,n_members+1)) @type jack: [0|1] @param zfilter: kick out outlier trajectories using z-score threshold on RMSD trace (default: None->don't) @type zfilter: float @param clean: remove pickled ref models and member trajectories (default: 0) @type clean: 0|1 @param all: skip single member trajs (default: 1) @type all: 0|1 @param exrec: exclude certain members of receptor ensemble [[]] @type exrec: [int] @param exlig: exclude certain members of ligand ensemble [[]] @type exlig: [int] @param excom: exclude certain members of complex ensemble [[]] @type excom: [int] @param hosts: nodes to be used (default: all known) @type hosts: [str] @param debug: don't delete output files (default: 0) @type debug: 1|0 @param kw: additional key=value parameters for AmberEntropist, AmberCrdEntropist, Executor and Master. @type kw: key=value pairs :: ... 
parameters for AmberEntropist cast - 1|0, equalize free and bound atom content [1] s,e - int, start and stop frame [0, to end] atoms - [ str ], names of atoms to consider [all] protein - 1|0, remove non-protein atoms [0..don't] step - int, frame offset [no offset] thin - float, use randomly distributed fraction of frames [all] (similar to step but perhaps better for entropy calculations) ex - [int] OR ([int],[int]), exclude member trajectories [[]] ex_n - int, exclude last n members OR... [None] ex3 - int, exclude |ex3|rd triple of trajectories [0] (index starts with 1! 0 to exclude nothing) ... parameters for AmberCrdEntropist f_template - str, alternative ptraj input template [default] ... parameters for Executor: log - Biskit.LogFile, program log (None->STDOUT) [None] verbose - 0|1, print progress messages to log [log != STDOUT] ... parameters for Master w - 0|1, show X window for each slave [0] a - 0|1, add hosts to PVM [1] """ ## normal and error output self.fout = T.absfile( out ) self.ferror = os.path.dirname(self.fout) +'/AmberEntropy_errors.log' self.debug = debug self.log = StdLog() ## input files and variable option self.rec = T.absfile( rec, 0 ) self.lig = T.absfile( lig, 0 ) self.com = T.absfile( com, 0 ) self.cr = cr self.cl = None self.var = var self.vrange = self.__vrange( vrange ) self.jack = jack self.zfilter = zfilter self.n_members = None self.clean = clean self.all = all ## members to exclude, outliers will be added if zfilter is not None self.ex_frec = exrec self.ex_flig = exlig self.ex_com = excom ## reserve for loaded reference models self.ref_frec = self.ref_flig = None self.ref_brec = self.ref_blig = self.ref_com = None ## reserve for extracted member trajectories self.members_frec = self.members_flig = [] self.members_brec = self.members_blig = [] ## options to be passed on to AmberEntropist self.options = kw if not restart: ## Load trajector
SmileEric/SEIMS
preprocess/config.py
Python
gpl-2.0
6,003
0.01266
#! /usr/bin/env python #coding=utf-8 ## @Configuration of Preprocessing for SEIMS # # TODO, give more detailed description here. import os,platform ## Directories if platform.system() == "Windows": DATA_BASE_DIR = r'E:\github-zlj\model_data\model_dianbu_30m_longterm\data_prepare' PREPROC_SCRIPT_DIR = r'E:\github-zlj\SEIMS\preprocess' CPP_PROGRAM_DIR = r'E:\github-zlj\SEIMS_Preprocess\Debug' METIS_DIR = r'E:\github-zlj\SEIMS_Preprocess\metis\programs\Debug' MPIEXEC_DIR = None elif platform.system() == "Linux": DATA_BASE_DIR = r'/data/liujz/data' PREPROC_SCRIPT_DIR = r'/data/hydro_preprocessing' CPP_PROGRAM_DIR = r'/data/hydro_preprocessing/cpp_programs' METIS_DIR = r'/soft/programming/metis-5.1.0/build/programs' MPIEXEC_DIR = None CLIMATE_DATA_DIR = DATA_BASE_DIR + os.sep + 'climate' SPATIAL_DATA_DIR = DATA_BASE_DIR + os.sep + 'spatial' WORKING_DIR = DATA_BASE_DIR + os.sep + 'output' ## MongoDB related #HOSTNAME = '192.168.6.55' HOSTNAME = '127.0.0.1' PORT = 27017 ClimateDBName = 'climate_dianbu' SpatialDBName = 'model_dianbu_30m_longterm' forCluster = False stormMode = False if forCluster and 'cluster_' not in SpatialDBName.lower(): SpatialDBName = 'cluster_' + SpatialDBName ## Climate Input PrecSitesVorShp = CLIMATE_DATA_DIR + os.sep + 'shp' + os.sep + 'Preci_dianbu_Vor.shp' if stormMode: PrecStormSitesVorShp = CLIMATE_DATA_DIR + os.sep + 'shp' + os.sep + 'Preci_dianbu_Vor_storm.shp' MeteorSitesVorShp = CLIMATE_DATA_DIR + os.sep + 'shp' + os.sep + 'Metero_hefei_Vor.shp' PrecExcelPrefix = CLIMATE_DATA_DIR + os.sep + 'precipitation_by_day_' PrecDataYear = [2014] MeteoVarFile = CLIMATE_DATA_DIR + os.sep + 'Variables.txt' MeteoDailyFile = CLIMATE_DATA_DIR + os.sep + 'meteorology_dianbu_daily.txt' MetroSiteFile = CLIMATE_DATA_DIR + os.sep + 'sites_hefei.txt' DischargeExcelPrefix = CLIMATE_DATA_DIR + os.sep + 'discharge_by_day_' DischargeYear = [2014] ## Parameters for SEIMS sqliteFile = DATA_BASE_DIR + os.sep + "Parameter.db3" ## Spatial Input dem = SPATIAL_DATA_DIR + os.sep + 'dem_30m.tif' outlet_file = SPATIAL_DATA_DIR + os.sep + 'outlet_30m.shp' threshold = 0 # threshold for stream extraction from D8-flow accumulation weighted Peuker-Douglas stream sources # if threshold is 0, then Drop Analysis is used to select the optimal value. np = 4 # number of parallel processors landuseFile = SPATIAL_DATA_DIR + os.sep + 'landuse_30m.tif' sandList = [] clayList = [] orgList = [] for i in [1,2]: sandFile = SPATIAL_DATA_DIR + os.sep + "sand" + str(i) + ".tif" clayFile = SPATIAL_DATA_DIR + os.sep + "clay" + str(i) + ".tif" orgFile = SPATIAL_DATA_DIR + os.sep + "org" + str(i) + ".tif" sandList.append(sandFile) clayList.append(clayFile) orgList.append(orgFile) defaultSand = 40 defaultClay = 30 defaultOrg = 2.5 ## Predefined variables CROP_FILE = PREPROC_SCRIPT_DIR + os.sep + 'crop.txt' CROP_ATTR_LIST = ["IDC", "EXT_COEF", "BMX_TREES", "BLAI", "HVSTI",\ "MAT_YRS", "T_BASE", "FRGRW1", "FRGRW2", "LAIMX1",\ "LAIMX2", "DLAI", "BN1", "BN2", "BN3", "BP1", "BP2",\ "BP3", "BIO_E", "BIOEHI", "CO2HI", "WAVP", "BIO_LEAF",\ "RDMX","CNYLD", "CPYLD", "WSYF", "DLAI", "T_OPT"] # LANDUSE_ATTR_LIST and SOIL_ATTR_LIST are selected from sqliteFile database LANDUSE_ATTR_LIST = ["Manning", "Interc_max", "Interc_min", "RootDepth", \ "USLE_C", "SOIL_T10","USLE_P"] LANDUSE_ATTR_DB = ["manning","i_max","i_min", "root_depth", "usle_c", "SOIL_T10"] ## Be cautious: the sequence from "Sand" to "Poreindex" is fixed because of soil_param.py. 
SOIL_ATTR_LIST = ["Sand", "Clay", "WiltingPoint", "FieldCap", "Porosity","Density",\ "Conductivity", "Poreindex", "USLE_K", "Residual", ] SOIL_ATTR_DB = ["sand", "clay","wp", "fc", "porosity","B_DENSITY","ks", "P_INDEX",\ "usle_k", "rm"] ### There are 15 attributes in SoilLookup table now. ### They are [SOILCODE], [SNAM], [KS](Conductivity), [POROSITY], [FC](field capacity), [P_INDEX](Poreindex), [RM], ### [WP](wiltingpoint), [B_DENSITY], [SAND], [CLAY], [SILT], [USLE_K], [TEXTURE], [HG] ## Hydrological parameters coeTable = {"T2":[0.05, 0.48],"T10":[0.12, 0.52], "T100":[0.18,0.55]} ## used in radius.py ## Conventional Spatial Raster Data File Names filledDem = "demFilledTau.tif" flowDir = "flowDirTauD8.tif" slope = "slopeTau.tif" acc = "accTauD8.tif" streamRaster = "streamRasterTau.tif" flowDirDinf = "flowDirDinfTau.tif" dirCodeDinf = "dirCodeDinfTau.tif" slopeDinf = "slopeDinfTau.tif" weightDinf = "weightDinfTau.tif" modifiedOutlet = "outletM.shp" streamSkeleton = "streamSkeleton.tif" streamOrder = "streamOrderTau.tif" chNetwork = "chNetwork.txt" chCoord = "chCoord.txt" streamNet = "streamNet.shp" subbasin = "subbasinTau.tif" mask_to_ext = "mask.tif" ## masked file names subbasinM = "subbasinTauM.tif" flowDirM = "flowDirTauM.tif" streamRasterM = "streamRasterTauM.tif" ## output to mongoDB file names reachesOut = "reach.shp" subbasinOut = "subbasin.tif" flowDirOut = "flow_d
ir.tif" streamLinkOut = "stream_link.tif" ## masked and output to mongoDB file names slopeM = "slope.tif" filldemM = "dem.tif" accM = "acc.tif" streamOrderM = "stream_order.tif" flowDirDinfM = "flow_dir_angle_dinf.tif" dirCodeDinfM = "flow_dir_dinf.tif" slopeD
infM = "slope_dinf.tif" weightDinfM = "weight_dinf.tif" subbasinVec = "subbasin.shp" basinVec = "basin.shp" chwidthName = "chwidth.tif" landuseMFile = "landuse.tif" soilTexture = "soil_texture.tif" hydroGroup = "hydro_group.tif" usleK = "usle_k.tif" initSoilMoist = "moist_in.tif" depressionFile = "depression.tif" CN2File = "CN2.tif" radiusFile = "radius.tif" ManningFile = "Manning.tif" velocityFile = "velocity.tif" ## flow time to the main river from each grid cell t0_sFile = "t0_s.tif" ## standard deviation of t0_s delta_sFile = "delta_s.tif" ## potential runoff coefficient runoff_coefFile = "runoff_co.tif"
leapcode/bitmask-dev
src/leap/bitmask/mail/outgoing/service.py
Python
gpl-3.0
17,108
0
# -*- coding: utf-8 -*- # outgoing/service.py # Copyright (C) 2013-2017 LEAP # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ OutgoingMail module. The OutgoingMail class allows to send mail, and encrypts/signs it if needed. """ import re from StringIO import StringIO from copy import deepcopy from email.parser import Parser from email.encoders import encode_7or8bit from email.mime.application import MIMEApplication from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from twisted.mail import smtp from twisted.internet import defer from twisted.python.failure import Failure from twisted.logger import Logger from leap.common.check import leap_assert_type, leap_assert from leap.common.events import emit_async, catalog from leap.bitmask.keymanager.errors import KeyNotFound, KeyAddressMismatch from leap.bitmask.mail.utils import validate_address from leap.bitmask.mail.rfc3156 import MultipartEncrypted from leap.bitmask.mail.rfc3156 import MultipartSigned from leap.bitmask.mail.rfc3156 import encode_base64_rec from leap.bitmask.mail.rfc3156 import RFC3156CompliantGenerator from leap.bitmask.mail.rfc3156 import PGPSignature from leap.bitmask.mail.rfc3156 import PGPEncrypted # TODO # [ ] rename this module to something else, service should be the implementor # of IService class OutgoingMail(object): """ Sends Outgoing Mail, encrypting and signing if needed. """ log = Logger() def __init__(self, from_address, keymanager, bouncer=None): """ Initialize the outgoing mail service. :param from_address: The sender address. :type from_address: str :param keymanager: A KeyManager for retrieving recipient's keys. :type keymanager: leap.common.keymanager.KeyManager """ # assert params leap_assert_type(from_address, (str, unicode)) leap_assert('@' in from_address) # XXX it can be a zope.proxy too # leap_assert_type(keymanager, KeyManager) self._from_address = from_address self._keymanager = keymanager self._bouncer = bouncer self._senders = [] def add_sender(self, sender): """ Add an ISender to the outgoing service """ self._senders.append(sender) def send_message(self, raw, recipient): """ Sends a message to a recipient. Maybe encrypts and signs. :param raw: The raw message :type raw: str :param recipient: The recipient for the message :type recipient: smtp.User :return: a deferred which delivers the message when fired """ d = self._maybe_encrypt_and_sign(raw, recipient) d.addCallback(self._route_msg, recipient, raw) d.addErrback(self.sendError, raw) return d def can_encrypt_for(self, recipient): def cb(_): return True def eb(failure): failure.trap(KeyNotFound) return False d = self._keymanager.get_key(recipient) d.addCallbacks(cb, eb) return d def sendSuccess(self, dest_addrstr): """ Callback for a successful send. 
""" fromaddr = self._from_address self.log.info('Message sent from %s to %s' % (fromaddr, dest_addrstr)) emit_async(catalog.SMTP_SEND_MESSAGE_SUCCESS, fromaddr, dest_addrstr) def sendError(self, failure, origmsg): """ Callback for an unsuccessful send. :param failure: The result from the last errback. :type failure: anything :param origmsg: the original, unencrypted, raw message, to be passed to the bouncer. :type origmsg: str """ # XXX: need to get the address from the original message to send signal # emit_async(catalog.SMTP_SEND_MESSAGE_ERROR, self._from_address, # self._user.dest.addrstr) # TODO when we implement outgoing queues/long-term-retries, we could # examine the error *here* and delay the notification if it's just a # temporal error. We might want to notify the permanent errors # differently. self.log.error('Error while sending: {0!r}'.format(failure)) if self._bouncer: self._bouncer.bounce_message( failure.getErrorMessage(), to=self._from_address, orig=origmsg) else: failure.raiseException() def _route_msg(self, encrypt_and_sign_result, recipient, raw): """ Sends the msg using the ESMTPSenderFactory. :param encrypt_and_sign_result: A tuple containing the 'maybe' encrypted message and the recipient :type encrypt_and_sign_result: tuple """ message, recipient = encrypt_and_sign_result msg = message.as_string(False) d = None for sender in self._senders: if sender.can_send(recipient.dest.addrstr): self.log.debug('Sending message to %s with: %s' % (recipient, str(sender))) d = sender.send(recipient, msg) break if d is None: return self.sendError(Failure(), raw) emit_async(catalog.SMTP_SEND_MESSAGE_START, self._from_address, recipient.dest.addrstr) d.addCallback(self.sendSuccess) d.addErrback(self.sendError, raw) return d def _maybe_encrypt_and_sign(self, raw, recipient, fetch_remote=True): """ Attempt to encrypt and sign the outgoing message. The behaviour of this method depends on: 1. the original message's content-type, and 2. the availability of the recipient's public key. If the original message's content-type is "multipart/encrypted", the
n the original message is not altered. For any other content-type, the method attempts to fetch the recipient's public key. If the recipient's public key is available, the message is encrypted and signed; otherwise it is only signed. Note that, if the C{encrypted_only} configuration is set to True and the recipient's public key is not available, then the recipient address would have been rejected in SMTPDelivery.validateTo(). The following
table summarizes the overall behaviour of the gateway: +---------------------------------------------------+----------------+ | content-type | rcpt pubkey | enforce encr. | action | +---------------------+-------------+---------------+----------------+ | multipart/encrypted | any | any | pass | | other | available | any | encrypt + sign | | other | unavailable | yes | reject | | other | unavailable | no | sign | +---------------------+-------------+---------------+----------------+ :param raw: The raw message :type raw: str :param recipient: The recipient for the message :type: recipient: smtp.User :return: A Deferred that will be fired with a MIMEMultipart message and the original recipient Message :rtype: Deferred """ # pass if the original message's content-type is "multipart/encrypted" origmsg = Parser().parsestr(raw) if origmsg.get_content_type() == 'multipart/encr