repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
veestr/sjsu-298-experiment | main.py | 1 | 9053 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import webapp2
import jinja2
import os
import random
import glob
import json
import cgi
import string
import re
import time
from datetime import datetime, tzinfo, timedelta
from google.appengine.ext import db
from google.appengine.api import memcache
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
main_template=JINJA_ENVIRONMENT.get_template('index.html')
report_template=JINJA_ENVIRONMENT.get_template('report.html')
account_template=JINJA_ENVIRONMENT.get_template('account.html')
bad_password_template=JINJA_ENVIRONMENT.get_template('bad_password.html')
class Zone(tzinfo):
"""Define timezone objects."""
def __init__(self,offset,isdst,name):
self.offset = offset
self.isdst = isdst
self.name = name
def utcoffset(self, dt):
return timedelta(hours=self.offset) + self.dst(dt)
def dst(self, dt):
return timedelta(hours=1) if self.isdst else timedelta(0)
def tzname(self,dt):
return self.name
PST = Zone(-8, True, 'PST')
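# Note: with isdst=True the effective UTC offset is -8h + 1h DST = -7h (i.e. PDT),
# although timestamps rendered with this zone are still labelled 'PST'.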
class Account(db.Model):
"""Defines accounts in the database."""
date = db.StringProperty()
user = db.StringProperty(indexed=True)
site = db.StringProperty()
initial_password = db.StringProperty()
second_password = db.IntegerProperty()
third_password = db.IntegerProperty()
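# second_password / third_password record the verification outcome for the later
# iterations: -1 = not yet attempted, 0 = all attempts failed, 1-3 = the attempt
# on which the password was entered correctly (see AccountHandler.verify below).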
pass
def get_all_accounts():
"""Returns all stored accounts as a list of lists."""
accounts=[]
q = Account.all()
q.order('-date')
accounts.append(['Date','User','Site','Original Password','5 Min ','1 Week'])
for account in q:
entry=[]
entry.append(account.date)
entry.append(account.user)
entry.append(account.site)
entry.append(account.initial_password)
entry.append(account.second_password)
entry.append(account.third_password)
accounts.append(entry)
return accounts
def get_possible_sites(user):
"""Returns a set of sites available for the specified user. Each site
is represented by an image in the 'images' directory. """
files=()
if user.lower().endswith("ur"):
#ASSERT: the username indicates unrelated condition
files=(file.replace('images/', '') for file in glob.glob('images/ur_*'))
elif user.lower().endswith("r"):
#ASSERT: the username indicates related condition
files=(file.replace('images/', '') for file in glob.glob('images/r_*'))
return set(files)
def get_registered_sites(user,iteration):
"""Returns a set of the sites the specified user has registered for given the
specific iteration. The sites are considered to be registered if the password is set to a
value in range [0,3] for the specified iteration."""
sites=set()
q=Account.all()
q.filter('user =',user)
if int(iteration)==1:
#ASSERT: Filter out the site where second_password has not been set
q.filter('second_password >=', 0).filter('second_password <=', 3)
if int(iteration)==2:
#ASSERT: Filter out the site where third_password has not been set
q.filter('third_password >=', 0).filter('third_password <=', 3)
for account in q:
sites.add(account.site)
return sites
def verify_site(user, site, password):
"""Verifies whether the password for user is correct for the specific site."""
q=Account.all()
q.filter('user =',user)
q.filter('site =', site)
result = q.get()
stored_pass = str(result.initial_password)
return stored_pass == password
def get_site_for_user(iteration, user):
"""Returns the site for the specified user or '' of no such sites can be returned."""
possible_sites=get_possible_sites(user)
last_site=memcache.get(user)
registered_sites=get_registered_sites(user,iteration)
registered_sites.add(last_site)
allowed_sites=possible_sites.difference(registered_sites)
print "Registred sites: %s" % registered_sites
print "Possible sites: %s" % possible_sites
print "Allowed sites: %s" % allowed_sites
if len(allowed_sites) > 0:
return random.sample(allowed_sites, 1).pop()
else:
return ''
class MainHandler(webapp2.RequestHandler):
def get(self):
self.response.write(main_template.render())
memcache.flush_all()
pass
class ReportHandler(webapp2.RequestHandler):
def get(self):
template_values = {
'accounts' : get_all_accounts(),
}
self.response.write(report_template.render(template_values))
pass
class AccountHandler(webapp2.RequestHandler):
def get(self,iteration,attempt):
user=cgi.escape(self.request.get('user'))
site=cgi.escape(self.request.get('site'))
#TODO: Handle this more gracefully
if not user:
self.redirect('/')
possible_sites=get_possible_sites(user)
if site:
#ASSERT: We know for which site to display the account info
selected_site = site
else:
#ASSERT: we need to figure out the site for the user and if
# such site does not exist, we need to go back to the main screen
selected_site=get_site_for_user(iteration, user)
if selected_site=="":
self.redirect('/')
if int(iteration)==1 or int(iteration)==2:
# ASSERT: The user is going to verify the site's credentials
# thus, we need a different verification procedure
action="/verify"
elif int(iteration)==0:
#ASSERT: This is the user's first time, so we need to save the info
action="/save"
template_values = {
'selected_site' : cgi.escape(selected_site),
'user': user,
'iteration': iteration,
'attempt': attempt,
'action': action,
}
self.response.write(account_template.render(template_values))
pass
def save(self):
"""Saves the account credentials and redirects to the new account page."""
user=cgi.escape(self.request.get('user'))
print "user in save(): %s" % user
password=cgi.escape(self.request.get('pass1'))
site=cgi.escape(self.request.get('site'))
iteration=int(cgi.escape(self.request.get('iteration')))
self.response.status=201
account=Account(
user=user,
initial_password=password,
site=site,
second_password=-1,
third_password=-1,
date=datetime.now(PST).strftime('%m/%d/%Y %H:%M:%S %Z')
)
account.put()
memcache.set(key=user, value=site)
new_path='/account/0/1/?user='+user
return self.redirect(new_path)
def verify(self):
"""Verifies the credentials for the site."""
user=cgi.escape(self.request.get('user'))
password=cgi.escape(self.request.get('pass1'))
site=cgi.escape(self.request.get('site'))
iteration=int(cgi.escape(self.request.get('iteration')))
attempt=int(cgi.escape(self.request.get('attempt')))
is_pass_valid=verify_site(user,site, password)
existing_accounts=db.GqlQuery("SELECT * from Account WHERE user=:1 AND site=:2",user,site).fetch(1)
account=existing_accounts[0]
if is_pass_valid:
#ASSERT: The password provided by user for the site is valid
#Mark the attempt as such and go on to the next site
if iteration==1:
account.second_password=attempt
if iteration==2:
account.third_password=attempt
new_path = '/account/'+str(iteration)+'/1/?user='+user
memcache.set(key=user, value=site)
account.put()
return self.redirect(new_path)
else:
#ASSERT: The password provided by the user for the site is not valid
if attempt < 3:
#ASSERT: The pass is not valid, redirect to the next attempt for this site
next_attempt=attempt+1
new_path = '/account/'+str(iteration)+'/'+str(next_attempt)+'/?user='+user+'&site='+site
msg = "Your password did not match the one you've created for this site. "
if attempt >= 3:
#ASSERT: The pass is not valid for this site and we do not have any more attempts left
#redirect to the next site within the same iteration
if iteration==1:
account.second_password=0
if iteration==2:
account.third_password=0
new_path = '/account/'+str(iteration)+'/1/?user='+user
memcache.set(key=user, value=site)
account.put()
msg = "You have exhausted all attemps. Re-directing to the next site or the main menu if no sites are available. "
template_values = {
'attempts_left' : 3-attempt,
'target_url': new_path,
'message': msg,
}
self.response.write(bad_password_template.render(template_values))
pass
app = webapp2.WSGIApplication([
webapp2.Route(r'/', handler=MainHandler),
webapp2.Route(r'/report', handler=ReportHandler),
webapp2.Route(r'/account/<iteration>/<attempt>/', handler=AccountHandler),
webapp2.Route(r'/save', handler=AccountHandler, methods=['POST'], handler_method='save'),
webapp2.Route(r'/verify', handler=AccountHandler, methods=['POST'], handler_method='verify')
], debug=True)
| mit | -3,909,256,515,630,310,400 | 30.764912 | 118 | 0.709599 | false |
xiangke/pycopia | mibs/pycopia/mibs/SNMPv2_TM.py | 1 | 2645 |
# python
# This file is generated by a program (mib2py). Any edits will be lost.
from pycopia.aid import Enum
import pycopia.SMI.Basetypes
Range = pycopia.SMI.Basetypes.Range
Ranges = pycopia.SMI.Basetypes.Ranges
from pycopia.SMI.Objects import ColumnObject, MacroObject, NotificationObject, RowObject, ScalarObject, NodeObject, ModuleObject, GroupObject
# imports
from SNMPv2_SMI import MODULE_IDENTITY, OBJECT_IDENTITY, snmpModules, snmpDomains, snmpProxys
from SNMPv2_TC import TEXTUAL_CONVENTION
class SNMPv2_TM(ModuleObject):
path = '/usr/share/snmp/mibs/ietf/SNMPv2-TM'
conformance = 5
name = 'SNMPv2-TM'
language = 2
description = 'The MIB module for SNMP transport mappings.\n\nCopyright (C) The Internet Society (2002). This\nversion of this MIB module is part of RFC 3417;\nsee the RFC itself for full legal notices.'
# nodes
class snmpUDPDomain(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 1, 1])
name = 'snmpUDPDomain'
class snmpCLNSDomain(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 1, 2])
name = 'snmpCLNSDomain'
class snmpCONSDomain(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 1, 3])
name = 'snmpCONSDomain'
class snmpDDPDomain(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 1, 4])
name = 'snmpDDPDomain'
class snmpIPXDomain(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 1, 5])
name = 'snmpIPXDomain'
class rfc1157Proxy(NodeObject):
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 2, 1])
name = 'rfc1157Proxy'
class rfc1157Domain(NodeObject):
status = 2
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 2, 1, 1])
name = 'rfc1157Domain'
class snmpv2tm(NodeObject):
status = 1
OID = pycopia.SMI.Basetypes.ObjectIdentifier([1, 3, 6, 1, 6, 3, 19])
name = 'snmpv2tm'
# macros
# types
class SnmpUDPAddress(pycopia.SMI.Basetypes.OctetString):
status = 1
ranges = Ranges(Range(6, 6))
format = '1d.1d.1d.1d/2d'
class SnmpOSIAddress(pycopia.SMI.Basetypes.OctetString):
status = 1
ranges = Ranges(Range(1, 1), Range(4, 85))
format = '*1x:/1x:'
class SnmpNBPAddress(pycopia.SMI.Basetypes.OctetString):
status = 1
ranges = Ranges(Range(3, 99))
class SnmpIPXAddress(pycopia.SMI.Basetypes.OctetString):
status = 1
ranges = Ranges(Range(12, 12))
format = '4x.1x:1x:1x:1x:1x:1x.2d'
# scalars
# columns
# rows
# notifications (traps)
# groups
# capabilities
# special additions
# Add to master OIDMAP.
from pycopia import SMI
SMI.update_oidmap(__name__)
| lgpl-2.1 | 7,965,699,751,727,708,000 | 25.717172 | 204 | 0.727032 | false |
alexryndin/ambari | ambari-common/src/main/python/resource_management/libraries/functions/packages_analyzer.py | 1 | 10470 |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import sys
import logging
import subprocess
from threading import Thread
import threading
from ambari_commons import OSCheck, OSConst
from ambari_commons import shell
from resource_management.core.logger import Logger
from resource_management.core import shell as rmf_shell
from resource_management.core.exceptions import Fail
__all__ = ["installedPkgsByName", "allInstalledPackages", "allAvailablePackages", "nameMatch",
"getInstalledRepos", "getInstalledPkgsByRepo", "getInstalledPkgsByNames", "getPackageDetails"]
LIST_INSTALLED_PACKAGES_UBUNTU = "COLUMNS=9999 ; for i in $(dpkg -l |grep ^ii |awk -F' ' '{print $2}'); do apt-cache showpkg \"$i\"|head -3|grep -v '^Versions'| tr -d '()' | awk '{ print $1\" \"$2 }'|sed -e 's/^Package: //;' | paste -d ' ' - -; done"
LIST_AVAILABLE_PACKAGES_UBUNTU = "packages=`for i in $(ls -1 /var/lib/apt/lists | grep -v \"ubuntu.com\") ; do grep ^Package: /var/lib/apt/lists/$i | awk '{print $2}' ; done` ; for i in $packages; do apt-cache showpkg \"$i\"|head -3|grep -v '^Versions'| tr -d '()' | awk '{ print $1\" \"$2 }'|sed -e 's/^Package: //;' | paste -d ' ' - -; done"
logger = logging.getLogger()
# default timeout for async invoked processes
TIMEOUT_SECONDS = 40
def _launch_subprocess(command):
isShell = not isinstance(command, (list, tuple))
return subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=isShell, close_fds=True)
def subprocessWithTimeout(command):
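# Runs `command` in a subprocess while a watchdog thread waits up to TIMEOUT_SECONDS;
# if the process has not exited by then it is killed together with its children.
# Returns a dict with the keys 'out', 'err' and 'retCode'.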
event = threading.Event()
def watchdog_func(command):
event.wait(TIMEOUT_SECONDS)
if command.returncode is None:
logger.error("Task timed out and will be killed")
shell.kill_process_with_children(command.pid)
pass
osStat = _launch_subprocess(command)
logger.debug("Launching watchdog thread")
event.clear()
thread = Thread(target=watchdog_func, args=(osStat, ))
thread.start()
out, err = osStat.communicate()
result = {}
result['out'] = out
result['err'] = err
result['retCode'] = osStat.returncode
event.set()
thread.join()
return result
def installedPkgsByName(allInstalledPackages,
pkgName, installedPkgs):
"""
Get all installed packages whose names start with the
string pkgName and append them to installedPkgs
"""
for item in allInstalledPackages:
if item[0].find(pkgName) == 0:
installedPkgs.append(item[0])
def allInstalledPackages(allInstalledPackages):
"""
All installed packages in system
"""
osType = OSCheck.get_os_family()
if OSCheck.is_suse_family():
return _lookUpZypperPackages(
["sudo", "zypper", "--no-gpg-checks", "search", "--installed-only", "--details"],
allInstalledPackages)
elif OSCheck.is_redhat_family():
return _lookUpYumPackages(
["sudo", "yum", "list", "installed"],
'Installed Packages',
allInstalledPackages)
elif OSCheck.is_ubuntu_family():
return _lookUpAptPackages(
LIST_INSTALLED_PACKAGES_UBUNTU,
allInstalledPackages)
def allAvailablePackages(allAvailablePackages):
osType = OSCheck.get_os_family()
if OSCheck.is_suse_family():
return _lookUpZypperPackages(
["sudo", "zypper", "--no-gpg-checks", "search", "--uninstalled-only", "--details"],
allAvailablePackages)
elif OSCheck.is_redhat_family():
return _lookUpYumPackages(
["sudo", "yum", "list", "available"],
'Available Packages',
allAvailablePackages)
elif OSCheck.is_ubuntu_family():
return _lookUpAptPackages(
LIST_AVAILABLE_PACKAGES_UBUNTU,
allAvailablePackages)
# ToDo: add execution via sudo for ubuntu (currently Ubuntu is not supported)
def _lookUpAptPackages(command, allPackages):
try:
result = subprocessWithTimeout(command)
if 0 == result['retCode']:
for x in result['out'].split('\n'):
if x.strip():
allPackages.append(x.split(' '))
except:
logger.error("Unexpected error:", sys.exc_info()[0])
def _lookUpYumPackages(command, skipTill, allPackages):
try:
result = subprocessWithTimeout(command)
if 0 == result['retCode']:
lines = result['out'].split('\n')
lines = [line.strip() for line in lines]
items = []
skipIndex = 3
for index in range(len(lines)):
if skipTill in lines[index]:
skipIndex = index + 1
break
for line in lines[skipIndex:]:
items = items + line.strip(' \t\n\r').split()
for i in range(0, len(items), 3):
if '.' in items[i]:
items[i] = items[i][:items[i].rindex('.')]
if items[i + 2].find('@') == 0:
items[i + 2] = items[i + 2][1:]
allPackages.append(items[i:i + 3])
except:
logger.error("Unexpected error:", sys.exc_info()[0])
def _lookUpZypperPackages(command, allPackages):
try:
result = subprocessWithTimeout(command)
if 0 == result['retCode']:
lines = result['out'].split('\n')
lines = [line.strip() for line in lines]
items = []
for index in range(len(lines)):
if "--+--" in lines[index]:
skipIndex = index + 1
break
for line in lines[skipIndex:]:
items = line.strip(' \t\n\r').split('|')
allPackages.append([items[1].strip(), items[3].strip(), items[5].strip()])
except:
logger.error("Unexpected error:", sys.exc_info()[0])
def nameMatch(lookupName, actualName):
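# True when any whitespace-separated token of actualName starts with lookupName,
# case-insensitively, e.g. (illustrative) nameMatch("hadoop", "hadoop-libhdfs.x86_64 2.7.1") -> True.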
tokens = actualName.strip().split()
for token in tokens:
if token.lower().find(lookupName.lower()) == 0:
return True
return False
def getInstalledRepos(hintPackages, allPackages, ignoreRepos, repoList):
"""
Gets all installed repos by name based on repos that provide any package
contained in hintPackages
Repos starting with value in ignoreRepos will not be returned
hintPackages must be regexps.
"""
allRepos = []
for hintPackage in hintPackages:
for item in allPackages:
if re.match(hintPackage, item[0]) and not item[2] in allRepos:
allRepos.append(item[2])
for repo in allRepos:
ignore = False
for ignoredRepo in ignoreRepos:
if nameMatch(ignoredRepo, repo):
ignore = True
if not ignore:
repoList.append(repo)
def getInstalledPkgsByRepo(repos, ignorePackages, installedPackages):
"""
Get all the installed packages from the repos listed in repos
"""
packagesFromRepo = []
packagesToRemove = []
for repo in repos:
subResult = []
for item in installedPackages:
if repo == item[2]:
subResult.append(item[0])
packagesFromRepo = list(set(packagesFromRepo + subResult))
for package in packagesFromRepo:
keepPackage = True
for ignorePackage in ignorePackages:
if nameMatch(ignorePackage, package):
keepPackage = False
break
if keepPackage:
packagesToRemove.append(package)
return packagesToRemove
def getInstalledPkgsByNames(pkgNames, installedPackages):
"""
Gets all installed packages that start with names in pkgNames
"""
packages = []
for pkgName in pkgNames:
subResult = []
installedPkgsByName(installedPackages, pkgName, subResult)
packages = list(set(packages + subResult))
return packages
def getPackageDetails(installedPackages, foundPackages):
"""
Gets the name, version, and repoName for the packages
"""
packageDetails = []
for package in foundPackages:
pkgDetail = {}
for installedPackage in installedPackages:
if package == installedPackage[0]:
pkgDetail['name'] = installedPackage[0]
pkgDetail['version'] = installedPackage[1]
pkgDetail['repoName'] = installedPackage[2]
packageDetails.append(pkgDetail)
return packageDetails
def getReposToRemove(repos, ignoreList):
reposToRemove = []
for repo in repos:
addToRemoveList = True
for ignoreRepo in ignoreList:
if nameMatch(ignoreRepo, repo):
addToRemoveList = False
continue
if addToRemoveList:
reposToRemove.append(repo)
return reposToRemove
def getInstalledPackageVersion(package_name):
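# Returns the installed version of package_name: via `dpkg -s` on Ubuntu-family
# hosts, and via `rpm -q` (version-release, with any .elN suffix stripped) elsewhere.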
if OSCheck.is_ubuntu_family():
code, out, err = rmf_shell.checked_call("dpkg -s {0} | grep Version | awk '{{print $2}}'".format(package_name), stderr=subprocess.PIPE)
else:
code, out, err = rmf_shell.checked_call("rpm -q --queryformat '%{{version}}-%{{release}}' {0} | sed -e 's/\.el[0-9]//g'".format(package_name), stderr=subprocess.PIPE)
return out
def verifyDependencies():
"""
Verify that we have no dependency issues in the package manager. Dependency issues can appear because of an aborted or terminated
package installation process, or an invalid package state after manual modification of the package list on the host
:return True if no dependency issues found, False if dependency issue present
:rtype bool
"""
check_str = None
cmd = None
if OSCheck.is_redhat_family():
cmd = ['/usr/bin/yum', '-d', '0', '-e', '0', 'check', 'dependencies']
check_str = "has missing requires|Error:"
elif OSCheck.is_suse_family():
cmd = ['/usr/bin/zypper', '--quiet', '--non-interactive', 'verify', '--dry-run']
check_str = "\d+ new package(s)? to install"
elif OSCheck.is_ubuntu_family():
cmd = ['/usr/bin/apt-get', '-qq', 'check']
check_str = "has missing dependency|E:"
if check_str is None or cmd is None:
raise Fail("Unsupported OSFamily on the Agent Host")
code, out = rmf_shell.checked_call(cmd, sudo=True)
output_regex = re.compile(check_str)
if code or (out and output_regex.search(out)):
err_msg = Logger.filter_text("Failed to verify package dependencies. Execution of '%s' returned %s. %s" % (cmd, code, out))
Logger.error(err_msg)
return False
return True
| apache-2.0 | -3,688,034,880,890,380,000 | 31.71875 | 352 | 0.675454 | false |
openpli-arm/enigma2-arm | lib/python/Components/PluginComponent.py | 1 | 4036 |
from os import path as os_path, listdir as os_listdir
from traceback import print_exc
from sys import stdout
from Tools.Directories import fileExists
from Tools.Import import my_import
from Plugins.Plugin import PluginDescriptor
import keymapparser
class PluginComponent:
firstRun = True
restartRequired = False
def __init__(self):
self.plugins = {}
self.pluginList = [ ]
self.setPluginPrefix("Plugins.")
self.resetWarnings()
def setPluginPrefix(self, prefix):
self.prefix = prefix
def addPlugin(self, plugin):
if self.firstRun or not plugin.needsRestart:
self.pluginList.append(plugin)
for x in plugin.where:
self.plugins.setdefault(x, []).append(plugin)
if x == PluginDescriptor.WHERE_AUTOSTART:
plugin(reason=0)
else:
self.restartRequired = True
def removePlugin(self, plugin):
self.pluginList.remove(plugin)
for x in plugin.where:
self.plugins[x].remove(plugin)
if x == PluginDescriptor.WHERE_AUTOSTART:
plugin(reason=1)
def readPluginList(self, directory):
"""enumerates plugins"""
categories = os_listdir(directory)
new_plugins = [ ]
for c in categories:
directory_category = directory + c
if not os_path.isdir(directory_category):
continue
open(directory_category + "/__init__.py", "a").close()
for pluginname in os_listdir(directory_category):
path = directory_category + "/" + pluginname
if os_path.isdir(path):
if fileExists(path + "/plugin.pyc") or fileExists(path + "/plugin.pyo") or fileExists(path + "/plugin.py"):
try:
plugin = my_import('.'.join(["Plugins", c, pluginname, "plugin"]))
if not plugin.__dict__.has_key("Plugins"):
print "Plugin %s doesn't have 'Plugin'-call." % (pluginname)
continue
plugins = plugin.Plugins(path=path)
except Exception, exc:
print "Plugin ", c + "/" + pluginname, "failed to load:", exc
print_exc(file=stdout)
print "skipping plugin."
self.warnings.append( (c + "/" + pluginname, str(exc)) )
continue
# allow single entry not to be a list
if not isinstance(plugins, list):
plugins = [ plugins ]
for p in plugins:
p.updateIcon(path)
new_plugins.append(p)
if fileExists(path + "/keymap.xml"):
try:
keymapparser.readKeymap(path + "/keymap.xml")
except Exception, exc:
print "keymap for plugin %s/%s failed to load: " % (c, pluginname), exc
self.warnings.append( (c + "/" + pluginname, str(exc)) )
# build a diff between the old list of plugins and the new one
# internally, the "fnc" argument will be compared with __eq__
plugins_added = [p for p in new_plugins if p not in self.pluginList]
plugins_removed = [p for p in self.pluginList if not p.internal and p not in new_plugins]
#ignore already installed but reloaded plugins
for p in plugins_removed:
for pa in plugins_added:
if pa.name == p.name and pa.where == p.where:
pa.needsRestart = False
for p in plugins_removed:
self.removePlugin(p)
for p in plugins_added:
self.addPlugin(p)
if self.firstRun:
self.firstRun = False
def getPlugins(self, where):
"""Get list of plugins in a specific category"""
if not isinstance(where, list):
where = [ where ]
res = [ ]
for x in where:
res.extend(self.plugins.get(x, [ ]))
res.sort(key=lambda x:x.weight)
return res
def getPluginsForMenu(self, menuid):
res = [ ]
for p in self.getPlugins(PluginDescriptor.WHERE_MENU):
res += p(menuid)
return res
def clearPluginList(self):
self.pluginList = []
self.plugins = {}
self.firstRun = True
self.restartRequired = False
def shutdown(self):
for p in self.pluginList[:]:
self.removePlugin(p)
def resetWarnings(self):
self.warnings = [ ]
def getNextWakeupTime(self):
wakeup = -1
for p in self.pluginList:
current = p.getWakeupTime()
if current > -1 and (wakeup > current or wakeup == -1):
wakeup = current
return int(wakeup)
plugins = PluginComponent()
| gpl-2.0 | 6,843,298,663,866,455,000 | 26.834483 | 112 | 0.665263 | false |
SydneyUniLibrary/auto-holds | patron/migrations/0007_auto_20160316_0257.py | 1 | 1269 |
# Copyright 2016 Susan Bennett, David Mitchell, Jim Nicholls
#
# This file is part of AutoHolds.
#
# AutoHolds is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AutoHolds is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AutoHolds. If not, see <http://www.gnu.org/licenses/>.
#
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-03-16 02:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('patron', '0006_registration_language'),
]
operations = [
migrations.AlterField(
model_name='registration',
name='format',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='patron.Format'),
),
]
| gpl-3.0 | -6,266,376,468,078,874,000 | 31.538462 | 101 | 0.708432 | false |
xen0n/gingerprawn | gingerprawn/shrimp/academic/academic_curricula_frame.py | 1 | 8581 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# JNMaster / academic / curriculum querier interface
# the original code was contributed by Chen Huayue, later almost entirely
# rewritten by Wang Xuerui.
# Copyright (C) 2011 Chen Huayue <489412949@qq.com>
# Copyright (C) 2011 Wang Xuerui <idontknw.wang@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading
import wx
# import wx.html
# dummy i18n
_ = lambda x: x
from gingerprawn.api import logger
logger.install()
# university academic affairs system's interface
from gingerprawn.api import univlib
jwxt = univlib.current.jwxt
# for common login behavior
from academic_login_mixin import JWXTLoginThreadMixin
#############################################################################
## SEPARATOR BETWEEN DECLARATIONS AND (MAINLY) GUI IMPLEMENTATION
#############################################################################
# statusbar class with a pulsing progress bar, very good for showing
# progress
from gingerprawn.api.ui.statusbar import ProgressStatusBar
# for an (again) very fancy curriculum page
from gingerprawn.api.ui.spangrid import RowSpanGrid
# Layout constants
KBSize=(300, 160)
showKBSize=(1280, 768)
FirstLineSpace = 10
LineHeight = 30 # modified for better look under wxGTK (Ubuntu Linux)
LineIndent = 10
class curricula_frame(wx.Frame, JWXTLoginThreadMixin):
# for layout use
def GetOrder(self, start=0):
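# Returns a closure that advances a running line counter and yields the wx.Point
# at which the next control should be placed; the optional LineAdd argument
# shifts the point horizontally within the current line.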
cur = [start]
def incr(add, LineAdd=0):
cur[0] += add
return wx.Point(LineIndent + LineAdd,
FirstLineSpace + LineHeight * cur[0])
return incr
def _LoginThread(self, parent, userinfo, cfg_cache):
JWXTLoginThreadMixin._LoginThread(self, parent, userinfo, cfg_cache)
wx.CallAfter(parent.notify_status, _(u'获取学年与学期信息'))
self._kbList1, self._kbList2 = self._affairs.prepare4curriculum()
yr_default, tm_default = self._affairs.curriculum_defaults
yr_default = self._kbList1.index(yr_default)
tm_default = self._kbList2.index(tm_default)
# List of choice initialization postponed, because the lists
# are not yet available at the time of overall frame init
wx.CallAfter(self.kbL1.SetItems, self._kbList1)
wx.CallAfter(self.kbL2.SetItems, self._kbList2)
wx.CallAfter(self.kbL2.InvalidateBestSize)
wx.CallAfter(self.kbL1.Fit)
wx.CallAfter(self.kbL2.Fit)
wx.CallAfter(self.kbL1.Select, yr_default)
wx.CallAfter(self.kbL2.Select, tm_default)
wx.CallAfter(self.SetStatusText, _(u'请选择学年和学期'))
wx.CallAfter(parent.notify_status, _(u'准备就绪'))
wx.CallAfter(parent.toggle_working)
wx.CallAfter(self.Show)
return
def __init__(self, parent, userinfo, cfg_cache):
wx.Frame.__init__(self, parent, wx.ID_ANY, _(u'课表查询'), size=KBSize)
self.SetMaxSize(KBSize)
self.SetMinSize(KBSize)
self.__parent = parent
# bind the close handler to auto-logout before closing down
self.Bind(wx.EVT_CLOSE, self.OnClose)
# set background color to get some native feel on MSW
if wx.Platform == '__WXMSW__':
self.SetBackgroundColour(wx.SystemSettings.GetColour(
wx.SYS_COLOUR_3DFACE))
# this is preserved for the showing frame
self.__userid = userinfo['usr']
########################################################
## LAYOUT SPEC
order=self.GetOrder(0)
pnl = self.panelMain = wx.Panel(self, wx.ID_ANY, style=wx.EXPAND)
wx.StaticText(pnl, wx.ID_ANY, _(u'课表查询'), pos=order(0))
self.kbL1 = wx.Choice(pnl, pos=order(1), size=(130, -1))
self.kbL2 = wx.Choice(pnl, pos=order(0, 135), size=(60, -1))
self.kbB=wx.Button(pnl, label=_(u'查询'), pos=order(0, 204),
size=(60, -1))
wx.StaticText(pnl, label=_(u'请从下拉菜单中选择想查询的学期'),
pos=order(1))
self.Bind(wx.EVT_BUTTON, self.KB, self.kbB)
self.statusbar = ProgressStatusBar(self)
self.SetStatusBar(self.statusbar)
########################################################
## INITIALIZATION
thrd = threading.Thread(target=self._LoginThread,
args=(parent, userinfo, cfg_cache, ),
name='academic_LoginThread')
thrd.daemon = True
thrd.start()
def OnClose(self, evt):
thrd = threading.Thread(target=self._LogoutThread,
args=(self.__parent, ), # toggle=True
name='academic_LogoutThread')
thrd.daemon = True
thrd.start()
evt.Skip()
def _QueryThread(self, yr, tm):
wx.CallAfter(self.notify_status, _(u'查询中'))
wx.CallAfter(self.toggle_working)
try:
self._affairs.get_curriculum(yr, tm) #, raw=True)
except Exception, e:
logexception('unexpected exc:\n%s', `e`)
wx.CallAfter(self.notify_status,
_(u'查询出错,请重试;若仍然出错,请报告 Bug'))
wx.CallAfter(self.toggle_working)
return
_r = self._affairs.curriculum[(yr, tm)]
# gui operation must be protected
wx.CallAfter(self.do_showKB, _r)
wx.CallAfter(self.toggle_working)
return
def KB(self, evt):
# Gather and validate input.
yr = self._affairs.curriculum_years[self.kbL1.GetSelection()]
if yr == -1:
self.SetStatusText(_(u'学年不能为空'))
return
term = self._affairs.curriculum_terms[self.kbL2.GetSelection()]
# Data gathering complete, spawn worker thread.
thrd = threading.Thread(target=self._QueryThread,
args=(yr, term, ),
name='academic_QueryThread')
thrd.daemon = True
thrd.start()
def do_showKB(self, rawdata):
'''\
This GUI operation must be done in the main thread, so we have to
encapsulate it into a function.
'''
showKB(self, rawdata, self.__userid)
def notify_status(self, msg):
self.SetStatusText(msg)
def toggle_working(self):
self.statusbar.ToggleStatus()
class showKB(wx.Frame):
def __init__(self, parent, content, username):
wx.Frame.__init__(self, parent,
title=_(u'%s 的课表') % username,
size=showKBSize)
self.__parent = parent
self.Bind(wx.EVT_CLOSE, self.OnClose)
try:
self.curriculum_grid = RowSpanGrid(self, wx.ID_ANY, content)
except:
logexception('exc when opening grid window for result')
parent.SetStatusText(
_(u'无法展开课表,请重试;若仍然失败,请报告 Bug'))
# this auto size, thanks to Robin Dunn for pointing out the method in an
# earlier mail list post, has brought a MUCH BETTER look
self.curriculum_grid.AutoSize()
self.Fit()
# we're done, show up!
self.Show(True)
parent.notify_status(_(u'课表已打开'))
# html = wx.html.HtmlWindow(self)
# if wx.Platform == '__WXGTK__':
# html.SetStandardFonts()
# try:
# html.SetPage(content)
# self.Show(True)
# parent.notify_status(_(u'课表已打开'))
# except:
# logexception('exc when opening htmlwindow for result')
# parent.SetStatusText(
# _(u'无法展开课表,请重试;若仍然失败,请报告 Bug'))
def OnClose(self, evt):
self.__parent.notify_status(_(u'课表窗口已关闭'))
evt.Skip()
def invoke(prnt, userinfo, cfg_obj):
frame = curricula_frame(prnt, userinfo, cfg_obj)
# frame.Show()
# vi:ai:et:ts=4 sw=4 sts=4 fenc=utf-8
| gpl-3.0 | 7,586,603,775,980,176,000 | 33.645833 | 80 | 0.600722 | false |
MediffRobotics/DeepRobotics | DeepLearnMaterials/lstm_tfl.py | 1 | 1796 |
# -*- coding: utf-8 -*-
"""
Simple example using LSTM recurrent neural network to classify IMDB
sentiment dataset.
References:
- Long Short Term Memory, Sepp Hochreiter & Jurgen Schmidhuber, Neural
Computation 9(8): 1735-1780, 1997.
- Andrew L. Maas, Raymond E. Daly, Peter T. Pham, Dan Huang, Andrew Y. Ng,
and Christopher Potts. (2011). Learning Word Vectors for Sentiment
Analysis. The 49th Annual Meeting of the Association for Computational
Linguistics (ACL 2011).
Links:
- http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf
- http://ai.stanford.edu/~amaas/data/sentiment/
"""
from __future__ import division, print_function, absolute_import
import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
from tflearn.datasets import imdb
# IMDB Dataset loading
train, test, _ = imdb.load_data(path='imdb.pkl', n_words=10000,
valid_portion=0.99)
trainX, trainY = train
#testX, testY = test
testX, testY = train
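# Note: as written, evaluation below runs against the training split rather than
# the held-out test split (the real test assignment is commented out above).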
# Data preprocessing
# Sequence padding
trainX = pad_sequences(trainX, maxlen=100, value=0.)
testX = pad_sequences(testX, maxlen=100, value=0.)
# Converting labels to binary vectors
trainY = to_categorical(trainY, nb_classes=2)
testY = to_categorical(testY, nb_classes=2)
# Network building
net = tflearn.input_data([None, 100])
net = tflearn.embedding(net, input_dim=10000, output_dim=128)
net = tflearn.lstm(net, 128, dropout=0.8)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
loss='categorical_crossentropy')
# Training
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(trainX, trainY, n_epoch=1,validation_set=(testX, testY), show_metric=True,
batch_size=32)
| gpl-3.0 | -6,052,200,692,671,277,000 | 34.92 | 84 | 0.711581 | false |
SeattleTestbed/repy_v2 | namespace.py | 1 | 39911 |
"""
<Program>
namespace.py
<Started>
September 2009
<Author>
Justin Samuel
<Purpose>
This is the namespace layer that ensures separation of the namespaces of
untrusted code and our code. It provides a single public function to be
used to setup the context in which untrusted code is exec'd (that is, the
context that is seen as the __builtins__ by the untrusted code).
The general idea is that any function or object that is available between
trusted and untrusted code gets wrapped in a function or object that does
validation when the function or object is used. In general, if user code
is not calling any functions improperly, neither the user code nor our
trusted code should ever notice that the objects and functions they are
dealing with have been wrapped by this namespace layer.
All of our own api functions are wrapped in NamespaceAPIFunctionWrapper
objects whose wrapped_function() method is mapped in to the untrusted
code's context. When called, the wrapped_function() method performs
argument, return value, and exception validation as well as additional
wrapping and unwrapping, as needed, that is specific to the function
that was ultimately being called. If the return value or raised exceptions
are not considered acceptable, a NamespaceViolationError is raised. If the
arguments are not acceptable, a TypeError is raised.
Note that callback functions that are passed from untrusted user code
to trusted code are also wrapped (these are arguments to wrapped API
functions, so we get to wrap them before calling the underlying function).
The reason we wrap these is so that we can intercept calls to the callback
functions and wrap arguments passed to them, making sure that handles
passed as arguments to the callbacks get wrapped before user code sees them.
The function and object wrappers have been defined based on the API as
documented at https://seattle.cs.washington.edu/wiki/RepyLibrary
Example of using this module (this is really the only way to use the module):
import namespace
usercontext = {}
namespace.wrap_and_insert_api_functions(usercontext)
safe.safe_exec(usercode, usercontext)
The above code will result in the dict usercontext being populated with keys
that are the names of the functions available to the untrusted code (such as
'open') and the values are the wrapped versions of the actual functions to be
called (such as 'emulfile.emulated_open').
Note that some functions wrapped by this module lose some python argument
flexibility. Wrapped functions can generally only have keyword args in
situations where the arguments are optional. Using keyword arguments for
required args may not be supported, depending on the implementation of the
specific argument check/wrapping/unwrapping helper functions for that
particular wrapped function. If this becomes a problem, it can be dealt with
by complicating some of the argument checking/wrapping/unwrapping code in
this module to make the checking functions more flexible in how they take
their arguments.
Implementation details:
The majority of the code in this module is made up of helper functions to do
argument checking, etc. for specific wrapped functions.
The most important parts to look at in this module for maintenance and
auditing are the following:
USERCONTEXT_WRAPPER_INFO
The USERCONTEXT_WRAPPER_INFO is a dictionary that defines the API
functions that are wrapped and inserted into the user context when
wrap_and_insert_api_functions() is called.
FILE_OBJECT_WRAPPER_INFO
LOCK_OBJECT_WRAPPER_INFO
TCP_SOCKET_OBJECT_WRAPPER_INFO
TCP_SERVER_SOCKET_OBJECT_WRAPPER_INFO
UDP_SERVER_SOCKET_OBJECT_WRAPPER_INFO
VIRTUAL_NAMESPACE_OBJECT_WRAPPER_INFO
The above six dictionaries define the methods available on the wrapped
objects that are returned by wrapped functions. Additionally, timerhandle
and commhandle objects are wrapped but instances of these do not have any
public methods and so no *_WRAPPER_INFO dictionaries are defined for them.
NamespaceObjectWrapper
NamespaceAPIFunctionWrapper
The above two classes are the only two types of objects that will be
allowed in untrusted code. In fact, instances of NamespaceAPIFunctionWrapper
are never actually allowed in untrusted code. Rather, each function that
is wrapped has a single NamespaceAPIFunctionWrapper instance created
when wrap_and_insert_api_functions() is called and what is actually made
available to the untrusted code is the wrapped_function() method of each
of the corresponding NamespaceAPIFunctionWrapper instances.
NamespaceInternalError
If this error is raised anywhere (along with any other unexpected exceptions),
it should result in termination of the running program (see the except blocks
in NamespaceAPIFunctionWrapper.wrapped_function).
"""
import types
# To check if objects are thread.LockType objects.
import thread
import emulcomm
import emulfile
import emulmisc
import emultimer
import nonportable
import safe # Used to get SafeDict
import tracebackrepy
import virtual_namespace
from exception_hierarchy import *
# Save a copy of a few functions not available at runtime.
_saved_getattr = getattr
_saved_callable = callable
_saved_hash = hash
_saved_id = id
##############################################################################
# Public functions of this module to be called from the outside.
##############################################################################
def wrap_and_insert_api_functions(usercontext):
"""
This is the main public function in this module at the current time. It will
wrap each function in the usercontext dict in a wrapper with custom
restrictions for that specific function. These custom restrictions are
defined in the dictionary USERCONTEXT_WRAPPER_INFO.
"""
_init_namespace()
for function_name in USERCONTEXT_WRAPPER_INFO:
function_info = USERCONTEXT_WRAPPER_INFO[function_name]
wrapperobj = NamespaceAPIFunctionWrapper(function_info)
usercontext[function_name] = wrapperobj.wrapped_function
##############################################################################
# Helper functions for the above public function.
##############################################################################
# Whether _init_namespace() has already been called.
initialized = False
def _init_namespace():
"""
Performs one-time initialization of the namespace module.
"""
global initialized
if not initialized:
initialized = True
_prepare_wrapped_functions_for_object_wrappers()
# These dictionaries will ultimately contain keys whose names are allowed
# methods that can be called on the objects and values which are the wrapped
# versions of the functions which are exposed to users. If a dictionary
# is empty, it means no methods can be called on a wrapped object of that type.
file_object_wrapped_functions_dict = {}
lock_object_wrapped_functions_dict = {}
tcp_socket_object_wrapped_functions_dict = {}
tcp_server_socket_object_wrapped_functions_dict = {}
udp_server_socket_object_wrapped_functions_dict = {}
virtual_namespace_object_wrapped_functions_dict = {}
def _prepare_wrapped_functions_for_object_wrappers():
"""
Wraps functions that will be used whenever a wrapped object is created.
After this has been called, the dictionaries such as
file_object_wrapped_functions_dict have been populated and therefore can be
used by functions such as wrap_socket_obj().
"""
objects_tuples = [(FILE_OBJECT_WRAPPER_INFO, file_object_wrapped_functions_dict),
(LOCK_OBJECT_WRAPPER_INFO, lock_object_wrapped_functions_dict),
(TCP_SOCKET_OBJECT_WRAPPER_INFO, tcp_socket_object_wrapped_functions_dict),
(TCP_SERVER_SOCKET_OBJECT_WRAPPER_INFO, tcp_server_socket_object_wrapped_functions_dict),
(UDP_SERVER_SOCKET_OBJECT_WRAPPER_INFO, udp_server_socket_object_wrapped_functions_dict),
(VIRTUAL_NAMESPACE_OBJECT_WRAPPER_INFO, virtual_namespace_object_wrapped_functions_dict)]
for description_dict, wrapped_func_dict in objects_tuples:
for function_name in description_dict:
function_info = description_dict[function_name]
wrapperobj = NamespaceAPIFunctionWrapper(function_info, is_method=True)
wrapped_func_dict[function_name] = wrapperobj.wrapped_function
##############################################################################
# Helper functions.
##############################################################################
def _handle_internalerror(message, exitcode):
"""
Terminate the running program. This is used rather than
tracebackrepy.handle_internalerror directly in order to make testing easier."""
tracebackrepy.handle_internalerror(message, exitcode)
def _is_in(obj, sequence):
"""
A helper function to do identity ("is") checks instead of equality ("==")
when using X in [A, B, C] type constructs. So you would write:
if _is_in(type(foo), [int, long]):
instead of:
if type(foo) in [int, long]:
"""
for item in sequence:
if obj is item:
return True
return False
##############################################################################
# Constants that define which functions should be wrapped and how. These are
# used by the functions wrap_and_insert_api_functions() and
# wrap_builtin_functions().
##############################################################################
class BaseProcessor(object):
"""Base type for ValueProcess and ObjectProcessor."""
class ValueProcessor(BaseProcessor):
"""
This is for simple/builtin types and combinations of them. Basically,
anything that needs to be copied when used as an argument or return
value and doesn't need to be wrapped or unwrapped as it passes through
the namespace layer.
"""
def check(self):
raise NotImplementedError
def copy(self, val):
return _copy(val)
class ObjectProcessor(BaseProcessor):
"""
This is for for anything that needs to be wrapped or unwrapped (not copied)
as it passes through the namespace layer.
"""
def check(self):
raise NotImplementedError
def wrap(self, val):
raise NotImplementedError
def unwrap(self, val):
return val._wrapped__object
class Str(ValueProcessor):
"""Allows str or unicode."""
def __init__(self, maxlen=None, minlen=None):
self.maxlen = maxlen
self.minlen = minlen
def check(self, val):
if not _is_in(type(val), [str, unicode]):
raise RepyArgumentError("Invalid type %s" % type(val))
if self.maxlen is not None:
if len(val) > self.maxlen:
raise RepyArgumentError("Max string length is %s" % self.maxlen)
if self.minlen is not None:
if len(val) < self.minlen:
raise RepyArgumentError("Min string length is %s" % self.minlen)
class Int(ValueProcessor):
"""Allows int or long."""
def __init__(self, min=0):
self.min = min
def check(self, val):
if not _is_in(type(val), [int, long]):
raise RepyArgumentError("Invalid type %s" % type(val))
if val < self.min:
raise RepyArgumentError("Min value is %s." % self.min)
class NoneOrInt(ValueProcessor):
"""Allows a NoneType or an int. This doesn't enforce min limit on the
ints."""
def check(self, val):
if val is not None and not _is_in(type(val), [int, long]):
raise RepyArgumentError("Invalid type %s" % type(val))
class StrOrInt(ValueProcessor):
"""Allows a string or int. This doesn't enforce max/min/length limits on the
strings and ints."""
def check(self, val):
if not _is_in(type(val), [int, long, str, unicode]):
raise RepyArgumentError("Invalid type %s" % type(val))
class StrOrNone(ValueProcessor):
"""Allows str, unicode, or None."""
def check(self, val):
if val is not None:
Str().check(val)
class Float(ValueProcessor):
"""Allows float, int, or long."""
def __init__(self, allow_neg=False):
self.allow_neg = allow_neg
def check(self, val):
if not _is_in(type(val), [int, long, float]):
raise RepyArgumentError("Invalid type %s" % type(val))
if not self.allow_neg:
if val < 0:
raise RepyArgumentError("Must be non-negative.")
class Bool(ValueProcessor):
"""Allows bool."""
def check(self, val):
if type(val) is not bool:
raise RepyArgumentError("Invalid type %s" % type(val))
class ListOfStr(ValueProcessor):
"""Allows lists of strings. This doesn't enforce max/min/length limits on the
strings and ints."""
def check(self, val):
if not type(val) is list:
raise RepyArgumentError("Invalid type %s" % type(val))
for item in val:
Str().check(item)
class List(ValueProcessor):
"""Allows lists. The list may contain anything."""
def check(self, val):
if not type(val) is list:
raise RepyArgumentError("Invalid type %s" % type(val))
class Dict(ValueProcessor):
"""Allows dictionaries. The dictionaries may contain anything."""
def check(self, val):
if not type(val) is dict:
raise RepyArgumentError("Invalid type %s" % type(val))
class DictOfStrOrInt(ValueProcessor):
"""
Allows a dictionary that only contains string keys and str or int
values. This doesn't enforce max/min/length limits on the
strings and ints.
"""
def check(self, val):
if not type(val) is dict:
raise RepyArgumentError("Invalid type %s" % type(val))
for key, value in val.items():
Str().check(key)
StrOrInt().check(value)
class Func(ValueProcessor):
"""Allows a user-defined function object."""
def check(self, val):
if not _is_in(type(val), [types.FunctionType, types.LambdaType, types.MethodType]):
raise RepyArgumentError("Invalid type %s" % type(val))
class NonCopiedVarArgs(ValueProcessor):
"""Allows any number of arguments. This must be the last arg listed. """
def check(self, val):
pass
def copy(self, val):
return val
class File(ObjectProcessor):
"""Allows File objects."""
def check(self, val):
if not isinstance(val, emulfile.emulated_file):
raise RepyArgumentError("Invalid type %s" % type(val))
def wrap(self, val):
return NamespaceObjectWrapper("file", val, file_object_wrapped_functions_dict)
class Lock(ObjectProcessor):
"""Allows Lock objects."""
def check(self, val):
if not isinstance(val, emulmisc.emulated_lock):
raise RepyArgumentError("Invalid type %s" % type(val))
def wrap(self, val):
return NamespaceObjectWrapper("lock", val, lock_object_wrapped_functions_dict)
class UDPServerSocket(ObjectProcessor):
"""Allows UDPServerSocket objects."""
def check(self, val):
if not isinstance(val, emulcomm.UDPServerSocket):
raise RepyArgumentError("Invalid type %s" % type(val))
def wrap(self, val):
return NamespaceObjectWrapper("socket", val, udp_server_socket_object_wrapped_functions_dict)
class TCPServerSocket(ObjectProcessor):
"""Allows TCPServerSocket objects."""
def check(self, val):
if not isinstance(val, emulcomm.TCPServerSocket):
raise RepyArgumentError("Invalid type %s" % type(val))
def wrap(self, val):
return NamespaceObjectWrapper("socket", val, tcp_server_socket_object_wrapped_functions_dict)
class TCPSocket(ObjectProcessor):
"""Allows TCPSocket objects."""
def check(self, val):
if not isinstance(val, emulcomm.EmulatedSocket):
raise RepyArgumentError("Invalid type %s" % type(val))
def wrap(self, val):
return NamespaceObjectWrapper("socket", val, tcp_socket_object_wrapped_functions_dict)
class VirtualNamespace(ObjectProcessor):
"""Allows VirtualNamespace objects."""
def check(self, val):
if not isinstance(val, virtual_namespace.VirtualNamespace):
raise RepyArgumentError("Invalid type %s" % type(val))
def wrap(self, val):
return NamespaceObjectWrapper("VirtualNamespace", val,
virtual_namespace_object_wrapped_functions_dict)
class SafeDict(ValueProcessor):
"""Allows SafeDict objects."""
# TODO: provide a copy function that won't actually copy so that
# references are maintained.
def check(self, val):
if not isinstance(val, safe.SafeDict):
raise RepyArgumentError("Invalid type %s" % type(val))
class DictOrSafeDict(ValueProcessor):
"""Allows SafeDict objects or regular dict objects."""
# TODO: provide a copy function that won't actually copy so that
# references are maintained.
def check(self, val):
if type(val) is not dict:
SafeDict().check(val)
# These are the functions in the user's name space excluding the builtins we
# allow. Each function is a key in the dictionary. Each value is a dictionary
# that defines the functions to be used by the wrapper when a call is
# performed. It is the same dictionary that is passed to the constructor of
# the NamespaceAPIFunctionWrapper class to create the actual wrappers.
# The public function wrap_and_insert_api_functions() uses this dictionary as
# the basis for what is populated in the user context. Any function
# defined here will be wrapped and made available to untrusted user code.
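# Each entry maps the name exposed to user code to a dict with three keys:
# 'func' -- the underlying callable (or, for object methods, a string naming the
# method to look up on the wrapped instance),
# 'args' -- a list of processor instances, one per positional argument, used to
# validate and copy or wrap/unwrap that argument, and
# 'return' -- the processor applied to the return value (a tuple of processors
# means the function returns a tuple of values).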
USERCONTEXT_WRAPPER_INFO = {
'gethostbyname' :
{'func' : emulcomm.gethostbyname,
'args' : [Str()],
'return' : Str()},
'getmyip' :
{'func' : emulcomm.getmyip,
'args' : [],
'return' : Str()},
'sendmessage' :
{'func' : emulcomm.sendmessage,
'args' : [Str(), Int(), Str(), Str(), Int()],
'return' : Int()},
'listenformessage' :
{'func' : emulcomm.listenformessage,
'args' : [Str(), Int()],
'return' : UDPServerSocket()},
'openconnection' :
{'func' : emulcomm.openconnection,
'args' : [Str(), Int(), Str(), Int(), Float()],
# 'raise' : [AddressBindingError, PortRestrictedError, PortInUseError,
# ConnectionRefusedError, TimeoutError, RepyArgumentError],
'return' : TCPSocket()},
'listenforconnection' :
{'func' : emulcomm.listenforconnection,
'args' : [Str(), Int()],
'return' : TCPServerSocket()},
'openfile' :
{'func' : emulfile.emulated_open,
'args' : [Str(maxlen=120), Bool()],
'return' : File()},
'listfiles' :
{'func' : emulfile.listfiles,
'args' : [],
'return' : ListOfStr()},
'removefile' :
{'func' : emulfile.removefile,
'args' : [Str(maxlen=120)],
'return' : None},
'exitall' :
{'func' : emulmisc.exitall,
'args' : [],
'return' : None},
'createlock' :
{'func' : emulmisc.createlock,
'args' : [],
'return' : Lock()},
'getruntime' :
{'func' : emulmisc.getruntime,
'args' : [],
'return' : Float()},
'randombytes' :
{'func' : emulmisc.randombytes,
'args' : [],
'return' : Str(maxlen=1024, minlen=1024)},
'createthread' :
{'func' : emultimer.createthread,
'args' : [Func()],
'return' : None},
'sleep' :
{'func' : emultimer.sleep,
'args' : [Float()],
'return' : None},
'log' :
{'func' : emulmisc.log,
'args' : [NonCopiedVarArgs()],
'return' : None},
'getthreadname' :
{'func' : emulmisc.getthreadname,
'args' : [],
'return' : Str()},
'createvirtualnamespace' :
{'func' : virtual_namespace.createvirtualnamespace,
'args' : [Str(), Str()],
'return' : VirtualNamespace()},
'getresources' :
{'func' : nonportable.get_resources,
'args' : [],
'return' : (Dict(), Dict(), List())},
'getlasterror' :
{'func' : emulmisc.getlasterror,
'args' : [],
'return' : StrOrNone()},
}
FILE_OBJECT_WRAPPER_INFO = {
'close' :
{'func' : emulfile.emulated_file.close,
'args' : [],
'return' : None},
'readat' :
{'func' : emulfile.emulated_file.readat,
'args' : [NoneOrInt(), Int(min=0)],
'return' : Str()},
'writeat' :
{'func' : emulfile.emulated_file.writeat,
'args' : [Str(), Int(min=0)],
'return' : None},
}
TCP_SOCKET_OBJECT_WRAPPER_INFO = {
'close' :
{'func' : emulcomm.EmulatedSocket.close,
'args' : [],
'return' : Bool()},
'recv' :
{'func' : emulcomm.EmulatedSocket.recv,
'args' : [Int(min=1)],
'return' : Str()},
'send' :
{'func' : emulcomm.EmulatedSocket.send,
'args' : [Str()],
'return' : Int(min=0)},
}
# TODO: Figure out which real object should be wrapped. It doesn't appear
# to be implemented yet as there is no "getconnection" in the repy_v2 source.
TCP_SERVER_SOCKET_OBJECT_WRAPPER_INFO = {
'close' :
{'func' : emulcomm.TCPServerSocket.close,
'args' : [],
'return' : Bool()},
'getconnection' :
{'func' : emulcomm.TCPServerSocket.getconnection,
'args' : [],
'return' : (Str(), Int(), TCPSocket())},
}
UDP_SERVER_SOCKET_OBJECT_WRAPPER_INFO = {
'close' :
{'func' : emulcomm.UDPServerSocket.close,
'args' : [],
'return' : Bool()},
'getmessage' :
{'func' : emulcomm.UDPServerSocket.getmessage,
'args' : [],
'return' : (Str(), Int(), Str())},
}
LOCK_OBJECT_WRAPPER_INFO = {
'acquire' :
# A string for the target_func indicates that a function by this name on the
# instance, rather than a direct function reference, is what should be wrapped.
{'func' : 'acquire',
'args' : [Bool()],
'return' : Bool()},
'release' :
# A string for the target_func indicates that a function by this name on the
# instance, rather than a direct function reference, is what should be wrapped.
{'func' : 'release',
'args' : [],
'return' : None},
}
VIRTUAL_NAMESPACE_OBJECT_WRAPPER_INFO = {
# Evaluate must take a dict or SafeDict, and can
# only return a SafeDict. We must _not_ copy the
# dict since that will screw up the references in the dict.
'evaluate' :
{'func' : 'evaluate',
'args' : [DictOrSafeDict()],
'return' : SafeDict()},
}
##############################################################################
# The classes we define from which actual wrappers are instantiated.
##############################################################################
def _copy(obj, objectmap=None):
"""
<Purpose>
Create a deep copy of an object without using the python 'copy' module.
Using copy.deepcopy() doesn't work because builtins like id and hasattr
aren't available when this is called.
<Arguments>
obj
The object to make a deep copy of.
objectmap
A mapping between original objects and the corresponding copy. This is
used to handle circular references.
<Exceptions>
TypeError
If an object is encountered that we don't know how to make a copy of.
NamespaceViolationError
If an unexpected error occurs while copying. This isn't the greatest
solution, but in general the idea is we just need to abort the wrapped
function call.
<Side Effects>
A new reference is created to every non-simple type of object. That is,
everything except objects of type str, unicode, int, etc.
<Returns>
The deep copy of obj with circular/recursive references preserved.
"""
try:
# If this is a top-level call to _copy, create a new objectmap for use
# by recursive calls to _copy.
if objectmap is None:
objectmap = {}
# If this is a circular reference, use the copy we already made.
elif _saved_id(obj) in objectmap:
return objectmap[_saved_id(obj)]
# types.InstanceType is included because the user can provide an instance
# of a class of their own in the list of callback args to settimer.
if _is_in(type(obj), [str, unicode, int, long, float, complex, bool, frozenset,
types.NoneType, types.FunctionType, types.LambdaType,
types.MethodType, types.InstanceType]):
return obj
elif type(obj) is list:
temp_list = []
# Need to save this in the objectmap before recursing because lists
# might have circular references.
objectmap[_saved_id(obj)] = temp_list
for item in obj:
temp_list.append(_copy(item, objectmap))
return temp_list
elif type(obj) is tuple:
temp_list = []
for item in obj:
temp_list.append(_copy(item, objectmap))
# I'm not 100% confident on my reasoning here, so feel free to point
# out where I'm wrong: There's no way for a tuple to directly contain
# a circular reference to itself. Instead, it has to contain, for
# example, a dict which has the same tuple as a value. In that
# situation, we can avoid infinite recursion and properly maintain
# circular references in our copies by checking the objectmap right
# after we do the copy of each item in the tuple. The existence of the
# dictionary would keep the recursion from being infinite because those
# are properly handled. That just leaves making sure we end up with
# only one copy of the tuple. We do that here by checking to see if we
# just made a copy as a result of copying the items above. If so, we
# return the one that's already been made.
if _saved_id(obj) in objectmap:
return objectmap[_saved_id(obj)]
retval = tuple(temp_list)
objectmap[_saved_id(obj)] = retval
return retval
elif type(obj) is set:
temp_list = []
# We can't just store this list object in the objectmap because it isn't
# a set yet. If it's possible to have a set contain a reference to
# itself, this could result in infinite recursion. However, sets can
# only contain hashable items so I believe this can't happen.
for item in obj:
temp_list.append(_copy(item, objectmap))
retval = set(temp_list)
objectmap[_saved_id(obj)] = retval
return retval
elif type(obj) is dict:
temp_dict = {}
# Need to save this in the objectmap before recursing because dicts
# might have circular references.
objectmap[_saved_id(obj)] = temp_dict
for key, value in obj.items():
temp_key = _copy(key, objectmap)
temp_dict[temp_key] = _copy(value, objectmap)
return temp_dict
# We don't copy certain objects. This is because copying an emulated file
# object, for example, will cause the destructor of the original one to
# be invoked, which will close the actual underlying file. As the object
# is wrapped and the client does not have access to it, it's safe to not
# wrap it.
elif isinstance(obj, (NamespaceObjectWrapper, emulfile.emulated_file,
emulcomm.EmulatedSocket, emulcomm.TCPServerSocket,
emulcomm.UDPServerSocket, thread.LockType,
virtual_namespace.VirtualNamespace)):
return obj
else:
raise TypeError("_copy is not implemented for objects of type " + str(type(obj)))
except Exception, e:
raise NamespaceInternalError("_copy failed on " + str(obj) + " with message " + str(e))
class NamespaceInternalError(Exception):
"""Something went wrong and we should terminate."""
class NamespaceObjectWrapper(object):
"""
Instances of this class are used to wrap handles and objects returned by
api functions to the user code.
The methods that can be called on these instances are mostly limited to
what is in the allowed_functions_dict passed to the constructor. The
exception is that a simple __repr__() is defined as well as an __iter__()
and next(). However, instances won't really be iterable unless a next()
method is defined in the allowed_functions_dict.
"""
def __init__(self, wrapped_type_name, wrapped_object, allowed_functions_dict):
"""
<Purpose>
Constructor
<Arguments>
self
wrapped_type_name
The name (a string) of what type of wrapped object. For example,
this could be "timerhandle".
wrapped_object
The actual object to be wrapped.
allowed_functions_dict
A dictionary of the allowed methods that can be called on the object.
The keys should be the names of the methods, the values are the
wrapped functions that will be called.
"""
    # Only one underscore at the front so python doesn't do its own mangling
    # of the name. We're not trying to keep this private in the private class
    # variable sense of python where nothing is really private; instead, we just
    # want a double-underscore in there as extra protection against untrusted
    # code being able to access the values.
self._wrapped__type_name = wrapped_type_name
self._wrapped__object = wrapped_object
self._wrapped__allowed_functions_dict = allowed_functions_dict
def __getattr__(self, name):
"""
When a method is called on an instance, we look for the method in the
allowed_functions_dict that was provided to the constructor. If there
is such a method in there, we return a function that will properly
invoke the method with the correct 'self' as the first argument.
"""
if name in self._wrapped__allowed_functions_dict:
wrapped_func = self._wrapped__allowed_functions_dict[name]
def __do_func_call(*args, **kwargs):
return wrapped_func(self._wrapped__object, *args, **kwargs)
return __do_func_call
else:
# This is the standard way of handling "it doesn't exist as far as we
# are concerned" in __getattr__() methods.
raise AttributeError, name
def __iter__(self):
"""
We provide __iter__() as part of the class rather than through __getattr__
because python won't look for the attribute in the object to determine if
the object is iterable, instead it will look directly at the class the
object is an instance of. See the docstring for next() for more info.
"""
return self
def next(self):
"""
We provide next() as part of the class rather than through __getattr__
because python won't look for the attribute in the object to determine if
the object is iterable, instead it will look directly at the class the
object is an instance of. We don't want everything that is wrapped to
be considered iterable, though, so we return a TypeError if this gets
called but there isn't a wrapped next() method.
"""
if "next" in self._wrapped__allowed_functions_dict:
return self._wrapped__allowed_functions_dict["next"](self._wrapped__object)
raise TypeError("You tried to iterate a non-iterator of type " + str(type(self._wrapped__object)))
def __repr__(self):
return "<Namespace wrapped " + self._wrapped__type_name + ": " + repr(self._wrapped__object) + ">"
def __hash__(self):
return _saved_hash(self._wrapped__object)
def __eq__(self, other):
"""In addition to __hash__, this is necessary for use as dictionary keys."""
# We could either assume "other" is a wrapped object and try to compare
# its wrapped object against this wrapped object, or we could just compare
# the hashes of each. If we try to unwrap the other object, it means you
# couldn't compare a wrapped object to an unwrapped one.
return _saved_hash(self) == _saved_hash(other)
def __ne__(self, other):
"""
It's good for consistency to define __ne__ if one is defining __eq__,
though this is not needed for using objects as dictionary keys.
"""
return _saved_hash(self) != _saved_hash(other)
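##############################################################################
# Illustrative sketch (added note, not part of the original repy module): how a
# NamespaceObjectWrapper restricts attribute access. The _Counter class and the
# lambda standing in for a wrapped function are invented for this example; real
# wrappers dispatch to NamespaceAPIFunctionWrapper.wrapped_function (below).
#
#   class _Counter(object):
#     def __init__(self):
#       self.value = 0
#     def bump(self):
#       self.value += 1
#       return self.value
#
#   wrapped = NamespaceObjectWrapper("counter", _Counter(),
#                                    {"bump": lambda obj: obj.bump()})
#   wrapped.bump()    # allowed: dispatched through __getattr__, returns 1
#   wrapped.value     # AttributeError: "value" is not in the allowed dict
##############################################################################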
class NamespaceAPIFunctionWrapper(object):
"""
Instances of this class exist solely to provide function wrapping. This is
done by creating an instance of the class and then making available the
instance's wrapped_function() method to any code that should only be allowed
to call the wrapped version of the function.
"""
def __init__(self, func_dict, is_method=False):
"""
<Purpose>
Constructor.
<Arguments>
self
func_dict
        A dictionary with the following keys, whose values are the
        corresponding function or argument/return specifications:
func (required) -- a function or a string of the name
of the method on the underlying object.
args (required)
return (required)
is_method -- if this is an object's method being wrapped
rather than a regular function.
<Exceptions>
None
<Side Effects>
None
<Returns>
None
"""
# Required in func_dict.
self.__func = func_dict["func"]
self.__args = func_dict["args"]
self.__return = func_dict["return"]
self.__is_method = is_method
    # Make sure that self.__func really is a function, or a string indicating
    # that a function by that name on the underlying object should be called.
if not _saved_callable(self.__func) and type(self.__func) is not str:
raise TypeError("The func was neither callable nor a string when " +
"constructing a namespace-wrapped function. The object " +
"used for target_func was: " + repr(self.__func))
if type(self.__func) is str:
self.__func_name = self.__func
else:
self.__func_name = self.__func.__name__
def _process_args(self, args):
args_to_return = []
for index in range(len(args)):
# Armon: If there are more arguments than there are type specifications
# and we are using NonCopiedVarArgs, then check against that.
if index >= len(self.__args) and isinstance(self.__args[-1], NonCopiedVarArgs):
arg_type = self.__args[-1]
else:
arg_type = self.__args[index]
# We only copy simple types, which means we only copy ValueProcessor not
# ObjectProcessor arguments.
if isinstance(arg_type, ValueProcessor):
temparg = arg_type.copy(args[index])
elif isinstance(arg_type, ObjectProcessor):
temparg = arg_type.unwrap(args[index])
else:
raise NamespaceInternalError("Unknown argument expectation.")
arg_type.check(temparg)
args_to_return.append(temparg)
return args_to_return
def _process_retval_helper(self, processor, retval):
try:
if isinstance(processor, ValueProcessor):
tempretval = processor.copy(retval)
processor.check(tempretval)
elif isinstance(processor, ObjectProcessor):
processor.check(retval)
tempretval = processor.wrap(retval)
elif processor is None:
if retval is not None:
raise InternalRepyError("Expected None but wasn't.")
tempretval = None
else:
raise InternalRepyError("Unknown retval expectation.")
return tempretval
except RepyArgumentError, err:
raise InternalRepyError("Invalid retval type: %s" % err)
def _process_retval(self, retval):
try:
# Allow the return value to be a tuple of processors.
if type(retval) is tuple:
if len(retval) != len(self.__return):
raise InternalRepyError("Returned tuple of wrong size: %s" % str(retval))
tempretval = []
for index in range(len(retval)):
tempitem = self._process_retval_helper(self.__return[index], retval[index])
tempretval.append(tempitem)
tempretval = tuple(tempretval)
else:
tempretval = self._process_retval_helper(self.__return, retval)
except Exception, e:
raise InternalRepyError(
"Function '" + self.__func_name + "' returned with unallowed return type " +
str(type(retval)) + " : " + str(e))
return tempretval
def wrapped_function(self, *args, **kwargs):
"""
<Purpose>
Act as the function that is wrapped but perform all required sanitization
and checking of data that goes into and comes out of the underlying
function.
<Arguments>
self
*args
**kwargs
The arguments to the underlying function.
<Exceptions>
NamespaceViolationError
If some aspect of the arguments or function call is not allowed.
Anything else that the underlying function may raise.
<Side Effects>
      Anything that the underlying function may do.
<Returns>
Anything that the underlying function may return.
"""
try:
# We don't allow keyword args.
if kwargs:
raise RepyArgumentError("Keyword arguments not allowed when calling %s." %
self.__func_name)
if self.__is_method:
# This is a method of an object instance rather than a standalone function.
# The "self" argument will be passed implicitly by python in some cases, so
# we remove it from the args we check. For the others, we'll add it back in
# after the check.
args_to_check = args[1:]
else:
args_to_check = args
if len(args_to_check) != len(self.__args):
if not self.__args or not isinstance(self.__args[-1:][0], NonCopiedVarArgs):
raise RepyArgumentError("Function '" + self.__func_name +
"' takes " + str(len(self.__args)) + " arguments, not " +
str(len(args_to_check)) + " as you provided.")
args_copy = self._process_args(args_to_check)
args_to_use = None
# If it's a string rather than a function, then this is our convention
# for indicating that we want to wrap the function of this particular
# object. We use this if the function to wrap isn't available without
# having the object around, such as with real lock objects.
if type(self.__func) is str:
func_to_call = _saved_getattr(args[0], self.__func)
args_to_use = args_copy
else:
func_to_call = self.__func
if self.__is_method:
# Sanity check the object we're adding back in as the "self" argument.
if not isinstance(args[0], (NamespaceObjectWrapper, emulfile.emulated_file,
emulcomm.EmulatedSocket, emulcomm.TCPServerSocket,
emulcomm.UDPServerSocket, thread.LockType,
virtual_namespace.VirtualNamespace)):
raise NamespaceInternalError("Wrong type for 'self' argument.")
# If it's a method but the function was not provided as a string, we
# actually do have to add the first argument back in. Yes, this whole
# area of code is ugly.
args_to_use = [args[0]] + args_copy
else:
args_to_use = args_copy
retval = func_to_call(*args_to_use)
return self._process_retval(retval)
except RepyException:
# TODO: this should be changed to RepyError along with all references to
# RepyException in the rest of the repy code.
# We allow any RepyError to continue up to the client code.
raise
except:
# Code evaluated inside a `VirtualNamespace` may raise arbitrary
# errors, including plain Python exceptions. Reraise these errors
# so that the calling user code sees them.
# (Otherwise, things like `NameError`s in a virtual namespace
# crash the sandbox despite being wrapped in `try`/`except`,
# see SeattleTestbed/repy_v2#132.)
if type(args[0]) == virtual_namespace.VirtualNamespace:
raise
# Non-`RepyException`s outside of `VirtualNamespace` methods
# are unexpected and indicative of a programming error on
# our side, so we terminate.
_handle_internalerror("Unexpected exception from within Repy API", 843)
|
mit
| 8,661,446,760,081,302,000 | 31.057028 | 109 | 0.655308 | false |
tsg-/pyeclib
|
pyeclib/utils.py
|
1
|
3062
|
# Copyright (c) 2013, 2014, Kevin Greenan (kmgreen2@gmail.com)
# Copyright (c) 2014, Tushar Gohad (tusharsg@gmail.com)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution. THIS SOFTWARE IS
# PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import traceback
def positive_int_value(param):
# Returns value as a positive int or raises ValueError otherwise
try:
value = int(param)
assert value > 0
except (TypeError, ValueError, AssertionError):
# Handle: TypeError for 'None', ValueError for non-int strings
# and AssertionError for values <= 0
raise ValueError('Must be an integer > 0, not "%s".' % param)
return value
def import_class(import_str):
"""
Returns a class from a string that specifies a module and/or class
:param import_str: import path, e.g. 'httplib.HTTPConnection'
:returns imported object
    :raises: ImportError if the class does not exist or the path is invalid
"""
(mod_str, separator, class_str) = import_str.rpartition('.')
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ValueError, AttributeError):
        raise ImportError('Class %s cannot be found (%s)' %
(class_str,
traceback.format_exception(*sys.exc_info())))
def create_instance(import_str, *args, **kwargs):
"""
Returns instance of class which imported by import path.
:param import_str: import path of class
:param \*args: indexed arguments for new instance
:param \*\*kwargs: keyword arguments for new instance
:returns: instance of imported class which instantiated with
arguments *args and **kwargs
"""
try:
object_class = import_class(import_str)
except Exception:
raise
instance = object_class(*args, **kwargs)
return instance
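# Hedged usage sketch (added; not part of the original pyeclib module). It
# exercises the helpers above with a standard-library class path;
# 'collections.OrderedDict' is only an illustrative import string.
if __name__ == '__main__':
    print(positive_int_value('3'))
    ordered_dict_cls = import_class('collections.OrderedDict')
    print(ordered_dict_cls)
    print(create_instance('collections.OrderedDict', [('key', 'value')]))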
|
bsd-2-clause
| 2,439,260,725,972,171,000 | 39.826667 | 78 | 0.710973 | false |
ssh0/growing-string
|
triangular_lattice/diecutting/result_count_on_edge.py
|
1
|
9360
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# written by Shotaro Fujimoto
# 2016-12-16
import matplotlib.pyplot as plt
# from mpl_toolkits.mplot3d.axes3d import Axes3D
import matplotlib.cm as cm
import numpy as np
import set_data_path
class Visualizer(object):
def __init__(self, subjects):
self.data_path_list = set_data_path.data_path
if len(subjects) != 0:
for subject in subjects:
getattr(self, 'result_' + subject)()
def load_data(self, _path):
data = np.load(_path)
beta = data['beta']
try:
size_dist_ave = data['size_dist_ave']
if len(size_dist_ave) == 0:
raise KeyError
return self.load_data_averaged(_path)
except KeyError:
pass
num_of_strings = data['num_of_strings']
frames = data['frames']
Ls = data['Ls'].astype(np.float)
# Ls = (3 * Ls * (Ls + 1) + 1)
size_dist = data['size_dist']
N0 = np.array([l[1] for l in size_dist], dtype=np.float) / num_of_strings
n0 = N0[1:]
S = np.array([np.sum(l) for l in size_dist], dtype=np.float) / num_of_strings
n1 = (S[1:] - n0) * 2.
N = []
for l in size_dist:
dot = np.dot(np.arange(len(l)), np.array(l).T)
N.append(dot)
# N = np.array([np.dot(np.arange(len(l)), np.array(l).T) for l in size_dist])
N_all = 3. * Ls * (Ls + 1.) + 1
N = np.array(N, dtype=np.float) / num_of_strings
N_minus = N_all - N
N_minus_rate = N_minus / N_all
n_minus = N_minus[1:] - N_minus[:-1]
n1_ave = n1 / np.sum(n1)
n2 = (6 * Ls[1:]) - (n0 + n1 + n_minus)
self.beta = beta
self.num_of_strings = num_of_strings
self.frames = frames
self.Ls = Ls
self.N = N
self.N_minus = N_minus
self.N_minus_rate = N_minus_rate
self.S = S
self.n0 = n0
self.n1 = n1
self.n2 = n2
self.n_minus = n_minus
self.n1_ave = n1_ave
def load_data_averaged(self, _path):
data = np.load(_path)
beta = data['beta']
num_of_strings = data['num_of_strings']
frames = data['frames']
Ls = data['Ls'].astype(np.float)
# Ls = (3 * Ls * (Ls + 1) + 1)
# size_dist = data['size_dist']
size_dist_ave = data['size_dist_ave']
N0 = np.array([l[1] for l in size_dist_ave], dtype=np.float)
n0 = N0[1:]
S = np.array([np.sum(l) for l in size_dist_ave], dtype=np.float)
n1 = (S[1:] - n0) * 2.
N = []
for l in size_dist_ave:
dot = np.dot(np.arange(len(l)), np.array(l).T)
N.append(dot)
# N = np.array([np.dot(np.arange(len(l)), np.array(l).T) for l in size_dist_ave])
N_all = 3. * Ls * (Ls + 1.) + 1
N = np.array(N, dtype=np.float)
N_minus = N_all - N
N_minus_rate = N_minus / N_all
n_minus = N_minus[1:] - N_minus[:-1]
n1_ave = n1 / np.sum(n1)
n2 = (6 * Ls[1:]) - (n0 + n1 + n_minus)
self.beta = beta
self.num_of_strings = num_of_strings
self.frames = frames
self.Ls = Ls
self.N = N
self.N_all = N_all
self.N_minus = N_minus
self.N_minus_rate = N_minus_rate
self.S = S
self.n_all = 6 * Ls[1:]
self.n0 = n0
self.n1 = n1
self.n2 = n2
self.n_minus = n_minus
self.n1_ave = n1_ave
def result_N(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.N[1:], '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_title('Occupied points in the cutting region' +
' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$N$')
plt.show()
def result_N_minus_rate(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.N_minus_rate[1:], '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_title('The rate of not occupied site in all N' +
' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$N_{-1} / N_{\mathrm{all}}$')
plt.show()
def result_n0(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.n0, '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_title('Averaged number of the sites which is the only member of \
a subcluster on the cutting edges.' +
' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$n_{0}$')
plt.show()
def result_n1(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.n1, '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_title('Averaged number of the sites which is connected to a \
existing subcluster on the cutting edges.' +
' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$n_{1}$')
plt.show()
def result_n2(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.n2, '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_title('Averaged number of the sites on the cutting edges which \
is connected to two neighbors.' +
' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$n_{2}$')
plt.show()
def result_n_minus(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.n_minus, '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_title('Averaged number of the sites which is not occupied on \
the cutting edges.' +
' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$n_{-1}$')
plt.show()
def result_S(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
ax.plot(self.Ls[1:], self.S[1:] / np.sum(self.S[1:]), '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_ylim([0, ax.get_ylim()[1]])
ax.set_title('Averaged number of the subclusters in the cutted region.'
+ ' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$S$')
plt.show()
def result_S_rate(self):
fig, ax = plt.subplots()
for i, result_data_path in enumerate(self.data_path_list):
self.load_data(result_data_path)
# ax.plot(self.Ls[1:], self.S[1:] / np.sum(self.S[1:]), '.',
# ax.plot(self.Ls[1:], self.S[1:] / self.n_all, '.',
ax.plot(self.Ls[1:], self.S[1:] / self.N[1:], '.',
label=r'$\beta = %2.2f$' % self.beta,
color=cm.viridis(float(i) / len(self.data_path_list)))
ax.legend(loc='best')
ax.set_ylim([0, ax.get_ylim()[1]])
ax.set_title('Averaged number of the subclusters in the cutted region'
+ ' (normalized)'
+ ' (sample: {})'.format(self.num_of_strings))
ax.set_xlabel(r'Cutting size $L$')
ax.set_ylabel(r'$S$')
plt.show()
if __name__ == '__main__':
# subject: 'N', 'N_minus_rate', 'n0', 'n1', 'n2', 'n_minus', 'S'
main = Visualizer(
[
# 'N',
# 'N_minus_rate',
# 'n0',
# 'n1',
# 'n2',
# 'n_minus',
'S',
# 'S_rate'
]
)
|
mit
| -8,337,780,784,412,532,000 | 36.44 | 89 | 0.49797 | false |
grantcolasurdo/geras
|
fifth_edition/alignment.py
|
1
|
3115
|
"""Alignment object class"""
__author__ = "Grant Colasurdo"
class Alignment:
SHORT_TO_LONG = {
'N': 'Neutral',
'G': 'Good',
'E': 'Evil',
'C': 'Chaotic',
'L': 'Lawful',
'': ''
}
    def __init__(self, short_string: str=""):
        # Start both axes as "unset" so partial codes (e.g. "G") do not leave
        # the attributes missing.
        self._LawChaos = ""
        self._GoodEvil = ""
        if "L" in short_string:
self._LawChaos = "L"
if "C" in short_string:
self._LawChaos = "C"
if "G" in short_string:
self._GoodEvil = "G"
if "E" in short_string:
self._GoodEvil = "E"
if "N" in short_string:
if "NG" in short_string or "NE" in short_string:
self._LawChaos = "N"
if "LN" in short_string or "CN" in short_string:
self._GoodEvil = "N"
if short_string == "N":
self._LawChaos = "N"
self._GoodEvil = "N"
@property
def short_string(self):
string = self._LawChaos + self._GoodEvil
if string == "NN":
string = "N"
return string
@property
def long_string(self):
        law_chaos = self.SHORT_TO_LONG[self._LawChaos]
        good_evil = self.SHORT_TO_LONG[self._GoodEvil]
string = (law_chaos + " " + good_evil).strip()
if string == "Neutral Neutral":
string = "True Neutral"
return string
@property
def is_lawful(self):
return self._LawChaos == "L"
@is_lawful.setter
def is_lawful(self, value: bool):
if value:
self._LawChaos = "L"
elif self.is_lawful:
self._LawChaos = ""
else:
pass
@property
def is_good(self):
return self._GoodEvil == "G"
@is_good.setter
def is_good(self, value: bool):
if value:
self._GoodEvil = "G"
elif self.is_good:
self._GoodEvil = ""
else:
pass
@property
def is_chaotic(self):
return self._LawChaos == "C"
@is_chaotic.setter
def is_chaotic(self, value: bool):
if value:
self._LawChaos = "C"
elif self.is_chaotic:
self._LawChaos = ""
else:
pass
@property
def is_evil(self):
return self._GoodEvil == "E"
@is_evil.setter
def is_evil(self, value: bool):
if value:
self._GoodEvil = "E"
elif self.is_evil:
self._GoodEvil = ""
else:
pass
@property
def is_neutral_law_chaos(self):
return self._LawChaos == "N"
@is_neutral_law_chaos.setter
def is_neutral_law_chaos(self, value: bool):
if value:
self._LawChaos = "N"
elif self.is_neutral_law_chaos:
self._LawChaos = ""
else:
pass
@property
def is_neutral_good_evil(self):
return self._GoodEvil == "N"
@is_neutral_good_evil.setter
def is_neutral_good_evil(self, value: bool):
if value:
self._GoodEvil = "N"
elif self.is_neutral_good_evil:
self._GoodEvil = ""
else:
pass
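if __name__ == "__main__":
    # Hedged usage sketch (added; not part of the original module): round-trip
    # a few two-letter alignment codes through the class.
    print(Alignment("CG").long_string)   # Chaotic Good
    print(Alignment("N").long_string)    # True Neutral
    print(Alignment("LE").short_string)  # LE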
|
gpl-2.0
| -2,634,461,766,640,477,000 | 23.527559 | 60 | 0.488925 | false |
PJB3005/MoMMI
|
MoMMI/Modules/chance.py
|
1
|
3714
|
import asyncio
import random
from typing import Match
from discord import Message
from MoMMI import command, MChannel
@command("pick", r"(?:pick|choose)\s*\((.*?)\)")
async def pick_command(channel: MChannel, match: Match, message: Message) -> None:
choices = [x.strip() for x in match.group(1).split(",")]
if len(choices) < 2:
await channel.send("You gotta provide at least 2 options.")
return
choice = random.choice(choices)
await channel.send(f"**{choice}**")
@command("roll", r"(\d+)d(\d+)(?:\+(\d+))?")
async def roll_command(channel: MChannel, match: Match, message: Message) -> None:
result = "Results: "
count = int(match.group(1))
if count > 100:
await channel.send("Ok look dude. A minute or two after this dice command got implemented bobda ran a god damn 10000000000000000000000000000d10. Now because it has to ITERATE those dice and 10000000000000000000000000000 is a giant fucking number, that locked up MoMMI completely because no amount of asyncio is gonna save this madness. Thank god for SIGKILL. THEN I got pinged by Intigracy telling me MoMMI locked up. *sigh*")
return
total = 0
for i in range(0, count):
if i > 0:
result += ", "
roll = random.randint(1, int(match.group(2)))
total += roll
result += str(roll)
mod = match.group(3)
if mod is not None:
result += f" + {mod}"
total += int(mod)
result += f" = {total}"
await channel.send(result)
@command("rand", r"rand\s*(-?\d+)\s*(-?\d+)")
async def rand_command(channel: MChannel, match: Match, message: Message) -> None:
msg = str(random.randint(int(match.group(1)), int(match.group(2))))
await channel.send(msg)
@command("magic8ball", r"(?:magic|magic8ball)")
async def magic8ball_command(channel: MChannel, match: Match, message: Message) -> None:
choice = random.choice([
"It is certain",
"It is decidedly so",
"Without a doubt",
"Yes, definitely",
"You may rely on it",
"As I see it, yes",
"Most likely",
"Outlook: Positive",
"Yes",
"Signs point to: Yes",
"Reply hazy, try again",
"Ask again later",
"Better to not tell you right now",
"Cannot predict now",
"Concentrate, then ask again",
"Do not count on it",
"My reply is: no",
"My sources say: no",
"Outlook: Negative",
"Very doubtful"
])
await channel.send(choice)
async def load(loop: asyncio.AbstractEventLoop) -> None:
from MoMMI.Modules.help import register_help
register_help(__name__, "dice", """The party enters the AI upload.
The room's power systems are completely off. At the back of the room is a hole into the core, molten out of the reinforced wall.
*I walk up to the room's APC and see if the APC still works.*
Everybody roll a dexterity saving throw.
*@MoMMI 1d20+0*
*Results: 1 = 1*""")
register_help(__name__, "magic8ball", """Unable to make important project decisions responsibly?
Need some reliable help from our lord and saviour RNGesus?
Simple, just run @MoMMI magic 'Do I delete the Discord server?' and let NT's latest proven MoMMI Random Number Generator Technology™ decide for you.
*Nanotrasen is not liable for any damages caused - material, bodily or psychologically - as a result of poor decision making as a result of the responses from this feature.*""")
register_help(__name__, "pick", """Man can you believe this? People actually want to do *fair* 50/50 picks between things? Kids these days.
Fine, just run @MoMMI pick(a,b,c) with as many comma separated values as you need. Normies.""")
|
mit
| -9,183,143,027,374,585,000 | 37.268041 | 434 | 0.651401 | false |
openqt/algorithms
|
leetcode/python/lc919-complete-binary-tree-inserter.py
|
1
|
2138
|
# coding=utf-8
import unittest
"""919. Complete Binary Tree Inserter
https://leetcode.com/problems/complete-binary-tree-inserter/description/
A _complete_ binary tree is a binary tree in which every level, except
possibly the last, is completely filled, and all nodes are as far left as
possible.
Write a data structure `CBTInserter` that is initialized with a complete
binary tree and supports the following operations:
* `CBTInserter(TreeNode root)` initializes the data structure on a given tree with head node `root`;
* `CBTInserter.insert(int v)` will insert a `TreeNode` into the tree with value `node.val = v` so that the tree remains complete, **and returns the value of the parent of the inserted`TreeNode`** ;
* `CBTInserter.get_root()` will return the head node of the tree.
**Example 1:**
**Input:** inputs = ["CBTInserter","insert","get_root"], inputs = [[[1]],[2],[]]
**Output:** [null,1,[1,2]]
**Example 2:**
**Input:** inputs = ["CBTInserter","insert","insert","get_root"], inputs = [[[1,2,3,4,5,6]],[7],[8],[]]
**Output:** [null,3,4,[1,2,3,4,5,6,7,8]]
**Note:**
1. The initial given tree is complete and contains between `1` and `1000` nodes.
2. `CBTInserter.insert` is called at most `10000` times per test case.
3. Every value of a given or inserted node is between `0` and `5000`.
Similar Questions:
"""
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class CBTInserter(object):
    def __init__(self, root):
        """
        :type root: TreeNode
        """
        # Nodes kept in breadth-first order; the parent of nodes[i] is nodes[(i - 1) // 2].
        self.nodes = []
        queue = [root]
        while queue:
            node = queue.pop(0)
            self.nodes.append(node)
            queue.extend(child for child in (node.left, node.right) if child)
    def insert(self, v):
        """
        :type v: int
        :rtype: int
        """
        # TreeNode is supplied by the judge (see the commented definition above).
        node = TreeNode(v)
        self.nodes.append(node)
        parent = self.nodes[(len(self.nodes) - 2) // 2]
        if parent.left is None:
            parent.left = node
        else:
            parent.right = node
        return parent.val
    def get_root(self):
        """
        :rtype: TreeNode
        """
        return self.nodes[0]
# Your CBTInserter object will be instantiated and called as such:
# obj = CBTInserter(root)
# param_1 = obj.insert(v)
# param_2 = obj.get_root()
def test(self):
pass
if __name__ == "__main__":
unittest.main()
|
gpl-3.0
| -8,482,655,244,249,253,000 | 22.875 | 199 | 0.600561 | false |
Fidge123/league-unlock-challenge
|
src/UpdateHallOfFame.py
|
1
|
1936
|
#!/usr/bin/env python3
"""Take match and player data, get champ and save it to the database"""
from os import environ
import psycopg2
# with open("db_config") as file:
# HOST = file.readline().strip()
# DBNAME = file.readline().strip()
# USER = file.readline().strip()
# PASS = file.readline().strip()
#
# CONN_STRING = "host=" + HOST + " dbname=" + DBNAME + " user=" + USER + " password=" + PASS
CONN_STRING = environ["DATABASE_URL"]
# local way
# "host=" + environ['HOST'] + " dbname=" + environ['DBNAME'] + " user=" + environ['USER'] + " password=" + environ['PW']
HOF_UPDATE = "UPDATE player_halloffame SET (playerid, region) = (%s,%s) WHERE hofid = %s AND place = %s;"
def update():
"""Update Hall of Fame data"""
conn = psycopg2.connect(CONN_STRING)
cursor = conn.cursor()
for hofid in range(1, 9):
if hofid == 1:
query = "SELECT id, region, ((kills + assists) / GREATEST(deaths, 1) * 1.0) AS kda FROM player ORDER BY kda DESC LIMIT 3;"
elif hofid == 2:
query = "SELECT id, region FROM player ORDER BY assists DESC LIMIT 3;"
elif hofid == 3:
query = "SELECT id, region FROM player ORDER BY kills DESC LIMIT 3;"
elif hofid == 4:
continue
elif hofid == 5:
query = "SELECT id, region FROM player ORDER BY minion DESC LIMIT 3;"
elif hofid == 6:
continue
elif hofid == 7:
query = "SELECT id, region FROM player ORDER BY highestcrit DESC LIMIT 3;"
elif hofid == 8:
query = "SELECT id, region FROM player ORDER BY ccduration DESC LIMIT 3;"
cursor.execute(query)
result = cursor.fetchall()
place = 1
for player in result:
player_id = player[0]
region = player[1]
            data = (player_id, region, hofid, place)
            cursor.execute(HOF_UPDATE, data)
            place += 1
    # psycopg2 does not autocommit, so the updates must be committed explicitly
    # or they are lost when the connection closes.
    conn.commit()
    cursor.close()
    conn.close()
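if __name__ == "__main__":
    # Hedged entry point (added; not part of the original file): run a single
    # update pass when the script is executed directly.
    update()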
|
gpl-3.0
| -2,136,385,206,930,364,400 | 36.960784 | 134 | 0.576963 | false |
grantdelozier/TextGWR
|
GWRMain.py
|
1
|
14380
|
import sys
if len(sys.argv) >= 3:
#try:
print sys.argv
args = sys.argv
mode_arg = args[args.index("-mode")+1]
print mode_arg
#############Build Reference File Mode###############
if mode_arg.lower() == "build_ref_files":
import BuildRef
print "Building Reference Files"
tf = args[args.index("-tf")+1]
print tf
rf_std_out = args[args.index("-rf_std_out")+1]
print rf_std_out
rf_obs_out = args[args.index("-rf_obs_out")+1]
print rf_obs_out
if '-wordlist' in args:
wordlist = args[args.index("-wordlist")+1]
if '-listuse' in args:
listuse = args[args.index("-listuse")+1]
else: listuse = 'NA'
else: wordlist = 'any'
BuildRef.Build_ref_files(tf, rf_std_out, rf_obs_out, wordlist, listuse)
print "~~~~~~~~~Building Complete~~~~~~~~"
print "Check: ", rf_std_out, " AND ", rf_obs_out
#############Create Weighted(u) matrix and Y(u) vector files################
if mode_arg.lower() == "create_wu_y":
import CreateWu_Y
print "Creating Weight and Y vector files"
try:
if '-kern' in args:
fullarg = args[args.index("-kern")+1]
kerntype = fullarg[:fullarg.rfind('_')]
print kerntype
dist = float(fullarg[fullarg.rfind('_')+1:])
print dist
else:
kerntype = 'quartic'
dist = 900000.0
except:
            print "Kernel Argument is not formatted correctly"
print "it should be something like quartic_900000 or epanech_800000 (units must be meters)"
print "run with -help for more options"
sys.exit("Error")
try:
ulist = (args[args.index("-ulist")+1]).split(',')
except:
print "Your ulist is not formatted correctly"
print "it should be something like 400,8,3000 with no spaces between the numbers"
sys.exit("Error")
try:
ptbl = args[args.index("-ptbl")+1]
except:
print "ERROR ON -ptbl argument"
print "This argument should contain the name of the table which was created using DB_Load"
sys.exit("Error")
if '-pointgrid' in args:
pointgrid = args[args.index("-pointgrid")+1]
else: pointgrid = 'pointgrid_5_clip'
try:
conn = args[args.index('-conn')+1]
except:
print "Problem parsing the connection information provided"
if '-zeroed' in args:
zval = args[args.index('-zeroed')+1]
if zval.lower() == 'f':
zeroed = False
else: zeroed = True
else: zeroed = True
rf_obs_in = args[args.index("-rf_obs_in")+1]
w_y_direct = args[args.index("-wu_y_dir_out")+1]
CreateWu_Y.create(w_y_direct, ulist, kerntype, dist, conn, ptbl, pointgrid, zeroed, rf_obs_in)
#################Create and Load Database With People/Documents####################
if mode_arg.lower() == "db_load":
import DB_Load
print "Beginning DB Loading Process"
if '-tf' in args:
f = args[args.index("-tf")+1]
elif '-df' in args:
f = args[args.index("-df")+1]
elif '-tstf' in args:
f = args[args.index("-tstf")+1]
tbl_name = args[args.index("-ptbl")+1]
try:
conn = args[args.index('-conn')+1]
except:
print "Problem parsing the connection information provided"
DB_Load.Load(f, tbl_name, conn)
#################Train the prediction model on given file using GWR####################
if mode_arg.lower() == "train":
import Train
print "Beginning GWR Train Process"
if '-tf' in args:
f = args[args.index("-tf")+1]
elif '-df' in args:
f = args[args.index("-df")+1]
elif '-tstf' in args:
f = args[args.index("-tstf")+1]
rf_obs_in = args[args.index("-rf_obs_in")+1]
rf_std_in = args[args.index("-rf_std_in")+1]
wu_y_direct = args[args.index("-wu_y_dir_in")+1]
b_direct = args[args.index("-b_dir_out")+1]
if '-lam' in args:
lam = float(args[args.index("-lam")+1])
else: lam = 0
try:
ulist = (args[args.index("-ulist")+1]).split(',')
except:
print "Your ulist is not formatted correctly"
print "it should be something like 400,8,3000 with no spaces between the numbers"
sys.exit("Error")
try:
if '-kern' in args:
fullarg = args[args.index("-kern")+1]
kerntype = fullarg[:fullarg.rfind('_')]
print kerntype
dist = float(fullarg[fullarg.rfind('_')+1:])
print dist
else:
kerntype = 'quartic'
dist = 900000.0
except:
            print "Kernel Argument is not formatted correctly"
print "it should be something like quartic_900000 or epanech_800000 (units must be meters)"
print "run with -help for more options"
sys.exit("Error")
Train.train(f, rf_obs_in, rf_std_in, wu_y_direct, ulist, kerntype, lam, b_direct)
if mode_arg.lower() == "morans_calc":
import MCV1
print "Beginning Morans Calc Process"
if '-tf' in args:
f = args[args.index("-tf")+1]
try:
if '-kern' in args:
fullarg = args[args.index("-kern")+1]
kerntype = fullarg[:fullarg.rfind('_')]
print kerntype
dist = float(fullarg[fullarg.rfind('_')+1:])
print dist
else:
kerntype = 'quartic'
dist = 100000.0
except:
            print "Kernel Argument is not formatted correctly"
print "it should be something like quartic_900000 or uniform_50000 (units must be meters)"
print "run with -help for more options"
sys.exit("Error")
if "-pointgrid" in args:
pointgrid = args[args.index("-pointgrid")+1]
else: pointgrid = 'pointgrid_5_clip'
ptbl = args[args.index("-ptbl")+1]
outmorans = args[args.index("-outmoranfile")+1]
MCV1.calc(f, ptbl, pointgrid, kerntype, dist, outmorans)
if mode_arg.lower() == "gi_calc":
import GiStatV1
print "Beginning Getis Ord Gi* Statistic Calculation"
if '-tf' in args:
f = args[args.index("-tf")+1]
elif '-df' in args:
f = args[args.index("-df")+1]
elif '-tstf' in args:
f = args[args.index("-tstf")+1]
if '-wordlist' in args:
wordlist = args[args.index("-wordlist")+1]
if '-listuse' in args:
listuse = args[args.index("-listuse")+1]
else: listuse = 'NA'
else: wordlist = 'any'
try:
conn = args[args.index('-conn')+1]
except:
print "Problem parsing the connection information provided"
try:
ptbl = args[args.index("-ptbl")+1]
except:
print "ERROR ON -ptbl argument"
print "This argument should contain the name of the table which was created using DB_Load"
sys.exit("Error")
try:
if '-kern' in args:
fullarg = args[args.index("-kern")+1]
kerntype = fullarg[:fullarg.rfind('_')].lower()
print kerntype
dist = float(fullarg[fullarg.rfind('_')+1:])
print dist
else:
kerntype = 'quartic'
dist = 900000.0
except:
            print "Kernel Argument is not formatted correctly"
print "it should be something like quartic_900000 or epanech_800000 (units must be meters)"
print "run with -help for more options"
sys.exit("Error")
if "-pointgrid" in args:
pointgrid = args[args.index("-pointgrid")+1]
else: pointgrid = "None"
outf = args[args.index("-gi_out")+1]
GiStatV1.calc(f, ptbl, kerntype, dist, conn, outf, wordlist, listuse, pointgrid)
if mode_arg.lower() == "test":
import Test
if '-tf' in args:
f = args[args.index("-tf")+1]
elif '-df' in args:
f = args[args.index("-df")+1]
elif '-tstf' in args:
f = args[args.index("-tstf")+1]
rf_std_in = args[args.index("-rf_std_in")+1]
b_direct = args[args.index("-b_dir_in")+1]
try:
ulist = (args[args.index("-ulist")+1]).split(',')
except:
print "Your ulist is not formatted correctly"
print "it should be something like 400,8,3000 with no spaces between the numbers"
sys.exit("Error")
try:
if '-kern' in args:
fullarg = args[args.index("-kern")+1]
kerntype = fullarg[:fullarg.rfind('_')]
print kerntype
dist = float(fullarg[fullarg.rfind('_')+1:])
print dist
else:
kerntype = 'quartic'
dist = 900000.0
except:
            print "Kernel Argument is not formatted correctly"
print "it should be something like quartic_900000 or epanech_800000 (units must be meters)"
print "run with -help for more options"
sys.exit("Error")
Test.test(f, rf_std_in, b_direct, ulist, kerntype)
#except:
# print "ERROR: THERE WAS A PROBLEM INTERPRETING THE ARGUMENTS"
# print "Must Specify -mode"
# print "Execute GWRMain.py -help for information"
elif "-help" in sys.argv:
print "---------------------"
print "MODE ARGUMENTS"
print "-mode"
print "db_load ((-tf OR -df OR -tstf), -conn, -tbl)"
print "Build_ref_files (-tf, -rf_std_out, -rf_obs_out, -wordlist(OPTIONAL))"
print "NOT FUNCTIONAL: Create_Wu ((-tf OR -df OR -tstf), -kern, -ulist, -wu_dir_out)"
print "NOT FUNCTIONAL: Create_Y ((-tf OR -df OR -tstf), -ulist, -y_dir_out)"
    print "Create_Wu_Y (-ptbl, -conn, -pointgrid(OPTIONAL), -kern(OPTIONAL), -zeroed(OPTIONAL), -ulist, -wu_y_dir_out, -rf_obs_in)"
print "Morans_Calc (-ptbl, -pointgrid, -tf, -outmoranfile, -kern, -rf_std_in, -wordlist(OPTIONAL))"
print "Train (-tf, (-wu_y_dir_in OR (-y_dir_in AND -wu_dir_in), -rf_std_in, -rf_obs_in, -ulist, -b_dir_out, -lambda))"
print "Gi_Calc (-tf, -ptbl, -conn, -kern, -gi_out, -pointgrid(OPTIONAL))"
print "NOT FUNCTIONAL: Test (-tstf, -rf_std_in, -b_dir_in, -pred_out)"
print "NOT FUNCTIONAL: Train_Test (-tf, -tstf, (-wu_y_dir_in OR (-y_dir_in AND -wu_dir_in), -rf_std_in, -rf_obs_in, -ulist, -b_dir_out, -pred_out, -lambda))"
print "---------------------"
print "Train File"
print "-tf"
print "absolute path of train file"
print "---------------------"
print "Devel File"
print "-df"
print "absolute path of devel file"
print "---------------------"
print "Test File"
print "-tstf"
print "absolute path of test file"
print "---------------------"
print "Standard Deviation Reference File (in)"
print "-rf_std_in"
print "absolute path of std_dev reference"
print "---------------------"
print "Standard Deviation Reference File (out)"
print "-rf_std_out"
print "absolute path of std_dev reference"
print "---------------------"
print "Observation (aka people, users) Reference File (in)"
print "-rf_obs_in"
print "absolute path of std_dev reference"
print "---------------------"
print "Observation (aka people, users) Reference File (out)"
print "-rf_obs_out"
print "absolute path of std_dev reference"
print "---------------------"
print "Weight Matrix Directory (out)"
print "-wu_dir_out"
print "---------------------"
print "Weight Matrix Directory (in)"
print "-wu_dir_in"
print "---------------------"
print "Y(u) vector Directory (out)"
print "-y_dir_out"
print "---------------------"
print "Y(u) vector Directory (in)"
print "-y_dir_in"
print "---------------------"
print "Weight Matrix and Y(u) vector Directory (out)"
print "-wu_y_dir_out"
print "---------------------"
print "Weight Matrix and Y(u) vector Directory (in)"
print "-wu_y_dir_in"
print "---------------------"
print "Ulist: a list of grid point id's; a different regression is trained for each one"
print "-ulist"
print "e.g. -ulist 900,2000,2100,4000,5000"
print "---------------------"
print "Score Files (out)"
print "-b_dir_out"
print "directory where score files will be written to"
print "---------------------"
print "Score Files (in)"
print "-b_dir_in"
print "directory where score files will be read from"
print "---------------------"
print "lambda (cost value), default set to 1"
print "-lambda"
print "---------------------"
print "Predictions Out"
print "-pred_out"
print "Absolute path of a file predictions written to"
print "---------------------"
print "Kernel Function (OPTIONAL)(defaults to quartic_900000) (<method>_<number_of_meters>)"
print "-kern"
print "e.g. quartic, epanech"
print "---------------------"
print "-Zeroed Kernel (OPTIONAL)"
print "-zeroed"
print "e.g. -zeroed F"
print "---------------------"
print "-Person Table: name of person table that you are creating/reading from in postgres"
print "-ptbl"
print "i.e. do not begin with symbols/numbers and avoid upper case"
print "---------------------"
print "-Word List File: name of a file that contains words (one per line) that you want to include in the model"
print "-wordlist"
print "OPTIONAL: if left unspecified will default to all possible words in the train file"
print "Should be an absolute path"
else:
    print "###ERROR###: You did not specify enough arguments"
print "Try -help"
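# Hedged example invocations (added note, not part of the original script); the
# file paths, table name and connection string below are placeholders only:
#   python GWRMain.py -mode db_load -tf /data/train.txt -ptbl person_train \
#       -conn "dbname=gwr user=postgres password=secret"
#   python GWRMain.py -mode build_ref_files -tf /data/train.txt \
#       -rf_std_out /data/ref_std.txt -rf_obs_out /data/ref_obs.txt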
|
apache-2.0
| -2,837,285,821,625,328,600 | 33.15677 | 161 | 0.525174 | false |
jeroanan/Aquarius
|
tests/TestAquarius.py
|
1
|
4439
|
import unittest
from unittest.mock import Mock
from aquarius.Aquarius import Aquarius
from aquarius.Harvester import Harvester
from aquarius.Interactor import Interactor
from aquarius.InteractorFactory import InteractorFactory
from aquarius.interactors.AddBookInteractor import AddBookInteractor
from aquarius.interactors.GetBookDetailsInteractor import GetBookDetailsInteractor
from aquarius.interactors.GetBookTypeInteractor import GetBookTypeInteractor
from aquarius.interactors.ListBooksByFirstLetterInteractor import ListBooksByFirstLetterInteractor
from aquarius.output.web.Web import Web
class TestAquarius(unittest.TestCase):
def setUp(self):
self.setup_interactors()
self.__app = Aquarius("dummy", "whatever", self.__interactor_factory)
self.__setup_harvester_mock()
self.__gotCallback = False
def setup_interactors(self):
self.__search_book_interactor = Mock(Interactor)
self.__add_book_interactor = Mock(AddBookInteractor)
self.__list_books_by_first_letter_interactor = Mock(ListBooksByFirstLetterInteractor)
self.__get_book_details_interactor = Mock(GetBookDetailsInteractor)
self.__get_book_type_interactor = Mock(GetBookTypeInteractor)
self.__interactor_factory = InteractorFactory()
self.__interactor_factory.get_search_book_interactor = Mock(return_value=self.__search_book_interactor)
self.__interactor_factory.get_add_book_interactor = Mock(return_value=self.__add_book_interactor)
self.__interactor_factory.get_list_books_by_first_letter_interactor = \
Mock(return_value=self.__list_books_by_first_letter_interactor)
self.__interactor_factory.get_book_details_interactor = Mock(return_value=self.__get_book_details_interactor)
self.__interactor_factory.get_book_type_interactor = Mock(return_value=self.__get_book_type_interactor)
def __setup_harvester_mock(self):
self.__harvester = harvester = Harvester()
harvester.do_harvest = Mock()
self.__app.set_harvester(harvester)
def test_search_books_uses_interactor_factory(self):
self.__app.search_books("")
self.assertTrue(self.__interactor_factory.get_search_book_interactor.called)
def test_search_books_calls_interactor(self):
self.__app.search_books("")
self.assertTrue(self.__search_book_interactor.execute.called)
def test_list_books_by_first_letter_uses_interactor_factory(self):
self.__app.list_books_by_first_letter("b")
self.assertTrue(self.__interactor_factory.get_list_books_by_first_letter_interactor.called)
def test_list_books_by_first_letter_calls_interactor(self):
self.__app.list_books_by_first_letter("b")
self.assertTrue(self.__list_books_by_first_letter_interactor.execute.called)
def test_get_book_details_uses_interactor_factory(self):
self.__app.get_book_details(0)
self.assertTrue(self.__interactor_factory.get_book_details_interactor.called)
def test_get_book_details_calls_interactor(self):
self.__app.get_book_details(0)
self.assertTrue(self.__get_book_details_interactor.execute.called)
def test_get_book_type_uses_interactor_factory(self):
self.__app.get_book_type("EPUB")
self.assertTrue(self.__interactor_factory.get_book_type_interactor.called)
def test_get_book_type_calls_interactor(self):
self.__app.get_book_type("EPUB")
self.assertTrue(self.__get_book_type_interactor.execute.called)
def test_add_book_uses_interactor_factory(self):
self.__app.add_book(None)
self.assertTrue(self.__interactor_factory.get_add_book_interactor.called)
def test_add_book_calls_interactor(self):
self.__app.add_book(None)
self.assertTrue(self.__add_book_interactor.execute.called)
def test_call_main(self):
output = Web(self.__app, None)
output.main = Mock()
self.__app.set_output(output)
self.__app.main()
self.assertTrue(output.main.called)
def test_calling_harvest_books_calls_harvester(self):
self.__app.harvest_books()
self.assertTrue(self.__harvester.do_harvest.called)
def test_calling_harvest_books_does_not_call_harvester_when_is_harvesting_set(self):
self.__app.is_harvesting = True
self.__app.harvest_books()
self.assertFalse(self.__harvester.do_harvest.called)
|
gpl-3.0
| -6,790,716,137,706,200,000 | 45.25 | 117 | 0.713449 | false |
Mandelag/util
|
AGSMSLayerDownloader/consolidator.py
|
1
|
1493
|
import os, sys, arcpy, json
def consolidate(directory, output, output_name):
    scriptpath = sys.path[0]
    inputdir = os.path.join(scriptpath, directory)
    outputdir = os.path.join(scriptpath, output)
    files = os.listdir(inputdir)
    features = {}
    counter = 0
    group = 0
    once = True
    output_lists = []
    for filename in files:
        file_full_name = os.path.join(inputdir, filename)
        #arcpy.JSONToFeatures_conversion(file_full_name, os.path.join(outputdir, filename))
        with open(file_full_name) as input_file:
            if once:
                # The first file of a group supplies the full GeoJSON skeleton.
                features = json.load(input_file)
                once = False
            else:
                features["features"].extend(json.load(input_file)["features"])
        counter = counter + 1
        if counter >= 500:
            group = group + 1
            # Build the group file name from the base name each time so the
            # "_<group>.json" suffixes do not accumulate, and write it to the
            # output directory.
            group_file = os.path.join(outputdir, output_name + "_" + str(group) + ".json")
            with open(group_file, "w+") as outputfile:
                outputfile.write(json.dumps(features))
            output_lists.append(group_file)
            print(group_file)
            counter = 0
            features = {}
            once = True
    if not once:
        # Flush the final, partially filled group as well.
        group = group + 1
        group_file = os.path.join(outputdir, output_name + "_" + str(group) + ".json")
        with open(group_file, "w+") as outputfile:
            outputfile.write(json.dumps(features))
        output_lists.append(group_file)
        print(group_file)
    return output_lists
if __name__ == "__main__":
if len(sys.argv) > 3:
consolidate(sys.argv[1], sys.argv[2], sys.argv[3])
else:
print("Usage: \n python consolidator.py [input_folder] [output_folder] [output_name]")
exit()
|
gpl-3.0
| -4,808,712,708,481,130,000 | 30.104167 | 97 | 0.556597 | false |
boztalay/MeshnetTest
|
node.py
|
1
|
7721
|
from Tkinter import *
from basics import *
NODE_RADIUS = 5
NODE_COLOR = "green"
NODE_PENDING_COLOR = "purple"
CONNECTION_COLOR = "yellow"
PACKETS_SENT_MAX = 100
class NodeError(Exception):
pass
class Node:
def __init__(self, location):
self.location = location
self.connections = []
self.packetBuffer = []
self.packetsSent = []
self.connectionsTriedForDests = {}
self.connectionsFailedForDests = {}
self.isPendingAction = False
def addPacketToBuffer(self, packet, sourceNode):
# If this is the first time this node has gotten a packet for this destination,
# this will ensure that the connection to the node that first send this node
# a packet with this destination will be at the front of the list of connections
# to try for it. This makes sure that 1) the packet isn't just sent back to that node
# and 2) we can default to sending the packet back to it if all other connections
# have been tried. If the packet is marked as hitting a dead end, we add that
# connection to a list of failed connections for that destination.
if packet.destNode not in self.connectionsTriedForDests:
connectionsTriedForDest = []
self.connectionsTriedForDests[packet.destNode] = connectionsTriedForDest
else:
connectionsTriedForDest = self.connectionsTriedForDests[packet.destNode]
if packet.destNode not in self.connectionsFailedForDests:
connectionsFailedForDest = []
self.connectionsFailedForDests[packet.destNode] = connectionsFailedForDest
else:
connectionsFailedForDest = self.connectionsFailedForDests[packet.destNode]
connectionPacketCameFrom = None
for connection in self.connections:
if connection.destNode is sourceNode:
connectionPacketCameFrom = connection
if connectionPacketCameFrom is not None:
if connectionPacketCameFrom not in connectionsTriedForDest:
connectionsTriedForDest.append(connectionPacketCameFrom)
if packet.foundDeadEnd:
packet.foundDeadEnd = False
if connectionPacketCameFrom not in connectionsFailedForDest:
connectionsFailedForDest.append(connectionPacketCameFrom)
self.packetBuffer.append(packet)
def setPendingAction(self):
self.isPendingAction = True
def clearPendingAction(self):
self.isPendingAction = False
def connectTo(self, destNode):
self.clearPendingAction()
if destNode is self:
raise NodeError("Tried to connect a node to itself")
for connection in self.connections:
if connection.destNode is destNode:
raise NodeError("Tried to connect to a node that already has a connection")
self.connections.append(Connection(self, destNode))
def disconnectFrom(self, destNode):
for connection in self.connections:
if connection.destNode is destNode:
self.connections.remove(connection)
return
raise NodeError("Tried to disconnect from a node that doesn't have a connection")
def update(self):
unsendablePackets = []
for packet in self.packetBuffer:
if packet.destNode is self:
self.receivePacket(packet)
continue
sortedConnectionsForDest = sorted(self.connections, key=lambda connection: connection.destNode.distanceTo(packet.destNode))
connectionsTriedForDest = self.connectionsTriedForDests[packet.destNode]
connectionsFailedForDest = self.connectionsFailedForDests[packet.destNode]
connectionsToIgnore = []
if len(connectionsTriedForDest) > 0:
connectionsToIgnore.append(connectionsTriedForDest[0])
if packet in self.packetsSent:
# This means this node got a packet that it's already sent out,
# so there's probably a cycle in the connection it tried last.
# This will remove that connection from consideration (the last one tried)
connectionsToIgnore.append(connectionsTriedForDest[-1])
couldSend = False
for connection in sortedConnectionsForDest:
if connection not in connectionsFailedForDest and connection not in connectionsToIgnore:
connection.sendPacket(packet)
connectionsTriedForDest.append(connection)
couldSend = True
self.packetsSent.append(packet)
if len(self.packetsSent) > PACKETS_SENT_MAX:
self.packetsSent.pop(0)
break
if not couldSend:
if len(connectionsTriedForDest) > 0:
# No connections left to try, send it back to the node we got it from
# Index 0 will always be the first node that sent a packet with this destination
# Don't add the packet to the packets sent list, we aren't sending it on
packet.foundDeadEnd = True
connectionsTriedForDest[0].sendPacket(packet)
elif packet not in unsendablePackets:
unsendablePackets.append(packet)
self.packetBuffer = unsendablePackets
def updateConnections(self):
for connection in self.connections:
connection.update()
def draw(self, canvas):
nodeColor = NODE_COLOR
if self.isPendingAction:
nodeColor = NODE_PENDING_COLOR
canvas.create_rectangle(self.location.x - NODE_RADIUS, self.location.y - NODE_RADIUS,
self.location.x + NODE_RADIUS, self.location.y + NODE_RADIUS, outline=nodeColor)
if len(self.packetBuffer) > 0:
innerColor = self.packetBuffer[0].makeColor()
canvas.create_rectangle(self.location.x - (NODE_RADIUS - 2), self.location.y - (NODE_RADIUS - 2),
self.location.x + (NODE_RADIUS - 2), self.location.y + (NODE_RADIUS - 2), fill=innerColor)
def receivePacket(self, packet):
print "Got a packet!"
def distanceTo(self, otherNode):
return self.location.distanceTo(otherNode.location)
class Connection:
def __init__(self, sourceNode, destNode):
self.sourceNode = sourceNode
self.destNode = destNode
self.packetsToSend = []
def sendPacket(self, packet):
self.packetsToSend.append(packet)
def update(self):
while len(self.packetsToSend) > 0:
self.destNode.addPacketToBuffer(self.packetsToSend.pop(), self.sourceNode)
def draw(self, canvas):
canvas.create_line(self.sourceNode.location.x, self.sourceNode.location.y,
self.destNode.location.x, self.destNode.location.y, fill = CONNECTION_COLOR)
class Packet:
def __init__(self, sourceNode, destNode, message):
self.sourceNode = sourceNode
self.destNode = destNode
self.foundDeadEnd = False
self.message = message
self.color = None
def makeColor(self):
if self.color is not None:
return self.color
color = self.sourceNode.location.x & 0x3f
color = color << 6
color |= self.sourceNode.location.y & 0x3f
color = color << 6
color |= self.destNode.location.x & 0x3f
color = color << 6
color |= self.destNode.location.y & 0x3f
self.color = "#%0.6X" % color
return self.color
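# Rough usage sketch (comments only; the Node constructor and Location helper are
# defined earlier in this file, so their exact signatures are assumed here):
#   a, b = <two Node instances at different locations>
#   a.connectTo(b)                                  # one-way link a -> b
#   a.addPacketToBuffer(Packet(a, b, "hello"), a)   # queue a packet at a
#   a.update(); a.updateConnections(); b.update()   # route, transfer, deliver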
|
mit
| 7,091,429,230,517,577,000 | 38.594872 | 135 | 0.634892 | false |
zhester/hzpy
|
examples/parseriff.py
|
1
|
2368
|
#!/usr/bin/env python
"""
Example RIFF (WAV contents) Data Parser
Sample data is written to a CSV file for analysis.
If matplotlib and numpy are available, signal plots (DFTs) are generated.
"""
import math
import os
import struct
import wave
try:
import matplotlib.pyplot as plot
import numpy
import numpy.fft as fft
except ImportError:
numeric_packages = False
else:
numeric_packages = True
#=============================================================================
def frame2mag( frame ):
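    # Interpret the two raw bytes as unsigned I/Q components and return their vector magnitude.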
( i, q ) = struct.unpack( '<BB', frame )
return math.sqrt( ( i ** 2 ) + ( q ** 2 ) )
#=============================================================================
def main( argv ):
""" Script execution entry point """
# check usage
if len( argv ) < 2:
print 'You must specify at least an input file.'
return 0
# start and length
start = 0
length = 1024
if len( argv ) > 2:
start = int( argv[ 2 ] )
if len( argv ) > 3:
length = int( argv[ 3 ] )
# open file using wave module
wfile = wave.open( argv[ 1 ], 'rb' )
# print file info
print 'Channels: %d\nSample width: %d\nFrame rate: %d\nFrames: %d' % (
wfile.getnchannels(),
wfile.getsampwidth(),
wfile.getframerate(),
wfile.getnframes()
)
# check for starting offset
if start > 0:
junk = wfile.readframes( start )
# read frames
frames = wfile.readframes( length )
samples = []
for i in range( length ):
index = i * 2
samples.append( frame2mag( frames[ index : ( index + 2 ) ] ) )
# close wave file
wfile.close()
# plot
    if numeric_packages:
fft_data = fft.fft( samples[ : 1024 ] )
mags = numpy.absolute( fft_data )
mags_db = [ 20 * numpy.log10( mag ) for mag in mags ]
plot.figure( 1 )
plot.plot( samples )
plot.figure( 2 )
plot.plot( mags_db )
plot.show()
# output
oname = argv[ 1 ].replace( '.wav', '.csv' )
ofile = open( oname, 'wb' )
for sample in samples:
ofile.write( '%d\n' % sample )
ofile.close()
# Return success.
return 0
#=============================================================================
if __name__ == "__main__":
import sys
sys.exit( main( sys.argv ) )
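# Example invocation (the file name below is hypothetical):
#   python parseriff.py capture.wav 0 1024
# reads 1024 frames starting at frame 0, writes capture.csv and, when numpy and
# matplotlib are importable, shows the magnitude trace and its DFT.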
|
bsd-2-clause
| -7,917,740,915,021,355,000 | 22.919192 | 78 | 0.508868 | false |
AnnaWyszomirska/lesson1_1
|
tests/test_remove_contact_from_group.py
|
1
|
1949
|
from model.contact import Contact
from model.group import Group
import random
def test_remove_contact_from_group(app, db, orm):
if len(db.get_group_list()) == 0:
app.group.create(Group(name="New one"))
if len(db.get_contact_list()) == 0:
app.contact.add(Contact(firstname="Anna", middlename="Joanna", lastname="Wyszomirska", nickname="aneczka",
title="Title", company="New Company", address="My address information",
home="34725475263", mobile="32456234236", work="2364623645", fax="243256452",
email="aniawzs@wp.pl", email2="test1@gmail.com", email3="test2@gmail.com",
homepage="Test", birthyear="1990", annyear="2016",
bday="//div[@id='content']/form/select[1]//option[4]",
bmonth= "//div[@id='content']/form/select[2]//option[3]",
aday="//div[@id='content']/form/select[3]//option[19]",
amonth="//div[@id='content']/form/select[4]//option[3]",
address2="My second address ", privatephone="23415257354735",
comments="Brak uwag"
))
group_id = app.group.random_group_id()
contacts_in_group = app.contact.get_contacts_in_group(group_id)
if len(contacts_in_group) == 0:
ui_list = app.contact.get_contact_list()
contact = random.choice(ui_list)
app.contact.add_contact_into_group(contact.id, group_id)
contact = random.choice(contacts_in_group)
app.contact.remove_contacts_from_group(contact.id, group_id)
contact_ui = app.contact.get_contacts_in_group(group_id)
contact_orm = orm.get_contacts_in_group(Group(id=group_id))
assert sorted(contact_ui, key=Contact.id_or_max) == sorted(contact_orm, key=Contact.id_or_max)
|
apache-2.0
| 5,070,481,090,748,076,000 | 58.090909 | 114 | 0.567984 | false |
ext/slideshow-frontend
|
slideshow/lib/queue.py
|
1
|
3299
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from slideshow.lib.slide import Slide
from slideshow.settings import Settings
import slideshow.event as event
import cherrypy
def div_id(id):
if id >= 0:
return 'queue_%d' % id
elif id == -1:
return 'queue_int'
else:
raise ValueError, 'Invalid id for div_id: %d' % id
class Queue:
def __init__(self, c, id, name, loop):
self.id = id
self.div = div_id(id)
self.name = name
self.loop = loop == 1
self.slides = [Slide(queue=self, **x) for x in c.execute("""
SELECT
`id`,
DATETIME(`timestamp`) AS `timestamp`,
`path`,
`active`,
`assembler`,
`data`
FROM
`slide`
WHERE
`queue_id` = :queue
ORDER BY
`sortorder`
""", {'queue': id}).fetchall()]
def as_json(self):
return {
'id': self.id,
'name': self.name,
'loop': self.loop,
'readonly': self.id <= 1,
'slides': [x.as_json() for x in self.slides]
}
def __len__(self):
return len(self.slides)
def rename(self, c, name):
c.execute("""
UPDATE
`queue`
SET
`name` = :name
WHERE
`id` = :id
""", dict(id=self.id, name=name))
self.name = name
def all(c):
return [Queue(c, **x) for x in c.execute("""
SELECT
`id`,
`name`,
`loop`
FROM
`queue`
""").fetchall()]
def from_id(c, id):
row = c.execute("""
SELECT
`id`,
`name`,
`loop`
FROM
`queue`
WHERE
`id` = :id
LIMIT 1
""", dict(id=id)).fetchone()
if row is None:
return None
return Queue(c, **row)
def add(c, name):
c.execute("""
INSERT INTO `queue` (
`name`
) VALUES (
:name
)
""", dict(name=name))
row_id = c.last_row_id()
n = int(c.execute("SELECT COUNT(*) as `count` FROM `queue`").fetchone()['count'])
# if no previous queue (except default) existed, make this the active
if n == 3:
print 'derp'
activate(row_id)
return from_id(c, row_id)
def delete(c, id):
if id <= 0:
return False
c.execute("""
UPDATE
`slide`
SET
`queue_id` = 0
WHERE
`queue_id` = :id
""", dict(id=id))
c.execute("""
DELETE FROM
`queue`
WHERE
`id` = :id
""", dict(id=id))
return True
def activate(id):
settings = Settings()
with settings:
settings['Runtime.queue'] = id
settings.persist()
event.trigger('config.queue_changed', id)
def set_loop(id, state):
c = cherrypy.thread_data.db
c.execute("""
UPDATE
`queue`
SET
`loop` = :state
WHERE
`id` = :id
""", dict(id=id, state=state))
c.commit()
# to force reloading of queue settings
event.trigger('config.queue_changed', id)
|
agpl-3.0
| 2,793,161,652,640,297,000 | 20.562092 | 85 | 0.449227 | false |
marklar/fowles
|
py/ids_ventilator.py
|
1
|
1430
|
import re
import time
import zmq
#
# The input file is expected to have on each line:
# 1. a video_id
# 2. a tab
# 3. a channel_id
# The provided file (below: INPUT_FILE) matches that format.
#
# cfg
INPUT_FILE = "py/10_pairs_of_vid_and_chan_ids.txt"
PORT = 5557
# globals
context = None
pusher = None
def mk_addr(p):
return "tcp://*:%d" % (p)
def get_pusher():
global context, pusher
if pusher is None:
context = zmq.Context()
pusher = context.socket(zmq.PUSH)
pusher.bind( mk_addr(PORT) )
return pusher
def submit_json(msg):
get_pusher().send_json(msg)
def get_lines(fname):
with open(fname) as f:
return (ln.strip('\n') for ln in f.readlines())
def send(msg):
print "sending: %s" % (msg)
submit_json(msg)
def videos(vid_id):
send({'request': 'videos',
'id': vid_id})
def channels(chan_id):
send({'request': 'channels',
'id': chan_id})
def activities(chan_id):
send({'request': 'activities',
'channelId': chan_id})
def playlistItems(chan_id):
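    # By YouTube Data API convention, a channel's "uploads" playlist id is the
    # channel id with its leading 'UC' replaced by 'UU'.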
playlist_id = re.sub('^UC', 'UU', chan_id)
send({'request': 'playlistItems',
'playlistId': playlist_id})
def push_video_ids(fname):
for ln in get_lines(fname):
[vid_id, chan_id] = ln.split()
videos(vid_id)
channels(chan_id)
activities(chan_id)
playlistItems(chan_id)
push_video_ids(INPUT_FILE)
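# A minimal sketch of a matching PULL worker (hypothetical; not part of this repo):
#   import zmq
#   context = zmq.Context()
#   puller = context.socket(zmq.PULL)
#   puller.connect("tcp://localhost:%d" % PORT)
#   while True:
#       print "pulled: %s" % (puller.recv_json(),)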
|
mit
| -6,243,151,740,370,340,000 | 20.029412 | 60 | 0.599301 | false |
ThomasSweijen/TPF
|
doc/sphinx/conf.py
|
1
|
28022
|
# -*- coding: utf-8 -*-
#
# Yade documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 16 21:49:34 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# relevant posts to sphinx ML
# http://groups.google.com/group/sphinx-dev/browse_thread/thread/b4fbc8d31d230fc4
# http://groups.google.com/group/sphinx-dev/browse_thread/thread/118598245d5f479b
#####################
## custom yade roles
#####################
##
## http://docutils.sourceforge.net/docs/howto/rst-roles.html
import sys, os, re
from docutils import nodes
from sphinx import addnodes
from sphinx.roles import XRefRole
import docutils
#
# needed for creating hyperlink targets.
# it should be cleand up and unified for both LaTeX and HTML via
# the pending_xref node which gets resolved to real link target
# by sphinx automatically once all docs have been processed.
#
# xrefs: http://groups.google.com/group/sphinx-dev/browse_thread/thread/d719d19307654548
#
#
import __builtin__
if 'latex' in sys.argv: __builtin__.writer='latex'
elif 'html' in sys.argv: __builtin__.writer='html'
elif 'epub' in sys.argv: __builtin__.writer='epub'
else: raise RuntimeError("Must have 'latex', 'html' or 'epub' on the command line (hack for reference styles)")
def yaderef_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :yref:`` role, by making hyperlink to yade.wrapper.*. It supports :yref:`Link text<link target>` syntax, like usual hyperlinking roles."
id=rawtext.split(':',2)[2][1:-1]
txt=id; explicitText=False
m=re.match('(.*)\s*<(.*)>\s*',id)
if m:
explicitText=True
txt,id=m.group(1),m.group(2)
id=id.replace('::','.')
#node=nodes.reference(rawtext,docutils.utils.unescape(txt),refuri='http://beta.arcig.cz/~eudoxos/yade/doxygen/?search=%s'%id,**options)
#node=nodes.reference(rawtext,docutils.utils.unescape(txt),refuri='yade.wrapper.html#yade.wrapper.%s'%id,**options)
return [mkYrefNode(id,txt,rawtext,role,explicitText,lineno,options)],[]
def yadesrc_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :ysrc:`` role, making hyperlink to git repository webpage with that path. Supports :ysrc:`Link text<file/name>` syntax, like usual hyperlinking roles. If target ends with ``/``, it is assumed to be a directory."
id=rawtext.split(':',2)[2][1:-1]
txt=id
m=re.match('(.*)\s*<(.*)>\s*',id)
if m:
txt,id=m.group(1),m.group(2)
return [nodes.reference(rawtext,docutils.utils.unescape(txt),refuri='https://github.com/yade/trunk/blob/master/%s'%id)],[] ### **options should be passed to nodes.reference as well
# map modules to their html (rst) filenames. Used for sub-modules, where e.g. SpherePack is yade._packSphere.SpherePack, but is documented from yade.pack.rst
moduleMap={
'yade._packPredicates':'yade.pack',
'yade._packSpheres':'yade.pack',
'yade._packObb':'yade.pack'
}
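# e.g. with the html writer, a :yref:`yade._packSpheres.SpherePack` target resolves to
# yade.pack.html#yade._packSpheres.SpherePack (see mkYrefNode below).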
class YadeXRefRole(XRefRole):
#def process_link
def process_link(self, env, refnode, has_explicit_title, title, target):
print 'TARGET:','yade.wrapper.'+target
return '[['+title+']]','yade.wrapper.'+target
def mkYrefNode(target,text,rawtext,role,explicitText,lineno,options={}):
"""Create hyperlink to yade target. Targets starting with literal 'yade.' are absolute, but the leading 'yade.' will be stripped from the link text. Absolute tergets are supposed to live in page named yade.[module].html, anchored at #yade.[module2].[rest of target], where [module2] is identical to [module], unless mapped over by moduleMap.
Other targets are supposed to live in yade.wrapper (such as c++ classes)."""
writer=__builtin__.writer # to make sure not shadowed by a local var
import string
if target.startswith('yade.'):
module='.'.join(target.split('.')[0:2])
module2=(module if module not in moduleMap.keys() else moduleMap[module])
if target==module: target='' # to reference the module itself
uri=('%%%s#%s'%(module2,target) if writer=='latex' else '%s.html#%s'%(module2,target))
if not explicitText and module!=module2:
text=module2+'.'+'.'.join(target.split('.')[2:])
text=string.replace(text,'yade.','',1)
elif target.startswith('external:'):
exttarget=target.split(':',1)[1]
if not explicitText: text=exttarget
target=exttarget if '.' in exttarget else 'module-'+exttarget
uri=(('%%external#%s'%target) if writer=='latex' else 'external.html#%s'%target)
else:
uri=(('%%yade.wrapper#yade.wrapper.%s'%target) if writer=='latex' else 'yade.wrapper.html#yade.wrapper.%s'%target)
#print writer,uri
if 0:
refnode=addnodes.pending_xref(rawtext,reftype=role,refexplicit=explicitText,reftarget=target)
#refnode.line=lineno
#refnode+=nodes.literal(rawtext,text,classes=['ref',role])
return [refnode],[]
#ret.rawtext,reftype=role,
else:
return nodes.reference(rawtext,docutils.utils.unescape(text),refuri=uri,**options)
#return [refnode],[]
def ydefault_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :ydefault:`something` role. fixSignature handles it now in the member signature itself, this merely expands to nothing."
return [],[]
def yattrtype_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :yattrtype:`something` role. fixSignature handles it now in the member signature itself, this merely expands to nothing."
return [],[]
# FIXME: should return readable representation of bits of the number (yade.wrapper.AttrFlags enum)
def yattrflags_role(role,rawtext,text,lineno,inliner,options={},content=[]):
"Handle the :yattrflags:`something` role. fixSignature handles it now in the member signature itself."
return [],[]
from docutils.parsers.rst import roles
def yaderef_role_2(type,rawtext,text,lineno,inliner,options={},content=[]): return YadeXRefRole()('yref',rawtext,text,lineno,inliner,options,content)
roles.register_canonical_role('yref', yaderef_role)
roles.register_canonical_role('ysrc', yadesrc_role)
roles.register_canonical_role('ydefault', ydefault_role)
roles.register_canonical_role('yattrtype', yattrtype_role)
roles.register_canonical_role('yattrflags', yattrflags_role)
## http://sphinx.pocoo.org/config.html#confval-rst_epilog
rst_epilog = """
.. |yupdate| replace:: *(auto-updated)*
.. |ycomp| replace:: *(auto-computed)*
.. |ystatic| replace:: *(static)*
"""
import collections
def customExclude(app, what, name, obj, skip, options):
if name=='clone':
if 'Serializable.clone' in str(obj): return False
return True
#escape crash on non iterable __doc__ in some qt object
if hasattr(obj,'__doc__') and obj.__doc__ and not isinstance(obj.__doc__, collections.Iterable): return True
if hasattr(obj,'__doc__') and obj.__doc__ and ('|ydeprecated|' in obj.__doc__ or '|yhidden|' in obj.__doc__): return True
#if re.match(r'\b(__init__|__reduce__|__repr__|__str__)\b',name): return True
if name.startswith('_'):
if name=='__init__':
# skip boost classes with parameterless ctor (arg1=implicit self)
if obj.__doc__=="\n__init__( (object)arg1) -> None": return True
# skip undocumented ctors
if not obj.__doc__: return True
# skip default ctor for serializable, taking dict of attrs
if obj.__doc__=='\n__init__( (object)arg1) -> None\n\nobject __init__(tuple args, dict kwds)': return True
#for i,l in enumerate(obj.__doc__.split('\n')): print name,i,l,'##'
return False
return True
return False
def isBoostFunc(what,obj):
return what=='function' and obj.__repr__().startswith('<Boost.Python.function object at 0x')
def isBoostMethod(what,obj):
"I don't know how to distinguish boost and non-boost methods..."
return what=='method' and obj.__repr__().startswith('<unbound method ');
def replaceLaTeX(s):
# replace single non-escaped dollars $...$ by :math:`...`
# then \$ by single $
s=re.sub(r'(?<!\\)\$([^\$]+)(?<!\\)\$',r'\ :math:`\1`\ ',s)
return re.sub(r'\\\$',r'$',s)
def fixSrc(app,docname,source):
source[0]=replaceLaTeX(source[0])
def fixDocstring(app,what,name,obj,options,lines):
# remove empty default roles, which is not properly interpreted by docutils parser
for i in range(0,len(lines)):
lines[i]=lines[i].replace(':ydefault:``','')
lines[i]=lines[i].replace(':yattrtype:``','')
lines[i]=lines[i].replace(':yattrflags:``','')
#lines[i]=re.sub(':``',':` `',lines[i])
# remove signature of boost::python function docstring, which is the first line of the docstring
if isBoostFunc(what,obj):
l2=boostFuncSignature(name,obj)[1]
# we must replace lines one by one (in-place) :-|
# knowing that l2 is always shorter than lines (l2 is docstring with the signature stripped off)
for i in range(0,len(lines)):
lines[i]=l2[i] if i<len(l2) else ''
elif isBoostMethod(what,obj):
l2=boostFuncSignature(name,obj)[1]
for i in range(0,len(lines)):
lines[i]=l2[i] if i<len(l2) else ''
# LaTeX: replace $...$ by :math:`...`
# must be done after calling boostFuncSignature which uses original docstring
for i in range(0,len(lines)): lines[i]=replaceLaTeX(lines[i])
def boostFuncSignature(name,obj,removeSelf=False):
"""Scan docstring of obj, returning tuple of properly formatted boost python signature
(first line of the docstring) and the rest of docstring (as list of lines).
The rest of docstring is stripped of 4 leading spaces which are automatically
added by boost.
removeSelf will attempt to remove the first argument from the signature.
"""
doc=obj.__doc__
if doc==None: # not a boost method
return None,None
nname=name.split('.')[-1]
docc=doc.split('\n')
if len(docc)<2: return None,docc
doc1=docc[1]
# functions with weird docstring, likely not documented by boost
if not re.match('^'+nname+r'(.*)->.*$',doc1):
return None,docc
if doc1.endswith(':'): doc1=doc1[:-1]
strippedDoc=doc.split('\n')[2:]
# check if all lines are padded
allLinesHave4LeadingSpaces=True
for l in strippedDoc:
if l.startswith(' '): continue
allLinesHave4LeadingSpaces=False; break
# remove the padding if so
if allLinesHave4LeadingSpaces: strippedDoc=[l[4:] for l in strippedDoc]
for i in range(len(strippedDoc)):
# fix signatures inside docstring (one function with multiple signatures)
strippedDoc[i],n=re.subn(r'([a-zA-Z_][a-zA-Z0-9_]*\() \(object\)arg1(, |)',r'\1',strippedDoc[i].replace('->','→'))
# inspect dosctring after mangling
if 'getViscoelasticFromSpheresInteraction' in name and False:
print name
print strippedDoc
print '======================'
for l in strippedDoc: print l
print '======================'
sig=doc1.split('(',1)[1]
if removeSelf:
# remove up to the first comma; if no comma present, then the method takes no arguments
# if [ precedes the comma, add it to the result (ugly!)
try:
ss=sig.split(',',1)
if ss[0].endswith('['): sig='['+ss[1]
else: sig=ss[1]
except IndexError:
# grab the return value
try:
sig=') -> '+sig.split('->')[-1]
#if 'Serializable' in name: print 1000*'#',name
except IndexError:
sig=')'
return '('+sig,strippedDoc
def fixSignature(app, what, name, obj, options, signature, return_annotation):
#print what,name,obj,signature#,dir(obj)
if what=='attribute':
doc=unicode(obj.__doc__)
ret=''
m=re.match('.*:ydefault:`(.*?)`.*',doc)
if m:
typ=''
#try:
# clss='.'.join(name.split('.')[:-1])
# instance=eval(clss+'()')
# typ='; '+getattr(instance,name.split('.')[-1]).__class__.__name__
# if typ=='; NoneType': typ=''
#except TypeError: ##no registered converted
# typ=''
dfl=m.group(1)
m2=re.match(r'\s*\(\s*\(\s*void\s*\)\s*\"(.*)\"\s*,\s*(.*)\s*\)\s*',dfl)
if m2: dfl="%s, %s"%(m2.group(2),m2.group(1))
if dfl!='': ret+=' (='+dfl+'%s)'%typ
else: ret+=' (=uninitalized%s)'%typ
#m=re.match('.*\[(.{,8})\].*',doc)
#m=re.match('.*:yunit:`(.?*)`.*',doc)
#if m:
# units=m.group(1)
# print '@@@@@@@@@@@@@@@@@@@@@',name,units
# ret+=' ['+units+']'
return ret,None
elif what=='class':
ret=[]
if len(obj.__bases__)>0:
base=obj.__bases__[0]
while base.__module__!='Boost.Python':
ret+=[base.__name__]
if len(base.__bases__)>0: base=base.__bases__[0]
else: break
if len(ret):
return ' (inherits '+u' → '.join(ret)+')',None
else: return None,None
elif isBoostFunc(what,obj):
sig=boostFuncSignature(name,obj)[0] or ' (wrapped c++ function)'
return sig,None
elif isBoostMethod(what,obj):
sig=boostFuncSignature(name,obj,removeSelf=True)[0]
return sig,None
#else: print what,name,obj.__repr__()
#return None,None
from sphinx import addnodes
def parse_ystaticattr(env,attr,attrnode):
m=re.match(r'([a-zA-Z0-9_]+)\.(.*)\(=(.*)\)',attr)
if not m:
        print 100*'@'+' Static attribute %s not matched'%attr
        attrnode+=addnodes.desc_name(attr,attr)
        return attr
klass,name,default=m.groups()
#attrnode+=addnodes.desc_type('static','static')
attrnode+=addnodes.desc_name(name,name)
plist=addnodes.desc_parameterlist()
if default=='': default='unspecified'
plist+=addnodes.desc_parameter('='+default,'='+default)
attrnode+=plist
attrnode+=addnodes.desc_annotation(' [static]',' [static]')
return klass+'.'+name
#############################
## set tab size
###################
## http://groups.google.com/group/sphinx-dev/browse_thread/thread/35b8071ffe9a8feb
def setup(app):
from sphinx.highlighting import lexers
from pygments.lexers.compiled import CppLexer
lexers['cpp'] = CppLexer(tabsize=3)
lexers['c++'] = CppLexer(tabsize=3)
from pygments.lexers.agile import PythonLexer
lexers['python'] = PythonLexer(tabsize=3)
app.connect('source-read',fixSrc)
app.connect('autodoc-skip-member',customExclude)
app.connect('autodoc-process-signature',fixSignature)
app.connect('autodoc-process-docstring',fixDocstring)
app.add_description_unit('ystaticattr',None,objname='static attribute',indextemplate='pair: %s; static method',parse_node=parse_ystaticattr)
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#
# HACK: change ipython console regexp from ipython_console_highlighting.py
import re
sys.path.append(os.path.abspath('.'))
import yade.config
if 1:
if yade.runtime.ipython_version<12:
import ipython_directive as id
else:
if 12<=yade.runtime.ipython_version<13:
import ipython_directive012 as id
elif 13<=yade.runtime.ipython_version<200:
import ipython_directive013 as id
else:
import ipython_directive200 as id
#The next four lines are for compatibility with IPython 0.13.1
ipython_rgxin =re.compile(r'(?:In |Yade )\[(\d+)\]:\s?(.*)\s*')
ipython_rgxout=re.compile(r'(?:Out| -> )\[(\d+)\]:\s?(.*)\s*')
ipython_promptin ='Yade [%d]:'
ipython_promptout=' -> [%d]: '
ipython_cont_spaces=' '
#For IPython <=0.12, the following lines are used
id.rgxin =re.compile(r'(?:In |Yade )\[(\d+)\]:\s?(.*)\s*')
id.rgxout=re.compile(r'(?:Out| -> )\[(\d+)\]:\s?(.*)\s*')
id.rgxcont=re.compile(r'(?: +)\.\.+:\s?(.*)\s*')
id.fmtin ='Yade [%d]:'
id.fmtout =' -> [%d]: ' # for some reason, out and cont must have the trailing space
id.fmtcont=' .\D.: '
id.rc_override=dict(prompt_in1="Yade [\#]:",prompt_in2=" .\D.:",prompt_out=r" -> [\#]: ")
if yade.runtime.ipython_version<12:
id.reconfig_shell()
import ipython_console_highlighting as ich
ich.IPythonConsoleLexer.input_prompt = re.compile("(Yade \[[0-9]+\]: )")
ich.IPythonConsoleLexer.output_prompt = re.compile("(( -> |Out)|\[[0-9]+\]: )")
ich.IPythonConsoleLexer.continue_prompt = re.compile("\s+\.\.\.+:")
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.graphviz',
'sphinx.ext.viewcode',
'sphinx.ext.inheritance_diagram',
'matplotlib.sphinxext.plot_directive',
'matplotlib.sphinxext.only_directives',
#'matplotlib.sphinxext.mathmpl',
'ipython_console_highlighting',
'youtube',
'sphinx.ext.todo',
]
if yade.runtime.ipython_version<12:
extensions.append('ipython_directive')
else:
if 12<=yade.runtime.ipython_version<13:
extensions.append('ipython_directive012')
elif 13<=yade.runtime.ipython_version<200:
extensions.append('ipython_directive013')
else:
extensions.append('ipython_directive200')
# the sidebar extension
if False:
if writer=='html':
extensions+=['sphinx.ext.sidebar']
sidebar_all=True
sidebar_relling=True
#sidebar_abbrev=True
sidebar_tocdepth=3
## http://trac.sagemath.org/sage_trac/attachment/ticket/7549/trac_7549-doc_inheritance_underscore.patch
# GraphViz includes dot, neato, twopi, circo, fdp.
graphviz_dot = 'dot'
inheritance_graph_attrs = { 'rankdir' : 'BT' }
inheritance_node_attrs = { 'height' : 0.5, 'fontsize' : 12, 'shape' : 'oval' }
inheritance_edge_attrs = {}
my_latex_preamble=r'''
\usepackage{euler} % must be loaded before fontspec for the whole doc (below); this must be kept for pngmath, however
\usepackage{hyperref}
\usepackage{amsmath}
\usepackage{amsbsy}
%\usepackage{mathabx}
\usepackage{underscore}
\usepackage[all]{xy}
% Metadata of the pdf output
\hypersetup{pdftitle={Yade Documentation}}
\hypersetup{pdfauthor={V. Smilauer, E. Catalano, B. Chareyre, S. Dorofeenko, J. Duriez, A. Gladky, J. Kozicki, C. Modenese, L. Scholtes, L. Sibille, J. Stransky, K. Thoeni}}
% symbols
\let\mat\boldsymbol % matrix
\let\vec\boldsymbol % vector
\let\tens\boldsymbol % tensor
\def\normalized#1{\widehat{#1}}
\def\locframe#1{\widetilde{#1}}
% timestep
\def\Dt{\Delta t}
\def\Dtcr{\Dt_{\rm cr}}
% algorithm complexity
\def\bigO#1{\ensuremath{\mathcal{O}(#1)}}
% variants for greek symbols
\let\epsilon\varepsilon
\let\theta\vartheta
\let\phi\varphi
% shorthands
\let\sig\sigma
\let\eps\epsilon
% variables at different points of time
\def\prev#1{#1^-}
\def\pprev#1{#1^\ominus}
\def\curr#1{#1^{\circ}}
\def\nnext#1{#1^\oplus}
\def\next#1{#1^+}
% shorthands for geometry
\def\currn{\curr{\vec{n}}}
\def\currC{\curr{\vec{C}}}
\def\uT{\vec{u}_T}
\def\curruT{\curr{\vec{u}}_T}
\def\prevuT{\prev{\vec{u}}_T}
\def\currn{\curr{\vec{n}}}
\def\prevn{\prev{\vec{n}}}
% motion
\def\pprevvel{\pprev{\dot{\vec{u}}}}
\def\nnextvel{\nnext{\dot{\vec{u}}}}
\def\curraccel{\curr{\ddot{\vec{u}}}}
\def\prevpos{\prev{\vec{u}}}
\def\currpos{\curr{\vec{u}}}
\def\nextpos{\next{\vec{u}}}
\def\curraaccel{\curr{\dot{\vec{\omega}}}}
\def\pprevangvel{\pprev{\vec{\omega}}}
\def\nnextangvel{\nnext{\vec{\omega}}}
\def\loccurr#1{\curr{\locframe{#1}}}
\def\numCPU{n_{\rm cpu}}
\DeclareMathOperator{\Align}{Align}
\DeclareMathOperator{\sign}{sgn}
% sorting algorithms
\def\isleq#1{\currelem{#1}\ar@/^/[ll]^{\leq}}
\def\isnleq#1{\currelem{#1}\ar@/^/[ll]^{\not\leq}}
\def\currelem#1{\fbox{$#1$}}
\def\sortSep{||}
\def\sortInv{\hbox{\phantom{||}}}
\def\sortlines#1{\xymatrix@=3pt{#1}}
\def\crossBound{||\mkern-18mu<}
'''
pngmath_latex_preamble=r'\usepackage[active]{preview}'+my_latex_preamble
pngmath_use_preview=True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index-toctree'
# General information about the project.
project = u'Yade'
copyright = u'2009, Václav Šmilauer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = yade.config.version
# The full version, including alpha/beta/rc tags.
release = yade.config.revision
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['yade.']
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'stickysidebar':'true','collapsiblesidebar':'true','rightsidebar':'false'}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'fig/yade-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'fig/yade-favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static-html']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
html_index='index.html'
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = { 'index':'index.html'}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Yadedoc'
# -- Options for LaTeX output --------------------------------------------------
my_maketitle=r'''
\begin{titlepage}
\begin{flushright}
\hrule{}
% Upper part of the page
\begin{flushright}
\includegraphics[width=0.15\textwidth]{yade-logo.png}\par
\end{flushright}
\vspace{20 mm}
\text{\sffamily\bfseries\Huge Yade Documentation}\\
\vspace{5 mm}
\vspace{70 mm}
\begin{sffamily}\bfseries\Large
V\'{a}clav \v{S}milauer, Emanuele Catalano, Bruno Chareyre, Sergei Dorofeenko, Jerome Duriez, Anton Gladky, Janek Kozicki, Chiara Modenese, Luc Scholt\`{e}s, Luc Sibille, Jan Str\'{a}nsk\'{y}, Klaus Thoeni
\end{sffamily}
\vspace{20 mm}
\hrule{}
\vfill
% Bottom of the page
\textit{\Large Release '''\
+yade.config.revision\
+r''', \today}
\end{flushright}
\end{titlepage}
\text{\sffamily\bfseries\LARGE Authors}\\
\\
\text{\sffamily\bfseries\Large V\'{a}clav \v{S}milauer}\\
\text{\sffamily\Large Freelance consultant (http://woodem.eu)}\\
\\
\text{\sffamily\bfseries\Large Emanuele Catalano}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Bruno Chareyre}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Sergei Dorofeenko}\\
\text{\sffamily\Large IPCP RAS, Chernogolovka}\\
\\
\text{\sffamily\bfseries\Large Jerome Duriez}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Anton Gladky}\\
\text{\sffamily\Large TU Bergakademie Freiberg}\\
\\
\text{\sffamily\bfseries\Large Janek Kozicki}\\
\text{\sffamily\Large Gdansk University of Technology - lab. 3SR Grenoble University }\\
\\
\text{\sffamily\bfseries\Large Chiara Modenese}\\
\text{\sffamily\Large University of Oxford}\\
\\
\text{\sffamily\bfseries\Large Luc Scholt\`{e}s}\\
\text{\sffamily\Large Grenoble INP, UJF, CNRS, lab. 3SR}\\
\\
\text{\sffamily\bfseries\Large Luc Sibille}\\
\text{\sffamily\Large University of Nantes, lab. GeM}\\
\\
\text{\sffamily\bfseries\Large Jan Str\'{a}nsk\'{y}}\\
\text{\sffamily\Large CVUT Prague}\\
\\
\text{\sffamily\bfseries\Large Klaus Thoeni}
\text{\sffamily\Large The University of Newcastle (Australia)}\\
\text{\sffamily\bfseries\large Citing this document}\\
In order to let users cite Yade consistently in publications, we provide a list of bibliographic references for the different parts of the documentation. This way of acknowledging Yade is also a way to make developments and documentation of Yade more attractive for researchers, who are evaluated on the basis of citations of their work by others. We therefore kindly ask users to cite Yade as accurately as possible in their papers, as explained in http://yade-dem.org/doc/citing.html.
'''
latex_elements=dict(
papersize='a4paper',
fontpkg=r'''
\usepackage{euler}
\usepackage{fontspec,xunicode,xltxtra}
%\setmainfont[BoldFont={LMRoman10 Bold}]{CMU Concrete} %% CMU Concrete must be installed by hand as otf
''',
utf8extra='',
fncychap='',
preamble=my_latex_preamble,
footer='',
inputenc='',
fontenc='',
maketitle=my_maketitle,
)
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index-toctree', 'Yade.tex', u'Yade Documentation',
u'Václav Šmilauer', 'manual'),
('index-toctree_manuals', 'YadeManuals.tex', u'Yade Tutorial and Manuals',
u'Václav Šmilauer', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = 'fig/yade-logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
gpl-2.0
| -657,527,205,585,148,000 | 34.729592 | 482 | 0.695523 | false |
armikhael/software-center
|
test/gtk3/test_views.py
|
1
|
1845
|
#!/usr/bin/python
from gi.repository import Gtk, GObject
import sys
import unittest
sys.path.insert(0,"../..")
sys.path.insert(0,"..")
#from mock import Mock
TIMEOUT=300
import softwarecenter.paths
softwarecenter.paths.datadir = "../data"
class TestViews(unittest.TestCase):
def test_viewswitcher(self):
from softwarecenter.ui.gtk3.panes.viewswitcher import get_test_window_viewswitcher
win = get_test_window_viewswitcher()
GObject.timeout_add(TIMEOUT, lambda: win.destroy())
Gtk.main()
def test_catview(self):
from softwarecenter.ui.gtk3.views.catview_gtk import get_test_window_catview
win = get_test_window_catview()
GObject.timeout_add(TIMEOUT, lambda: win.destroy())
Gtk.main()
def test_appdetails(self):
from softwarecenter.ui.gtk3.views.appdetailsview_gtk import get_test_window_appdetails
win = get_test_window_appdetails()
GObject.timeout_add(TIMEOUT, lambda: win.destroy())
Gtk.main()
def test_pkgsnames(self):
from softwarecenter.ui.gtk3.views.pkgnamesview import get_test_window_pkgnamesview
win = get_test_window_pkgnamesview()
GObject.timeout_add(TIMEOUT, lambda: win.destroy())
Gtk.main()
def test_purchaseview(self):
from softwarecenter.ui.gtk3.views.purchaseview import get_test_window_purchaseview
win = get_test_window_purchaseview()
GObject.timeout_add(TIMEOUT, lambda: win.destroy())
Gtk.main()
def test_appview(self):
from softwarecenter.ui.gtk3.views.appview import get_test_window
win = get_test_window()
GObject.timeout_add(TIMEOUT, lambda: win.destroy())
Gtk.main()
if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.DEBUG)
unittest.main()
|
gpl-3.0
| -2,807,920,920,501,459,500 | 30.271186 | 94 | 0.673713 | false |
zhanghui9700/eonboard
|
eoncloud_web/biz/account/views.py
|
1
|
16684
|
# -*- coding: utf-8 -*-
import logging
from datetime import datetime
from rest_framework import generics
from rest_framework import status
from rest_framework.response import Response
from rest_framework.decorators import api_view
from django.conf import settings
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib.auth.models import check_password
from biz.account.settings import QUOTA_ITEM, NotificationLevel
from biz.account.models import (Contract, Operation, Quota,
UserProxy, Notification, Feed, UserProfile)
from biz.account.serializer import (ContractSerializer, OperationSerializer,
UserSerializer, QuotaSerializer,
FeedSerializer, DetailedUserSerializer,
NotificationSerializer)
from biz.account.utils import get_quota_usage
from biz.idc.models import DataCenter
from biz.common.pagination import PagePagination
from biz.common.decorators import require_POST, require_GET
from biz.common.utils import retrieve_params
from cloud.tasks import (link_user_to_dc_task, send_notifications,
send_notifications_by_data_center)
from frontend.forms import CloudUserCreateFormWithoutCapatcha
LOG = logging.getLogger(__name__)
@api_view(["GET"])
def contract_view(request):
c = Contract.objects.filter(user=request.user,
udc__id=request.session["UDC_ID"])[0]
s = ContractSerializer(c)
return Response(s.data)
@api_view(["GET"])
def quota_view(request):
quota = get_quota_usage(request.user, request.session["UDC_ID"])
return Response(quota)
class OperationList(generics.ListAPIView):
queryset = Operation.objects
serializer_class = OperationSerializer
pagination_class = PagePagination
def get_queryset(self):
request = self.request
resource = request.query_params.get('resource')
resource_name = request.query_params.get('resource_name')
start_date = request.query_params.get('start_date')
end_date = request.query_params.get('end_date')
queryset = super(OperationList, self).get_queryset()
if resource:
queryset = queryset.filter(resource=resource)
if resource_name:
queryset = queryset.filter(resource_name__istartswith=resource_name)
if start_date:
queryset = queryset.filter(create_date__gte=start_date)
if end_date:
queryset = queryset.filter(create_date__lte=end_date)
if request.user.is_superuser:
data_center_pk = request.query_params.get('data_center', '')
operator_pk = request.query_params.get('operator', '')
if data_center_pk:
queryset = queryset.filter(udc__data_center__pk=data_center_pk)
if operator_pk:
queryset = queryset.filter(user__pk=operator_pk)
else:
queryset = queryset.filter(user=request.user,
udc__id=request.session["UDC_ID"])
return queryset.order_by('-create_date')
@api_view()
def operation_filters(request):
resources = Operation.objects.values('resource').distinct()
for data in resources:
data['name'] = _(data['resource'])
return Response({
"resources": resources,
"operators": UserProxy.normal_users.values('pk', 'username'),
"data_centers": DataCenter.objects.values('pk', 'name')
})
class ContractList(generics.ListCreateAPIView):
queryset = Contract.living.filter(deleted=False)
serializer_class = ContractSerializer
def list(self, request, *args, **kwargs):
serializer = ContractSerializer(self.get_queryset(), many=True)
return Response(serializer.data)
class ContractDetail(generics.RetrieveAPIView):
queryset = Contract.living.all()
serializer_class = ContractSerializer
@api_view(['POST'])
def create_contract(request):
try:
serializer = ContractSerializer(data=request.data,
context={"request": request})
if serializer.is_valid():
contract = serializer.save()
Operation.log(contract, contract.name, 'create', udc=contract.udc,
user=request.user)
return Response({'success': True,
"msg": _('Contract is created successfully!')},
status=status.HTTP_201_CREATED)
else:
return Response({"success": False,
"msg": _('Contract data is not valid!'),
'errors': serializer.errors},
status=status.HTTP_400_BAD_REQUEST)
except Exception as e:
LOG.error("Failed to create contract, msg:[%s]" % e)
return Response({"success": False, "msg": _(
'Failed to create contract for unknown reason.')})
@api_view(['POST'])
def update_contract(request):
try:
pk = request.data['id']
contract = Contract.objects.get(pk=pk)
contract.name = request.data['name']
contract.customer = request.data['customer']
contract.start_date = datetime.strptime(request.data['start_date'],
'%Y-%m-%d %H:%M:%S')
contract.end_date = datetime.strptime(request.data['end_date'],
'%Y-%m-%d %H:%M:%S')
contract.save()
Operation.log(contract, contract.name, 'update', udc=contract.udc,
user=request.user)
return Response(
{'success': True, "msg": _('Contract is updated successfully!')},
status=status.HTTP_201_CREATED)
except Exception as e:
LOG.error("Failed to update contract, msg:[%s]" % e)
return Response({"success": False, "msg": _(
'Failed to update contract for unknown reason.')})
@api_view(['POST'])
def delete_contracts(request):
try:
contract_ids = request.data.getlist('contract_ids[]')
for contract_id in contract_ids:
contract = Contract.objects.get(pk=contract_id)
contract.deleted = True
contract.save()
Quota.living.filter(contract__pk=contract_id).update(deleted=True,
update_date=timezone.now())
Operation.log(contract, contract.name, 'delete', udc=contract.udc,
user=request.user)
return Response(
{'success': True, "msg": _('Contracts have been deleted!')},
status=status.HTTP_201_CREATED)
except Exception as e:
LOG.error("Failed to delete contracts, msg:[%s]" % e)
return Response({"success": False, "msg": _(
'Failed to delete contracts for unknown reason.')})
class UserList(generics.ListAPIView):
queryset = UserProxy.normal_users
serializer_class = UserSerializer
def list(self, request, *args, **kwargs):
serializer = self.serializer_class(self.get_queryset(), many=True)
return Response(serializer.data)
@require_GET
def active_users(request):
queryset = UserProxy.normal_users.filter(is_active=True)
serializer = UserSerializer(queryset.all(), many=True)
return Response(serializer.data)
class UserDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = UserProxy.normal_users.all()
serializer_class = DetailedUserSerializer
def perform_destroy(self, instance):
instance.is_active = False
instance.save()
@api_view(['POST'])
def deactivate_user(request):
pk = request.data['id']
user = User.objects.get(pk=pk)
user.is_active = False
user.save()
return Response({"success": True, "msg": _('User has been deactivated!')},
status=status.HTTP_200_OK)
@api_view(['POST'])
def activate_user(request):
pk = request.data['id']
user = User.objects.get(pk=pk)
user.is_active = True
user.save()
return Response({"success": True, "msg": _('User has been activated!')},
status=status.HTTP_200_OK)
@api_view(["POST"])
def change_password(request):
user = request.user
old_password = request.data['old_password']
new_password = request.data['new_password']
confirm_password = request.data['confirm_password']
if new_password != confirm_password:
return Response({"success": False, "msg": _(
"The new password doesn't match confirm password!")})
if not check_password(old_password, user.password):
return Response({"success": False,
"msg": _("The original password is not correct!")})
user.set_password(new_password)
user.save()
return Response({"success": True, "msg": _(
"Password has been changed! Please login in again.")})
class QuotaList(generics.ListAPIView):
queryset = Quota.living
serializer_class = QuotaSerializer
def list(self, request, *args, **kwargs):
queryset = self.get_queryset()
if 'contract_id' in request.query_params:
queryset = queryset.filter(
contract__id=request.query_params['contract_id'])
return Response(self.serializer_class(queryset, many=True).data)
class QuotaDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Quota.living
serializer_class = QuotaSerializer
@api_view(['GET'])
def resource_options(request):
return Response(QUOTA_ITEM)
@api_view(['POST'])
def create_quotas(request):
try:
contract = Contract.objects.get(pk=request.data['contract_id'])
quota_ids = request.data.getlist('ids[]')
resources = request.data.getlist('resources[]')
limits = request.data.getlist('limits[]')
for index, quota_id in enumerate(quota_ids):
resource, limit = resources[index], limits[index]
if quota_id and Quota.living.filter(contract=contract,
pk=quota_id).exists():
Quota.objects.filter(pk=quota_id).update(resource=resource,
limit=limit,
update_date=timezone.now())
else:
Quota.objects.create(resource=resource, limit=limit,
contract=contract)
Operation.log(contract, contract.name + " quota", 'update',
udc=contract.udc, user=request.user)
return Response({'success': True,
"msg": _('Quotas have been saved successfully!')},
status=status.HTTP_201_CREATED)
except Exception as e:
LOG.error("Failed to save quotas, msg:[%s]" % e)
return Response({"success": False,
"msg": _('Failed to save quotas for unknown reason.')})
@api_view(['POST'])
def create_quota(request):
try:
contract = Contract.objects.get(pk=request.data['contract'])
resource, limit = request.data['resource'], request.data['limit']
pk = request.data['id'] if 'id' in request.data else None
if pk and Quota.objects.filter(pk=pk).exists():
quota = Quota.objects.get(pk=pk)
quota.limit = limit
quota.save()
else:
quota = Quota.objects.create(resource=resource,
limit=limit,
contract=contract)
return Response({'success': True,
"msg": _('Quota have been saved successfully!'),
"quota": QuotaSerializer(quota).data},
status=status.HTTP_201_CREATED)
except Exception as e:
LOG.error("Failed to save quota, msg:[%s]" % e)
return Response({"success": False,
"msg": _('Failed to save quota for unknown reason.')})
@api_view(['POST'])
def delete_quota(request):
try:
Quota.living.filter(pk=request.data['id']).update(deleted=True)
return Response({'success': True,
"msg": _('Quota have been deleted successfully!')},
status=status.HTTP_201_CREATED)
except Exception as e:
LOG.error("Failed to create quota, msg:[%s]" % e)
return Response(
{"success": False,
"msg": _('Failed to create quota for unknown reason.')}
)
@api_view(["GET"])
def get_config_view(request):
return Response(settings.SITE_CONFIG)
@require_GET
def notification_options(request):
return Response(NotificationLevel.OPTIONS)
@require_POST
def broadcast(request):
receiver_ids = request.data.getlist('receiver_ids[]')
level, title, content = retrieve_params(request.data,
'level', 'title', 'content')
send_notifications.delay(title, content, level, receiver_ids)
return Response({"success": True,
"msg": _('Notification is sent successfully!')})
@require_POST
def data_center_broadcast(request):
level, title, content = retrieve_params(
request.data, 'level', 'title', 'content')
dc_ids = request.data.getlist('data_centers[]')
send_notifications_by_data_center.delay(title, content, level, dc_ids)
return Response({"success": True,
"msg": _('Notification is sent successfully!')})
@require_POST
def announce(request):
level, title, content = retrieve_params(request.data, 'level', 'title',
'content')
Notification.objects.create(title=title, content=content,
level=level, is_announcement=True)
return Response({"success": True,
"msg": _('Announcement is sent successfully!')})
class NotificationList(generics.ListAPIView):
queryset = Notification.objects.all()
serializer_class = NotificationSerializer
def list(self, request, *args, **kwargs):
queryset = self.get_queryset().filter(is_auto=False).order_by(
'-create_date')
return Response(self.serializer_class(queryset, many=True).data)
class NotificationDetail(generics.RetrieveDestroyAPIView):
queryset = Notification.objects.all()
serializer_class = NotificationSerializer
class FeedList(generics.ListAPIView):
queryset = Feed.living.all()
serializer_class = FeedSerializer
def list(self, request, *args, **kwargs):
queryset = self.get_queryset().filter(receiver=request.user).order_by(
'-create_date')
return Response(self.serializer_class(queryset, many=True).data)
class FeedDetail(generics.RetrieveDestroyAPIView):
queryset = Feed.living.all()
serializer_class = FeedSerializer
def perform_destroy(self, instance):
instance.fake_delete()
@require_GET
def feed_status(request):
Notification.pull_announcements(request.user)
num = Feed.living.filter(receiver=request.user, is_read=False).count()
return Response({"num": num})
@require_POST
def mark_read(request, pk):
Feed.living.get(pk=pk).mark_read()
return Response(status=status.HTTP_200_OK)
@require_POST
def initialize_user(request):
user_id = request.data['user_id']
user = User.objects.get(pk=user_id)
link_user_to_dc_task(user, DataCenter.get_default())
return Response({"success": True,
"msg": _("Initialization is successful.")})
@require_POST
def create_user(request):
user = User()
form = CloudUserCreateFormWithoutCapatcha(data=request.POST, instance=user)
if not form.is_valid():
return Response({
"success": False,
"msg": _("Data is not valid")
})
form.save()
link_user_to_dc_task.delay(user, DataCenter.get_default())
return Response({"success": True,
"msg": _("User is created successfully!")})
@require_GET
def is_username_unique(request):
username = request.GET['username']
return Response(not UserProxy.objects.filter(username=username).exists())
@require_GET
def is_email_unique(request):
email = request.GET['email']
return Response(not UserProxy.objects.filter(email=email).exists())
@require_GET
def is_mobile_unique(request):
mobile = request.GET['mobile']
return Response(not UserProfile.objects.filter(mobile=mobile).exists())
|
apache-2.0
| 3,498,255,991,737,627,000 | 31.972332 | 92 | 0.611664 | false |
martindisch/Arduino
|
Watering/logger.py
|
1
|
1285
|
import os, serial, time, numpy
watchTime = 3600
measureInterval = 5
calcTime = 1.5
def receiving(ser):
global last_received
buffer_string = ''
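    # Accumulate incoming serial bytes until at least one complete '\n'-terminated
    # line is available, then return the most recent complete line.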
while True:
buffer_string = buffer_string + ser.read(ser.inWaiting())
if '\n' in buffer_string:
lines = buffer_string.split('\n') # Guaranteed to have at least 2 entries
last_received = lines[-2]
buffer_string = lines[-1]
return last_received
def dateTime():
return time.strftime("%Y/%m/%d %H:%M:%S")
ser = serial.Serial('/dev/ttyACM0', 9600)
# Don't write immediately, the Arduino is restarting
time.sleep(3)
timePassed = 0
values = ()
startDate = "none"
while 1:
if timePassed >= watchTime:
f = open(os.path.expanduser('~') + "/waterlog.txt", 'a')
f.write(startDate + " - " + dateTime() + " " + str(numpy.mean(values)) + "\n")
f.close()
print dateTime() + " Wrote to file successfully"
timePassed = 0
values = ()
startDate = "none"
if "none" in startDate:
startDate = dateTime()
ser.write('4')
message = receiving(ser)
value = int(message[-4:])
values += (value,)
timePassed += measureInterval
time.sleep(measureInterval - calcTime)
|
mit
| -8,848,546,226,253,781,000 | 25.770833 | 97 | 0.583658 | false |
fahhem/plumbum
|
plumbum/commands/modifiers.py
|
1
|
12344
|
import os
from select import select
from subprocess import PIPE
import sys
from itertools import chain
from plumbum.commands.processes import run_proc, ProcessExecutionError
from plumbum.commands.base import AppendingStdoutRedirection, StdoutRedirection
from plumbum.lib import read_fd_decode_safely
class Future(object):
"""Represents a "future result" of a running process. It basically wraps a ``Popen``
object and the expected exit code, and provides poll(), wait(), returncode, stdout,
and stderr.
"""
def __init__(self, proc, expected_retcode, timeout = None):
self.proc = proc
self._expected_retcode = expected_retcode
self._timeout = timeout
self._returncode = None
self._stdout = None
self._stderr = None
def __repr__(self):
return "<Future %r (%s)>" % (self.proc.argv, self._returncode if self.ready() else "running",)
def poll(self):
"""Polls the underlying process for termination; returns ``False`` if still running,
or ``True`` if terminated"""
if self.proc.poll() is not None:
self.wait()
return self._returncode is not None
ready = poll
def wait(self):
"""Waits for the process to terminate; will raise a
:class:`plumbum.commands.ProcessExecutionError` in case of failure"""
if self._returncode is not None:
return
self._returncode, self._stdout, self._stderr = run_proc(self.proc,
self._expected_retcode, self._timeout)
@property
def stdout(self):
"""The process' stdout; accessing this property will wait for the process to finish"""
self.wait()
return self._stdout
@property
def stderr(self):
"""The process' stderr; accessing this property will wait for the process to finish"""
self.wait()
return self._stderr
@property
def returncode(self):
"""The process' returncode; accessing this property will wait for the process to finish"""
self.wait()
return self._returncode
#===================================================================================================
# execution modifiers
#===================================================================================================
class ExecutionModifier(object):
__slots__ = ("__weakref__",)
def __repr__(self):
"""Automatically creates a representation for given subclass with slots.
Ignore hidden properties."""
slots = {}
for cls in self.__class__.__mro__:
slots_list = getattr(cls, "__slots__", ())
if isinstance(slots_list, str):
slots_list = (slots_list,)
for prop in slots_list:
if prop[0] != '_':
slots[prop] = getattr(self, prop)
mystrs = ("{0} = {1}".format(name, slots[name]) for name in slots)
return "{0}({1})".format(self.__class__.__name__, ", ".join(mystrs))
@classmethod
def __call__(cls, *args, **kwargs):
return cls(*args, **kwargs)
class BG(ExecutionModifier):
"""
An execution modifier that runs the given command in the background, returning a
:class:`Future <plumbum.commands.Future>` object. In order to mimic shell syntax, it applies
when you right-and it with a command. If you wish to expect a different return code
(other than the normal success indicate by 0), use ``BG(retcode)``. Example::
future = sleep[5] & BG # a future expecting an exit code of 0
future = sleep[5] & BG(7) # a future expecting an exit code of 7
.. note::
When processes run in the **background** (either via ``popen`` or
:class:`& BG <plumbum.commands.BG>`), their stdout/stderr pipes might fill up,
causing them to hang. If you know a process produces output, be sure to consume it
every once in a while, using a monitoring thread/reactor in the background.
For more info, see `#48 <https://github.com/tomerfiliba/plumbum/issues/48>`_
"""
__slots__ = ("retcode", "kargs")
def __init__(self, retcode=0, **kargs):
self.retcode = retcode
self.kargs = kargs
def __rand__(self, cmd):
return Future(cmd.popen(**self.kargs), self.retcode)
BG = BG()
"""
An execution modifier that runs the given command in the background, returning a
:class:`Future <plumbum.commands.Future>` object. In order to mimic shell syntax, it applies
when you right-and it with a command. If you wish to expect a different return code
(other than the normal success indicate by 0), use ``BG(retcode)``. Example::
future = sleep[5] & BG # a future expecting an exit code of 0
future = sleep[5] & BG(7) # a future expecting an exit code of 7
.. note::
When processes run in the **background** (either via ``popen`` or
:class:`& BG <plumbum.commands.BG>`), their stdout/stderr pipes might fill up,
causing them to hang. If you know a process produces output, be sure to consume it
every once in a while, using a monitoring thread/reactor in the background.
For more info, see `#48 <https://github.com/tomerfiliba/plumbum/issues/48>`_
"""
class FG(ExecutionModifier):
"""
An execution modifier that runs the given command in the foreground, passing it the
current process' stdin, stdout and stderr. Useful for interactive programs that require
a TTY. There is no return value.
In order to mimic shell syntax, it applies when you right-and it with a command.
If you wish to expect a different return code (other than the normal success indicate by 0),
use ``FG(retcode)``. Example::
vim & FG # run vim in the foreground, expecting an exit code of 0
vim & FG(7) # run vim in the foreground, expecting an exit code of 7
"""
__slots__ = ("retcode",)
def __init__(self, retcode=0):
self.retcode = retcode
def __rand__(self, cmd):
cmd(retcode = self.retcode, stdin = None, stdout = None, stderr = None)
FG = FG()
class TEE(ExecutionModifier):
"""Run a command, dumping its stdout/stderr to the current process's stdout
and stderr, but ALSO return them. Useful for interactive programs that
expect a TTY but also have valuable output.
Use as:
ls["-l"] & TEE
Returns a tuple of (return code, stdout, stderr), just like ``run()``.
"""
__slots__ = ("retcode", "buffered")
def __init__(self, retcode=0, buffered=True):
"""`retcode` is the return code to expect to mean "success". Set
`buffered` to False to disable line-buffering the output, which may
cause stdout and stderr to become more entangled than usual.
"""
self.retcode = retcode
self.buffered = buffered
def __rand__(self, cmd):
with cmd.bgrun(retcode=self.retcode, stdin=None, stdout=PIPE, stderr=PIPE) as p:
outbuf = []
errbuf = []
out = p.stdout
err = p.stderr
buffers = {out: outbuf, err: errbuf}
tee_to = {out: sys.stdout, err: sys.stderr}
while p.poll() is None:
ready, _, _ = select((out, err), (), ())
for fd in ready:
buf = buffers[fd]
data, text = read_fd_decode_safely(fd, 4096)
if not data: # eof
continue
# Python conveniently line-buffers stdout and stderr for
# us, so all we need to do is write to them
# This will automatically add up to three bytes if it cannot be decoded
tee_to[fd].write(text)
# And then "unbuffered" is just flushing after each write
if not self.buffered:
tee_to[fd].flush()
buf.append(data)
stdout = ''.join([x.decode('utf-8') for x in outbuf])
stderr = ''.join([x.decode('utf-8') for x in errbuf])
return p.returncode, stdout, stderr
TEE = TEE()
class TF(ExecutionModifier):
"""
An execution modifier that runs the given command, but returns True/False depending on the retcode.
This returns True if the expected exit code is returned, and False if it is not.
This is useful for checking true/false bash commands.
If you wish to expect a different return code (other than the normal success indicated by 0),
use ``TF(retcode)``. If you want to run the process in the foreground, then use
``TF(FG=True)``.
Example::
local['touch']['/root/test'] & TF # Returns False, since this cannot be touched
local['touch']['/root/test'] & TF(1) # Returns True
local['touch']['/root/test'] & TF(FG=True) # Returns False, will show error message
"""
__slots__ = ("retcode", "FG")
def __init__(self, retcode=0, FG=False):
"""`retcode` is the return code to expect to mean "success". Set
`FG` to True to run in the foreground.
"""
self.retcode = retcode
self.FG = FG
@classmethod
def __call__(cls, *args, **kwargs):
return cls(*args, **kwargs)
def __rand__(self, cmd):
try:
if self.FG:
cmd(retcode = self.retcode, stdin = None, stdout = None, stderr = None)
else:
cmd(retcode = self.retcode)
return True
except ProcessExecutionError:
return False
TF = TF()
class RETCODE(ExecutionModifier):
"""
An execution modifier that runs the given command, causing it to run and return the retcode.
This is useful for working with bash commands that have important retcodes but not very
useful output.
If you want to run the process in the foreground, then use ``RETCODE(FG=True)``.
Example::
local['touch']['/root/test'] & RETCODE # Returns 1, since this cannot be touched
local['touch']['/root/test'] & RETCODE(FG=True) # Returns 1, will show error message
"""
__slots__ = ("foreground",)
def __init__(self, FG=False):
"""Set `FG` to True to run in the foreground.
"""
self.foreground = FG
@classmethod
def __call__(cls, *args, **kwargs):
return cls(*args, **kwargs)
def __rand__(self, cmd):
if self.foreground:
return cmd.run(retcode = None, stdin = None, stdout = None, stderr = None)[0]
else:
return cmd.run(retcode = None)[0]
RETCODE = RETCODE()
class NOHUP(ExecutionModifier):
"""
An execution modifier that runs the given command in the background, disconnected
from the current process, returning a
standard popen object. It will keep running even if you close the current process.
In order to slightly mimic shell syntax, it applies
when you right-and it with a command. If you wish to use a different working directory
or different stdout, stderr, you can use named arguments. The default is ``NOHUP(
cwd=local.cwd, stdout='nohup.out', stderr=None)``. If stderr is None, stderr will be
sent to stdout. Use ``os.devnull`` for null output. Will respect redirected output.
Example::
sleep[5] & NOHUP # Outputs to nohup.out
sleep[5] & NOHUP(stdout=os.devnull) # No output
The equivalent bash command would be
.. code-block:: bash
nohup sleep 5 &
"""
__slots__ = ('cwd', 'stdout', 'stderr', 'append')
def __init__(self, cwd='.', stdout='nohup.out', stderr=None, append=True):
""" Set ``cwd``, ``stdout``, or ``stderr``.
Runs as a forked process. You can set ``append=False``, too.
"""
self.cwd = cwd
self.stdout = stdout
self.stderr = stderr
self.append = append
def __rand__(self, cmd):
if isinstance(cmd, StdoutRedirection):
stdout = cmd.file
append = False
cmd = cmd.cmd
elif isinstance(cmd, AppendingStdoutRedirection):
stdout = cmd.file
append = True
cmd = cmd.cmd
else:
stdout = self.stdout
append = self.append
return cmd.nohup(cmd, self.cwd, stdout, self.stderr, append)
NOHUP = NOHUP()
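# Illustrative usage sketch (editorial addition, not part of the original module).
# Assumes a plumbum `local` machine object and ordinary commands such as `sleep`,
# `ls` and `test`; the exact command names here are examples only.
#
#   from plumbum import local, BG, FG, TF, RETCODE, NOHUP
#   sleep = local["sleep"]
#   future = sleep["2"] & BG            # returns a Future; future.wait() joins it
#   local["ls"]["-l"] & FG              # streams output to the current terminal
#   ok = local["test"]["-e", "/tmp"] & TF        # True/False instead of raising
#   rc = local["test"]["-e", "/nope"] & RETCODE  # bare integer exit code
#   sleep["60"] & NOHUP                 # detached process, output in nohup.out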
|
mit
| -2,994,591,293,210,177,000 | 35.847761 | 103 | 0.597294 | false |
feend78/evennia
|
evennia/server/inputfuncs.py
|
1
|
15810
|
"""
Functions for processing input commands.
All global functions in this module whose name does not start with "_"
are considered inputfuncs. Each function must have the following
callsign:
inputfunc(session, *args, **kwargs)
Where "options" is always one of the kwargs, containing eventual
protocol-options.
There is one special function, the "default" function, which is called
on a no-match. It has this callsign:
default(session, cmdname, *args, **kwargs)
Evennia knows which modules to use for inputfuncs by
settings.INPUT_FUNC_MODULES.
"""
from future.utils import viewkeys
import importlib
from django.conf import settings
from evennia.commands.cmdhandler import cmdhandler
from evennia.accounts.models import AccountDB
from evennia.utils.logger import log_err
from evennia.utils.utils import to_str, to_unicode
BrowserSessionStore = importlib.import_module(settings.SESSION_ENGINE).SessionStore
# always let "idle" work since we use this in the webclient
_IDLE_COMMAND = settings.IDLE_COMMAND
_IDLE_COMMAND = (_IDLE_COMMAND, ) if _IDLE_COMMAND == "idle" else (_IDLE_COMMAND, "idle")
_GA = object.__getattribute__
_SA = object.__setattr__
def _NA(o):
return "N/A"
_ERROR_INPUT = "Inputfunc {name}({session}): Wrong/unrecognized input: {inp}"
# All global functions are inputfuncs available to process inputs
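# Editorial sketch (not part of the original module): a minimal custom inputfunc
# following the callsign described above. The name "ping" is hypothetical; any
# global function defined in an INPUT_FUNC_MODULES module works the same way.
#
#   def ping(session, *args, **kwargs):
#       """Echo a client-side ping back over the same session."""
#       kwargs.pop("options", None)
#       session.msg(ping=args)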
def text(session, *args, **kwargs):
"""
Main text input from the client. This will execute a command
string on the server.
Args:
session (Session): The active Session to receive the input.
text (str): First arg is used as text-command input. Other
arguments are ignored.
"""
#from evennia.server.profiling.timetrace import timetrace
#text = timetrace(text, "ServerSession.data_in")
txt = args[0] if args else None
# explicitly check for None since text can be an empty string, which is
# also valid
if txt is None:
return
# this is treated as a command input
# handle the 'idle' command
if txt.strip() in _IDLE_COMMAND:
session.update_session_counters(idle=True)
return
if session.account:
# nick replacement
puppet = session.puppet
if puppet:
txt = puppet.nicks.nickreplace(txt,
categories=("inputline", "channel"), include_account=True)
else:
txt = session.account.nicks.nickreplace(txt,
categories=("inputline", "channel"), include_account=False)
kwargs.pop("options", None)
cmdhandler(session, txt, callertype="session", session=session, **kwargs)
session.update_session_counters()
def bot_data_in(session, *args, **kwargs):
"""
Text input from the IRC and RSS bots.
This will trigger the execute_cmd method on the bots in-game counterpart.
Args:
session (Session): The active Session to receive the input.
text (str): First arg is text input. Other arguments are ignored.
"""
txt = args[0] if args else None
# Explicitly check for None since text can be an empty string, which is
# also valid
if txt is None:
return
# this is treated as a command input
# handle the 'idle' command
if txt.strip() in _IDLE_COMMAND:
session.update_session_counters(idle=True)
return
kwargs.pop("options", None)
# Trigger the execute_cmd method of the corresponding bot.
session.account.execute_cmd(session=session, txt=txt, **kwargs)
session.update_session_counters()
def echo(session, *args, **kwargs):
"""
Echo test function
"""
session.data_out(text="Echo returns: %s" % args)
def default(session, cmdname, *args, **kwargs):
"""
Default catch-function. This is like all other input functions except
it will get `cmdname` as the first argument.
"""
err = "Session {sessid}: Input command not recognized:\n" \
" name: '{cmdname}'\n" \
" args, kwargs: {args}, {kwargs}".format(sessid=session.sessid,
cmdname=cmdname,
args=args,
kwargs=kwargs)
if session.protocol_flags.get("INPUTDEBUG", False):
session.msg(err)
log_err(err)
def client_options(session, *args, **kwargs):
"""
This allows the client an OOB way to inform us about its name and capabilities.
This will be integrated into the session settings
Kwargs:
get (bool): If this is true, return the settings as a dict
(ignore all other kwargs).
client (str): A client identifier, like "mushclient".
version (str): A client version
ansi (bool): Supports ansi colors
xterm256 (bool): Supports xterm256 colors or not
mxp (bool): Supports MXP or not
utf-8 (bool): Supports UTF-8 or not
screenreader (bool): Screen-reader mode on/off
mccp (bool): MCCP compression on/off
screenheight (int): Screen height in lines
screenwidth (int): Screen width in characters
inputdebug (bool): Debug input functions
nocolor (bool): Strip color
raw (bool): Turn off parsing
"""
flags = session.protocol_flags
if not kwargs or kwargs.get("get", False):
# return current settings
options = dict((key, flags[key]) for key in flags
if key.upper() in ("ANSI", "XTERM256", "MXP",
"UTF-8", "SCREENREADER", "ENCODING",
"MCCP", "SCREENHEIGHT",
"SCREENWIDTH", "INPUTDEBUG",
"RAW", "NOCOLOR",
"NOGOAHEAD"))
session.msg(client_options=options)
return
def validate_encoding(val):
# helper: change encoding
try:
to_str(to_unicode("test-string"), encoding=val)
except LookupError:
raise RuntimeError("The encoding '|w%s|n' is invalid. " % val)
return val
def validate_size(val):
return {0: int(val)}
def validate_bool(val):
if isinstance(val, basestring):
return True if val.lower() in ("true", "on", "1") else False
return bool(val)
for key, value in kwargs.iteritems():
key = key.lower()
if key == "client":
flags["CLIENTNAME"] = to_str(value)
elif key == "version":
if "CLIENTNAME" in flags:
flags["CLIENTNAME"] = "%s %s" % (flags["CLIENTNAME"], to_str(value))
elif key == "ENCODING":
flags["ENCODING"] = validate_encoding(value)
elif key == "ansi":
flags["ANSI"] = validate_bool(value)
elif key == "xterm256":
flags["XTERM256"] = validate_bool(value)
elif key == "mxp":
flags["MXP"] = validate_bool(value)
elif key == "utf-8":
flags["UTF-8"] = validate_bool(value)
elif key == "screenreader":
flags["SCREENREADER"] = validate_bool(value)
elif key == "mccp":
flags["MCCP"] = validate_bool(value)
elif key == "screenheight":
flags["SCREENHEIGHT"] = validate_size(value)
elif key == "screenwidth":
flags["SCREENWIDTH"] = validate_size(value)
elif key == "inputdebug":
flags["INPUTDEBUG"] = validate_bool(value)
elif key == "nocolor":
flags["NOCOLOR"] = validate_bool(value)
elif key == "raw":
flags["RAW"] = validate_bool(value)
elif key == "nogoahead":
flags["NOGOAHEAD"] = validate_bool(value)
elif key in ('Char 1', 'Char.Skills 1', 'Char.Items 1',
'Room 1', 'IRE.Rift 1', 'IRE.Composer 1'):
# ignore mudlet's default send (aimed at IRE games)
pass
elif key not in ("options", "cmdid"):
err = _ERROR_INPUT.format(
name="client_settings", session=session, inp=key)
session.msg(text=err)
session.protocol_flags = flags
# we must update the portal as well
session.sessionhandler.session_portal_sync(session)
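# Editorial sketch (not part of the original module): the kind of call a client's
# OOB "client_options" message translates into; the option values shown are
# examples only and mirror the kwargs documented above.
#
#   client_options(session, client="mushclient", version="4.94",
#                  xterm256=True, screenwidth=120, screenreader=False)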
# GMCP alias
hello = client_options
supports_set = client_options
def get_client_options(session, *args, **kwargs):
"""
Alias wrapper for getting options.
"""
client_options(session, get=True)
def get_inputfuncs(session, *args, **kwargs):
"""
Get the keys of all available inputfuncs. Note that we don't get
it from this module alone since multiple modules could be added.
So we get it from the sessionhandler.
"""
inputfuncsdict = dict((key, func.__doc__) for key, func
in session.sessionhandler.get_inputfuncs().iteritems())
session.msg(get_inputfuncs=inputfuncsdict)
def login(session, *args, **kwargs):
"""
Perform a login. This only works if the session is currently not logged
in. This will also automatically throttle overly rapid attempts.
Kwargs:
name (str): Account name
password (str): Plain-text password
"""
if not session.logged_in and "name" in kwargs and "password" in kwargs:
from evennia.commands.default.unloggedin import create_normal_account
account = create_normal_account(session, kwargs["name"], kwargs["password"])
if account:
session.sessionhandler.login(session, account)
_gettable = {
"name": lambda obj: obj.key,
"key": lambda obj: obj.key,
"location": lambda obj: obj.location.key if obj.location else "None",
"servername": lambda obj: settings.SERVERNAME
}
def get_value(session, *args, **kwargs):
"""
Return the value of a given attribute or db_property on the
session's current account or character.
Kwargs:
name (str): Name of info value to return. Only names
in the _gettable dictionary earlier in this module
are accepted.
"""
name = kwargs.get("name", "")
obj = session.puppet or session.account
if name in _gettable:
session.msg(get_value={"name": name, "value": _gettable[name](obj)})
def _testrepeat(**kwargs):
"""
This is a test function for using with the repeat
inputfunc.
Kwargs:
session (Session): Session to return to.
"""
import time
kwargs["session"].msg(repeat="Repeat called: %s" % time.time())
_repeatable = {"test1": _testrepeat, # example only
"test2": _testrepeat} # "
def repeat(session, *args, **kwargs):
"""
Call a named function repeatedly. Note that
this is meant as an example of limiting the number of
possible call functions.
Kwargs:
callback (str): The function to call. Only functions
from the _repeatable dictionary earlier in this
module are available.
interval (int): How often to call function (s).
Defaults to once every 60 seconds with a minimum
of 5 seconds.
stop (bool): Stop a previously assigned ticker with
the above settings.
"""
from evennia.scripts.tickerhandler import TICKER_HANDLER
name = kwargs.get("callback", "")
interval = max(5, int(kwargs.get("interval", 60)))
if name in _repeatable:
if kwargs.get("stop", False):
TICKER_HANDLER.remove(interval, _repeatable[name], idstring=session.sessid, persistent=False)
else:
TICKER_HANDLER.add(interval, _repeatable[name], idstring=session.sessid, persistent=False, session=session)
else:
session.msg("Allowed repeating functions are: %s" % (", ".join(_repeatable)))
def unrepeat(session, *args, **kwargs):
"Wrapper for OOB use"
kwargs["stop"] = True
repeat(session, *args, **kwargs)
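# Editorial sketch (not part of the original module): starting and stopping one of
# the whitelisted repeating callbacks; "test1" comes from _repeatable above.
#
#   repeat(session, callback="test1", interval=10)    # tick every 10 seconds
#   unrepeat(session, callback="test1", interval=10)  # remove that ticker again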
_monitorable = {
"name": "db_key",
"location": "db_location",
"desc": "desc"
}
def _on_monitor_change(**kwargs):
fieldname = kwargs["fieldname"]
obj = kwargs["obj"]
name = kwargs["name"]
session = kwargs["session"]
# the session may be None if the char quits and someone
# else then edits the object
if session:
session.msg(monitor={"name": name, "value": _GA(obj, fieldname)})
def monitor(session, *args, **kwargs):
"""
Adds monitoring to a given property or Attribute.
Kwargs:
name (str): The name of the property or Attribute
to report. No db_* prefix is needed. Only names
in the _monitorable dict earlier in this module
are accepted.
stop (bool): Stop monitoring the above name.
"""
from evennia.scripts.monitorhandler import MONITOR_HANDLER
name = kwargs.get("name", None)
if name and name in _monitorable and session.puppet:
field_name = _monitorable[name]
obj = session.puppet
if kwargs.get("stop", False):
MONITOR_HANDLER.remove(obj, field_name, idstring=session.sessid)
else:
# the handler will add fieldname and obj to the kwargs automatically
MONITOR_HANDLER.add(obj, field_name, _on_monitor_change, idstring=session.sessid,
persistent=False, name=name, session=session)
def unmonitor(session, *args, **kwargs):
"""
Wrapper for turning off monitoring
"""
kwargs["stop"] = True
monitor(session, *args, **kwargs)
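# Editorial sketch (not part of the original module): watching a property on the
# session's puppet and removing the watch later; "location" is one of the keys
# defined in _monitorable above.
#
#   monitor(session, name="location")
#   unmonitor(session, name="location")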
def _on_webclient_options_change(**kwargs):
"""
Called when the webclient options stored on the account changes.
Inform the interested clients of this change.
"""
session = kwargs["session"]
obj = kwargs["obj"]
fieldname = kwargs["fieldname"]
clientoptions = _GA(obj, fieldname)
# the session may be None if the char quits and someone
# else then edits the object
if session:
session.msg(webclient_options=clientoptions)
def webclient_options(session, *args, **kwargs):
"""
Handles retrieving and changing of options related to the webclient.
If kwargs is empty (or contains just a "cmdid"), the saved options will be
sent back to the session.
A monitor handler will be created to inform the client of any future options
that change.
If kwargs is not empty, the key/values stored in there will be persisted
to the account object.
Kwargs:
<option name>: an option to save
"""
account = session.account
clientoptions = account.db._saved_webclient_options
if not clientoptions:
# No saved options for this account, copy and save the default.
account.db._saved_webclient_options = settings.WEBCLIENT_OPTIONS.copy()
# Get the _SaverDict created by the database.
clientoptions = account.db._saved_webclient_options
# The webclient adds a cmdid to every kwargs, but we don't need it.
try:
del kwargs["cmdid"]
except KeyError:
pass
if not kwargs:
# No kwargs: we are getting the stored options
# Convert clientoptions to regular dict for sending.
session.msg(webclient_options=dict(clientoptions))
# Create a monitor. If a monitor already exists then it will replace
# the previous one since it would use the same idstring
from evennia.scripts.monitorhandler import MONITOR_HANDLER
MONITOR_HANDLER.add(account, "_saved_webclient_options",
_on_webclient_options_change,
idstring=session.sessid, persistent=False,
session=session)
else:
# kwargs provided: persist them to the account object
for key, value in kwargs.iteritems():
clientoptions[key] = value
|
bsd-3-clause
| -4,752,840,159,224,686,000 | 32.85439 | 119 | 0.618849 | false |
williballenthin/ida-netnode
|
netnode/test_netnode.py
|
1
|
4807
|
import random
import string
import logging
import contextlib
import idaapi
from netnode import netnode
# get the IDA version number
ida_major, ida_minor = list(map(int, idaapi.get_kernel_version().split(".")))
using_ida7api = (ida_major > 6)
TEST_NAMESPACE = '$ some.namespace'
def get_random_data(N):
'''
returns:
str: a string containing N ASCII characters.
'''
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(N))
@contextlib.contextmanager
def killing_netnode(namespace):
'''
wraps a netnode in a contextmanager that will
eventually destroy its contents.
probably only useful for testing when a clean state is required.
'''
n = netnode.Netnode(namespace)
try:
yield n
finally:
n.kill()
def test_basic_features():
'''
demonstrate the basic netnode API (like a dict)
'''
with killing_netnode(TEST_NAMESPACE) as n:
# there is nothing in the netnode to begin with
assert(False == (1 in n))
# when we add one key, there is one thing in it
n[1] = 'hello'
assert(True == (1 in n))
assert(n[1] == 'hello')
# but nothing else
assert(False == ('2' in n))
# then when we add a second thing, its also there
n['2'] = 'world'
assert(True == ('2' in n))
assert(len(list(n.keys())) == 2)
assert(list(n.keys())[0] == 1)
assert(list(n.keys())[1] == '2')
assert(len(list(n.values())) == 2)
assert(list(n.values())[0] == 'hello')
assert(list(n.values())[1] == 'world')
assert(len(list(n.items())) == 2)
# and when we delete the first item, only it is deleted
del n[1]
assert(False == (1 in n))
# and finally everything is gone
del n['2']
def test_large_data():
'''
demonstrate that netnodes support large data values.
'''
with killing_netnode(TEST_NAMESPACE) as n:
random_data = get_random_data(1024 * 8)
n[3] = random_data
assert(n[3] == random_data)
del n[3]
assert(dict(n) == {})
def test_hash_ordering():
'''
the following demonstrates that 'hashes' are iterated alphabetically.
this is an IDAPython implementation feature.
'''
with killing_netnode(TEST_NAMESPACE) as n:
m = n._n
def hashiter(m):
i = None
if using_ida7api:
i = m.hashfirst()
else:
i = m.hash1st()
while i != idaapi.BADNODE and i is not None:
yield i
if using_ida7api:
i = m.hashnext(i)
else:
i = m.hashnxt(i)
def get_hash_order(hiter):
return [k for k in hiter]
m.hashset('a', b'a')
assert get_hash_order(hashiter(m)) == ['a']
m.hashset('c', b'c')
assert get_hash_order(hashiter(m)) == ['a', 'c']
m.hashset('b', b'b')
assert get_hash_order(hashiter(m)) == ['a', 'b', 'c']
def test_iterkeys():
LARGE_VALUE = get_random_data(16 * 1024)
LARGE_VALUE2 = get_random_data(16 * 1024)
import zlib
assert(len(zlib.compress(LARGE_VALUE.encode("ascii"))) > 1024)
assert(len(zlib.compress(LARGE_VALUE2.encode("ascii"))) > 1024)
assert LARGE_VALUE != LARGE_VALUE2
with killing_netnode(TEST_NAMESPACE) as n:
n[1] = LARGE_VALUE
assert set(n.keys()) == set([1])
n[2] = LARGE_VALUE2
assert set(n.keys()) == set([1, 2])
assert n[1] != n[2]
with killing_netnode(TEST_NAMESPACE) as n:
n['one'] = LARGE_VALUE
assert set(n.keys()) == set(['one'])
n['two'] = LARGE_VALUE2
assert set(n.keys()) == set(['one', 'two'])
assert n['one'] != n['two']
with killing_netnode(TEST_NAMESPACE) as n:
n[1] = LARGE_VALUE
assert set(n.keys()) == set([1])
n[2] = LARGE_VALUE
assert set(n.keys()) == set([1, 2])
n['one'] = LARGE_VALUE
assert set(n.keys()) == set([1, 2, 'one'])
n['two'] = LARGE_VALUE
assert set(n.keys()) == set([1, 2, 'one', 'two'])
n[3] = "A"
assert set(n.keys()) == set([1, 2, 'one', 'two', 3])
n['three'] = "A"
assert set(n.keys()) == set([1, 2, 'one', 'two', 3, 'three'])
def main():
logging.basicConfig(level=logging.DEBUG)
# cleanup any existing data
netnode.Netnode(TEST_NAMESPACE).kill()
# rely on assert crashing the interpreter to indicate failure.
# pytest no longer works on py3 idapython.
test_basic_features()
test_large_data()
test_hash_ordering()
test_iterkeys()
print("netnode: tests: pass")
if __name__ == '__main__':
main()
|
apache-2.0
| -8,118,597,814,484,286,000 | 25.125 | 91 | 0.546911 | false |
hasteur/g13bot_tools_new
|
scripts/maintenance/compat2core.py
|
1
|
7684
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
A helper script to convert compat 1.0 scripts to the new core 2.0 framework.
NOTE: Please be aware that this script is not able to convert your code
completely. It assists you with some automatic replacements and gives
some warnings and hints for converting. Please refer to the converting guide
README-conversion.txt in the core framework folder and check your code afterwards.
The script asks for the .py file and converts it to
<scriptname>-core.py in the same directory. The following options are supported:
- warnonly: Do not convert the source but show warning messages. This is good
to check already merged scripts.
usage
to convert a script and show warnings about deprecated methods:
python pwb.py compat2core <scriptname>
to show warnings about deprecated methods:
python pwb.py compat2core <scriptname> -warnonly
"""
#
# (C) xqt, 2014-2017
# (C) Pywikibot team, 2014-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import codecs
import os
import re
import pywikibot
# be careful with replacement order!
replacements = (
# doc strings
('#\r?\n__version__',
'#\n# Automatically ported from compat branch by compat2core.py script\n'
'#\n__version__'),
('Pywikipedia bot team', 'Pywikibot team'),
# importing changes
('import wikipedia(?: as pywikibot)?', 'import pywikibot'),
('(?<!from pywikibot )import (config|pagegenerators)',
r'from pywikibot import \1'),
('(?<!from pywikibot.compat )import query',
'from pywikibot.compat import query'),
# remove deprecated libs
('import catlib\r?\n', ''),
('import userlib\r?\n', ''),
# change wikipedia to pywikibot, exclude URLs
(r'(?<!\.)wikipedia\.', u'pywikibot.'),
# site instance call
(r'pywikibot\.getSite\s*\(\s*', 'pywikibot.Site('),
# lang is different from code. We should use code in core
(r'([Ss])ite.lang(?:uage\(\))?', r'\1ite.code'),
# change compat library classes to pywikibot intrinsic classes
(r'catlib\.Category\s*\(\s*', 'pywikibot.Category('),
(r'catlib\.change_category\s*\((\s*)(?P<article>.+?),\s*(?P<oldcat>.+?),',
r'\g<article>.change_category(\1\g<oldcat>,'),
(r'userlib\.User\s*\(\s*', 'pywikibot.User('),
# change ImagePage to FilePage
(r'pywikibot\.ImagePage\s*\(\s*', 'pywikibot.FilePage('),
# deprecated title methods
(r'\.urlname\s*\(\s*\)', '.title(asUrl=True)'),
(r'\.urlname\s*\(\s*(?:withNamespace\s*=\s*)?(True|False)+\s*\)',
r'.title(asUrl=True, withNamespace=\1)'),
(r'\.titleWithoutNamespace\s*\(\s*\)', '.title(withNamespace=False)'),
(r'\.sectionFreeTitle\s*\(\s*\)', '.title(withSection=False)'),
(r'\.aslink\s*\(\s*\)', '.title(asLink=True)'),
# other deprecated methods
(r'(?<!site)\.encoding\s*\(\s*\)', '.site.encoding()'),
(r'\.newimages\s*\(\)', ".logevents(logtype='upload')"),
(r'\.newimages\s*\(([^)])', r".logevents(logtype='upload', \1"),
(r'\.getRestrictions\s*\(', '.protection('),
# new core methods and properties
(r'\.get\s*\(\s*get_redirect\s*=\s*True\s*\)', '.text'),
(r'(?:pywikibot|wikipedia)\.verbose', 'config.verbose_output'),
# stopme() is done by the framework itself
(r'(\s*)try\:\s*\r?\n\s+main\(\)\s*\r?\n\s*finally\:\s*\r?\n'
r'\s+pywikibot\.stopme\(\)',
r'\1main()'),
)
# some warnings which must be changed manually
warnings = (
('pywikibot.setAction(',
'setAction() no longer works; you must pass an explicit edit summary\n'
'message to put() or put_async()'),
('.removeImage(',
'Page.removeImage() is deprecated and does not work at core'),
('.replaceImage(',
'Page.replaceImage() is deprecated and does not work at core'),
('.getVersionHistory(',
'Page.getVersionHistory() returns a pywikibot.Timestamp object instead of\n'
'a MediaWiki one'),
('.contributions(',
'User.contributions() returns a pywikibot.Timestamp object instead of a\n'
'MediaWiki one'),
('.getFileMd5Sum(',
'FilePage.getFileMd5Sum() is deprecated should be replaced by '
'FilePage.latest_file_info.sha1'),
(' wikipedia.',
'"wikipedia" library has been changed to "pywikibot".'),
('from wikipedia import',
'"wikipedia" library has been changed to "pywikibot". Please find the\n'
'right way to import your object.'),
('query.GetData(',
'query.GetData() should be replaced by pywikibot.data.api.Request or\n'
'by a direct site request'),
('.verbose',
'verbose_output need "from pywikibot import config" first'),
('templatesWithParams(',
'the first item of each template info is a Page object of the template,\n'
'not the title. '
'Please refer README-conversion.txt and the documentation.'),
)
class ConvertBot(object):
"""Script conversion bot."""
def __init__(self, filename=None, warnonly=False):
"""Constructor."""
self.source = filename
self.warnonly = warnonly
def run(self):
"""Run the bot."""
self.get_source()
self.get_dest()
if not self.warnonly:
self.convert()
self.warning()
def get_source(self):
"""Get source script."""
while True:
if self.source is None:
self.source = pywikibot.input(
'Please input the .py file to convert '
'(no input to leave):')
if not self.source:
exit()
if not self.source.endswith(u'.py'):
self.source += '.py'
if os.path.exists(self.source):
break
self.source = os.path.join('scripts', self.source)
if os.path.exists(self.source):
break
pywikibot.output(u'%s does not exist. Please retry.' % self.source)
self.source = None
def get_dest(self):
"""Ask for destination script name."""
self.dest = u'%s-core.%s' % tuple(self.source.rsplit(u'.', 1))
if not self.warnonly and not pywikibot.input_yn(
u'Destination file is %s.' % self.dest,
default=True, automatic_quit=False):
pywikibot.output('Quitting...')
exit()
def convert(self):
"""Convert script."""
with codecs.open(self.source, 'r', 'utf-8') as f:
text = f.read()
for r in replacements:
text = re.sub(r[0], r[1], text)
with codecs.open(self.dest, 'w', 'utf-8') as g:
g.write(text)
def warning(self):
"""Show warnings and hints."""
filename = self.source if self.warnonly else self.dest
with codecs.open(filename, 'r', 'utf-8') as g:
lines = enumerate(g.readlines(), start=1)
for i, line in lines:
for w in warnings:
if w[0] in line:
pywikibot.warning(
'line {0}: {1}>>> {2}\n'.format(i, line, w[1]))
def main():
"""Process command line arguments and invoke bot."""
filename = None
warnonly = False
# Parse command line arguments for -help option
for arg in pywikibot.handleArgs():
if arg.startswith('-warnonly'):
warnonly = True
elif not arg.startswith('-'):
filename = arg
else:
pywikibot.warning(arg + ' is not supported')
bot = ConvertBot(filename, warnonly)
bot.run()
if __name__ == "__main__":
pywikibot.stopme() # we do not work on any site
main()
|
mit
| -2,691,116,725,510,714,000 | 34.906542 | 81 | 0.598386 | false |
ActiveState/code
|
recipes/Python/170995_Manipulating_infinite_lists_implemented/recipe-170995.py
|
1
|
3490
|
"""Collection of useful functions which work on infinite lists.
The infinite lists are actually generator objects. Note that
the functions will have side effects on the passed-in gLists.
"""
from __future__ import generators
def gInfinite(obj):
"""Return infinite list of repeated objects obj"""
while 1:
yield obj
gNone = gInfinite(None)
def gJoin(gl1, gl2):
"""Return gl1+gl2, i.e [gl1[0],...,gl1[n],gl2[0],...]
Apparently only useful when gl1 is finite.
"""
for x in gl1:
yield x
for x in gl2:
yield x
def gCon(x, xs):
"""Return [x, xs[0], xs[1], ...]"""
yield x
xs = iter(xs) # make sure it also works for ordinary list
while 1:
yield xs.next()
def gRange(start=0,step=1,stop=None):
"""Generalized version of range() - could be infinite
Note the difference in the order of arguments from those
of range().
"""
if stop is None:
x = int(start)
step = int(step)
while 1:
yield x
x += step
else:
for x in range(start, stop, step):
yield x
def gMap(f, *gLists):
"""Generalized version of map() - work on infinite list
Works differently from map(); it stops when the end of the shortest
gList is reached.
"""
if f is None:
f = lambda *x: x
gLists = map(iter, gLists) # make sure it also works for ordinary list
while 1:
yield f(*[gl.next() for gl in gLists])
def gZip(*gLists):
"""Generalized version of zip() - work on infinite list"""
for x in gMap(None, *gLists):
yield x
def gFilter(f, gList):
"""Generalized version of filter() - work on infinite list"""
if f is None:
f = lambda x: x
for x in gList:
# WARNING: may fall into forever loop
# without yielding anything if f(x) is
# always false from a certain x onwards
if f(x):
yield x
def gCompre(f, gList, cond = lambda *x: 1):
"""List Comprehension
[f(*x) for x in gList if cond(*x)]
"""
for x in gList:
# WARNING: may fall into forever loop
# without yielding anything if f(*x) is
# always false from a certain x onwards
if cond(*x):
yield f(*x)
def pList(gList, limit=20):
"""Return partial ordinary list of gList."""
if type(gList) is type(gNone):
return [pList(x[0]) for x in zip(gList, range(limit))]
else:
return gList
if __name__=='__main__':
print pList(gMap(lambda x,y,z: x+y+z, gRange(1), gRange(2,2), gRange(3,3)))
# -> [1+2+3, 2+4+6, 3+6+9, ...]
def f(x,y):
return '%s%i' % (x,y)
def g(x,y):
return y%3==0
print pList(gCompre(f, gZip(gInfinite('A'), gRange(2)), g))
# or pList(gCompre(lambda x,y: '%s%i' % (x,y), gZip(gInfinite('A'), gRange(2)), lambda x,y: y%3==0))
# -> ['A3', 'A6', 'A9', ...]
def sieve(gList):
"""Sieve of Eratosthene"""
x = gList.next()
xs = sieve(gFilter(lambda y: y % x != 0, gList))
for y in gCon(x, xs):
yield y
import sys
sys.setrecursionlimit(sys.maxint) # needed for bigger lists of primes
primes = sieve(gRange(2)) # infinite list of primes
print pList(primes, 100) # print the first 100 primes
print pList(primes, 500) # print subsequent 500 primes
# gList of gLists
print pList(gMap(gRange, gRange()))
|
mit
| -5,975,087,572,587,617,000 | 26.698413 | 104 | 0.566476 | false |
jiriprochazka/lnst
|
obsolete/netconfig.py
|
1
|
6220
|
#! /usr/bin/env python
"""
Netconfig tool
Copyright 2011 Red Hat, Inc.
Licensed under the GNU General Public License, version 2 as
published by the Free Software Foundation; see COPYING for details.
"""
__author__ = """
jpirko@redhat.com (Jiri Pirko)
"""
import getopt
import sys
import logging
import re
import os
from pprint import pprint
from NetConfig.NetConfig import NetConfig
from NetConfig.NetConfigDevice import NetConfigDeviceAllCleanup
from NetConfig.NetConfigDevNames import NetConfigDevNames
from NetTest.NetTestParse import NetConfigParse
from NetTest.NetTestParse import NetMachineConfigParse
from Common.XmlProcessing import XmlDomTreeInit
from Common.Logs import Logs
def usage():
"""
Print usage of this app
"""
print "Usage: netconfig.py [OPTION...] ACTION"
print ""
print "ACTION = [up | down | dump | cleanup | test]"
print ""
print " -d, --debug emit debugging messages"
print " -h, --help print this message"
print " -c, --config=FILE use this net configuration file"
print " -m, --machine-config=FILE use this machine configuration file"
sys.exit()
def prepare_machine_config(machine_file):
tree_init = XmlDomTreeInit()
dom = tree_init.parse_file(machine_file)
machine_dom = dom.getElementsByTagName("netmachineconfig")[0]
data = {"info":{}, "netdevices": {}, "netconfig": {}}
machine_parse = NetMachineConfigParse()
machine_parse.disable_events()
machine_parse.set_recipe(data)
machine_parse.set_machine(0, data)
machine_parse.parse(machine_dom)
return data
def prepare_netconfig(machine_file, config_file):
tree_init = XmlDomTreeInit()
data = prepare_machine_config(machine_file)
dom = tree_init.parse_file(config_file)
config_dom = dom.getElementsByTagName("netconfig")[0]
config_parse = NetConfigParse()
config_parse.disable_events()
config_parse.set_recipe(data)
config_parse.set_machine(0, data)
config_parse.parse(config_dom)
netconfig = NetConfig()
for key, entry in data["netconfig"].iteritems():
netconfig.add_interface_config(key, entry)
return netconfig
def netmachineconfig_to_xml(machine_data):
info = machine_data["info"]
hostname = ""
rootpass = ""
rpcport = ""
if "hostname" in info:
hostname = "hostname=\"%s\" " % info["hostname"]
if "rootpass" in info:
rootpass = "rootpass=\"%s\" " % info["rootpass"]
if "rpcport" in info:
rpcport = "rpcport=\"%s\" " % info["rpcport"]
info_tag = " <info %s%s%s/>\n" % (hostname, rootpass, rpcport)
devices = ""
for phys_id, netdev in machine_data["netdevices"].iteritems():
pid = "phys_id=\"%s\" " % phys_id
dev_type = ""
name = ""
hwaddr = ""
if "type" in netdev:
dev_type = "type=\"%s\" " % netdev["type"]
if "name" in netdev:
name = "name=\"%s\" " % netdev["name"]
if "hwaddr" in netdev:
hwaddr = "hwaddr=\"%s\" " % netdev["hwaddr"]
device_tag = " <netdevice %s%s%s%s/>\n" % (pid, dev_type,
name, hwaddr)
devices += device_tag
return "<netmachineconfig>\n" + info_tag + devices + "</netmachineconfig>"
def main():
"""
Main function
"""
try:
opts, args = getopt.getopt(
sys.argv[1:],
"dhc:m:a:",
["debug", "help", "config=", "machine-config=", "action="]
)
except getopt.GetoptError as err:
print str(err)
usage()
sys.exit()
debug = False
config_path = None
machine_config_path = None
for opt, arg in opts:
if opt in ("-d", "--debug"):
debug = True
elif opt in ("-h", "--help"):
usage()
elif opt in ("-c", "--config"):
config_path = arg
elif opt in ("-m", "--machine-config"):
machine_config_path = arg
Logs(debug)
logging.info("Started")
if not args:
logging.error("No action command passed")
usage();
action = args[0]
if action == "cleanup":
NetConfigDeviceAllCleanup()
return
if not machine_config_path:
logging.error("No machine config xml file passed")
usage();
machine_config_path = os.path.expanduser(machine_config_path)
if action == "refresh":
logging.info("Refreshing machine config")
machine_data = prepare_machine_config(machine_config_path)
dev_names = NetConfigDevNames()
for dev_id, netdev in machine_data["netdevices"].iteritems():
if "name" in netdev:
del netdev["name"]
dev_names.assign_name_by_scan(dev_id, netdev)
output = netmachineconfig_to_xml(machine_data)
handle = open(machine_config_path, "w")
handle.write(output)
handle.close()
return
if not config_path:
logging.error("No net config file/dir passed")
usage();
config_path = os.path.expanduser(config_path)
if action == "test":
'''
Go through specified directory and use all xmls and configs
'''
for root, dirs, files in os.walk(config_path):
for file_name in files:
config_file = os.path.join(config_path, file_name)
if not re.match(r'^.*\.xml$', config_file):
continue
logging.info("Processing config file \"%s\"", config_file)
net_config = prepare_netconfig(machine_config_path,
config_file)
net_config.configure_all()
net_config.deconfigure_all()
return
net_config = prepare_netconfig(machine_config_path, config_path)
if action == "up":
net_config.configure_all()
elif action == "down":
net_config.deconfigure_all()
elif action == "dump":
pprint(net_config.dump_config())
else:
logging.error("unknown action \"%s\"" % action)
if __name__ == "__main__":
main()
|
gpl-2.0
| -657,340,184,908,149,100 | 29.194175 | 89 | 0.581029 | false |
napalm-automation/napalm
|
napalm/base/netmiko_helpers.py
|
1
|
1807
|
# The contents of this file are licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import inspect
from netmiko import BaseConnection
def netmiko_args(optional_args):
"""Check for Netmiko arguments that were passed in as NAPALM optional arguments.
Return a dictionary of these optional args that will be passed into the Netmiko
ConnectHandler call.
"""
fields = inspect.getfullargspec(BaseConnection.__init__)
args = fields[0]
defaults = fields[3]
check_self = args.pop(0)
if check_self != "self":
raise ValueError("Error processing Netmiko arguments")
netmiko_argument_map = dict(zip(args, defaults))
# Netmiko arguments that are integrated into NAPALM already
netmiko_filter = ["ip", "host", "username", "password", "device_type", "timeout"]
# Filter out all of the arguments that are integrated into NAPALM
for k in netmiko_filter:
netmiko_argument_map.pop(k)
# Check if any of these arguments were passed in as NAPALM optional_args
netmiko_optional_args = {}
for k, v in netmiko_argument_map.items():
try:
netmiko_optional_args[k] = optional_args[k]
except KeyError:
pass
# Return these arguments for use with establishing Netmiko SSH connection
return netmiko_optional_args
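# Editorial sketch (not part of the original module): how NAPALM optional_args
# might be filtered down to Netmiko ConnectHandler keyword arguments. "secret"
# and "port" are valid Netmiko arguments; unknown keys are simply dropped.
#
#   optional_args = {"secret": "enable-pass", "port": 2222, "not_a_netmiko_arg": 1}
#   netmiko_kwargs = netmiko_args(optional_args)
#   # netmiko_kwargs -> {"secret": "enable-pass", "port": 2222}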
|
apache-2.0
| -5,194,614,369,650,901,000 | 36.645833 | 85 | 0.713337 | false |
zstackio/zstack-woodpecker
|
integrationtest/vm/multihosts/migrate/test_migrate_vm_console_access.py
|
1
|
1292
|
'''
New Integration test for testing console access after vm migration between hosts.
@author: Quarkonics
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.account_operations as acc_ops
vm = None
test_stub = test_lib.lib_get_specific_stub()
def test():
global vm
vm = test_stub.create_vr_vm('migrate_vm', 'imageName_s', 'l3VlanNetwork2')
vm.check()
test_stub.migrate_vm_to_random_host(vm)
vm.check()
session_uuid = acc_ops.login_as_admin()
console = test_lib.lib_get_vm_console_address(vm.get_vm().uuid, session_uuid)
if test_lib.lib_network_check(console.hostIp, console.port):
test_util.test_logger('[vm:] %s console on %s:%s is connectable' % (vm.get_vm().uuid, console.hostIp, console.port))
else:
test_util.test_fail('[vm:] %s console on %s:%s is not connectable' % (vm.get_vm().uuid, console.hostIp, console.port))
acc_ops.logout(session_uuid)
vm.destroy()
test_util.test_pass('Migrate VM Console Access Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
global vm
if vm:
try:
vm.destroy()
except:
pass
|
apache-2.0
| -5,268,923,533,346,941,000 | 28.761905 | 126 | 0.651703 | false |
xray/xray
|
xarray/core/options.py
|
1
|
5201
|
import warnings
DISPLAY_WIDTH = "display_width"
ARITHMETIC_JOIN = "arithmetic_join"
ENABLE_CFTIMEINDEX = "enable_cftimeindex"
FILE_CACHE_MAXSIZE = "file_cache_maxsize"
WARN_FOR_UNCLOSED_FILES = "warn_for_unclosed_files"
CMAP_SEQUENTIAL = "cmap_sequential"
CMAP_DIVERGENT = "cmap_divergent"
KEEP_ATTRS = "keep_attrs"
DISPLAY_STYLE = "display_style"
OPTIONS = {
DISPLAY_WIDTH: 80,
ARITHMETIC_JOIN: "inner",
ENABLE_CFTIMEINDEX: True,
FILE_CACHE_MAXSIZE: 128,
WARN_FOR_UNCLOSED_FILES: False,
CMAP_SEQUENTIAL: "viridis",
CMAP_DIVERGENT: "RdBu_r",
KEEP_ATTRS: "default",
DISPLAY_STYLE: "html",
}
_JOIN_OPTIONS = frozenset(["inner", "outer", "left", "right", "exact"])
_DISPLAY_OPTIONS = frozenset(["text", "html"])
def _positive_integer(value):
return isinstance(value, int) and value > 0
_VALIDATORS = {
DISPLAY_WIDTH: _positive_integer,
ARITHMETIC_JOIN: _JOIN_OPTIONS.__contains__,
ENABLE_CFTIMEINDEX: lambda value: isinstance(value, bool),
FILE_CACHE_MAXSIZE: _positive_integer,
WARN_FOR_UNCLOSED_FILES: lambda value: isinstance(value, bool),
KEEP_ATTRS: lambda choice: choice in [True, False, "default"],
DISPLAY_STYLE: _DISPLAY_OPTIONS.__contains__,
}
def _set_file_cache_maxsize(value):
from ..backends.file_manager import FILE_CACHE
FILE_CACHE.maxsize = value
def _warn_on_setting_enable_cftimeindex(enable_cftimeindex):
warnings.warn(
"The enable_cftimeindex option is now a no-op "
"and will be removed in a future version of xarray.",
FutureWarning,
)
_SETTERS = {
FILE_CACHE_MAXSIZE: _set_file_cache_maxsize,
ENABLE_CFTIMEINDEX: _warn_on_setting_enable_cftimeindex,
}
def _get_keep_attrs(default):
global_choice = OPTIONS["keep_attrs"]
if global_choice == "default":
return default
elif global_choice in [True, False]:
return global_choice
else:
raise ValueError(
"The global option keep_attrs must be one of" " True, False or 'default'."
)
class set_options:
"""Set options for xarray in a controlled context.
Currently supported options:
- ``display_width``: maximum display width for ``repr`` on xarray objects.
Default: ``80``.
- ``arithmetic_join``: DataArray/Dataset alignment in binary operations.
Default: ``'inner'``.
- ``file_cache_maxsize``: maximum number of open files to hold in xarray's
global least-recently-used cache. This should be smaller than your
system's per-process file descriptor limit, e.g., ``ulimit -n`` on Linux.
Default: 128.
- ``warn_for_unclosed_files``: whether or not to issue a warning when
unclosed files are deallocated (default False). This is mostly useful
for debugging.
- ``cmap_sequential``: colormap to use for nondivergent data plots.
Default: ``viridis``. If string, must be matplotlib built-in colormap.
Can also be a Colormap object (e.g. mpl.cm.magma)
- ``cmap_divergent``: colormap to use for divergent data plots.
Default: ``RdBu_r``. If string, must be matplotlib built-in colormap.
Can also be a Colormap object (e.g. mpl.cm.magma)
- ``keep_attrs``: rule for whether to keep attributes on xarray
Datasets/dataarrays after operations. Either ``True`` to always keep
attrs, ``False`` to always discard them, or ``'default'`` to use original
logic that attrs should only be kept in unambiguous circumstances.
Default: ``'default'``.
- ``display_style``: display style to use in jupyter for xarray objects.
Default: ``'text'``. Other options are ``'html'``.
You can use ``set_options`` either as a context manager:
>>> ds = xr.Dataset({"x": np.arange(1000)})
>>> with xr.set_options(display_width=40):
... print(ds)
<xarray.Dataset>
Dimensions: (x: 1000)
Coordinates:
* x (x) int64 0 1 2 3 4 5 6 ...
Data variables:
*empty*
Or to set global options:
>>> xr.set_options(display_width=80)
"""
def __init__(self, **kwargs):
self.old = {}
for k, v in kwargs.items():
if k not in OPTIONS:
raise ValueError(
"argument name %r is not in the set of valid options %r"
% (k, set(OPTIONS))
)
if k in _VALIDATORS and not _VALIDATORS[k](v):
if k == ARITHMETIC_JOIN:
expected = f"Expected one of {_JOIN_OPTIONS!r}"
elif k == DISPLAY_STYLE:
expected = f"Expected one of {_DISPLAY_OPTIONS!r}"
else:
expected = ""
raise ValueError(
f"option {k!r} given an invalid value: {v!r}. " + expected
)
self.old[k] = OPTIONS[k]
self._apply_update(kwargs)
def _apply_update(self, options_dict):
for k, v in options_dict.items():
if k in _SETTERS:
_SETTERS[k](v)
OPTIONS.update(options_dict)
def __enter__(self):
return
def __exit__(self, type, value, traceback):
self._apply_update(self.old)
|
apache-2.0
| -791,005,009,121,592,200 | 32.127389 | 86 | 0.617958 | false |
Inspq/ansible
|
lib/ansible/modules/cloud/ovirt/ovirt_networks.py
|
1
|
9528
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_networks
short_description: Module to manage logical networks in oVirt
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage logical networks in oVirt"
options:
name:
description:
- "Name of the network to manage."
required: true
state:
description:
- "Should the network be present or absent"
choices: ['present', 'absent']
default: present
data_center:
description:
- "Datacenter name where network reside."
description:
description:
- "Description of the network."
comment:
description:
- "Comment of the network."
vlan_tag:
description:
- "Specify VLAN tag."
vm_network:
description:
- "If I(True) network will be marked as network for VM."
- "VM network carries traffic relevant to the virtual machine."
mtu:
description:
- "Maximum transmission unit (MTU) of the network."
clusters:
description:
- "List of dictionaries describing how the network is managed in specific cluster."
- "C(name) - Cluster name."
- "C(assigned) - I(true) if the network should be assigned to cluster. Default is I(true)."
- "C(required) - I(true) if the network must remain operational for all hosts associated with this network."
- "C(display) - I(true) if the network should marked as display network."
- "C(migration) - I(true) if the network should marked as migration network."
- "C(gluster) - I(true) if the network should marked as gluster network."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create network
- ovirt_networks:
data_center: mydatacenter
name: mynetwork
vlan_tag: 1
vm_network: true
# Remove network
- ovirt_networks:
state: absent
name: mynetwork
'''
RETURN = '''
id:
description: "ID of the managed network"
returned: "On success if network is found."
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
network:
description: "Dictionary of all the network attributes. Network attributes can be found on your oVirt instance
at the following url: https://ovirt.example.com/ovirt-engine/api/model#types/network."
returned: "On success if network is found."
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
check_params,
create_connection,
equal,
ovirt_full_argument_spec,
search_by_name,
)
class NetworksModule(BaseModule):
def build_entity(self):
return otypes.Network(
name=self._module.params['name'],
comment=self._module.params['comment'],
description=self._module.params['description'],
data_center=otypes.DataCenter(
name=self._module.params['data_center'],
) if self._module.params['data_center'] else None,
vlan=otypes.Vlan(
self._module.params['vlan_tag'],
) if self._module.params['vlan_tag'] else None,
usages=[
otypes.NetworkUsage.VM if self._module.params['vm_network'] else None
] if self._module.params['vm_network'] is not None else None,
mtu=self._module.params['mtu'],
)
def update_check(self, entity):
return (
equal(self._module.params.get('comment'), entity.comment) and
equal(self._module.params.get('description'), entity.description) and
equal(self._module.params.get('vlan_tag'), getattr(entity.vlan, 'id', None)) and
equal(self._module.params.get('vm_network'), True if entity.usages else False) and
equal(self._module.params.get('mtu'), entity.mtu)
)
class ClusterNetworksModule(BaseModule):
def __init__(self, network_id, cluster_network, *args, **kwargs):
super(ClusterNetworksModule, self).__init__(*args, **kwargs)
self._network_id = network_id
self._cluster_network = cluster_network
def build_entity(self):
return otypes.Network(
id=self._network_id,
name=self._module.params['name'],
required=self._cluster_network.get('required'),
display=self._cluster_network.get('display'),
usages=[
otypes.NetworkUsage(usage)
for usage in ['display', 'gluster', 'migration']
if self._cluster_network.get(usage, False)
] if (
self._cluster_network.get('display') is not None or
self._cluster_network.get('gluster') is not None or
self._cluster_network.get('migration') is not None
) else None,
)
def update_check(self, entity):
return (
equal(self._cluster_network.get('required'), entity.required) and
equal(self._cluster_network.get('display'), entity.display) and
equal(
sorted([
usage
for usage in ['display', 'gluster', 'migration']
if self._cluster_network.get(usage, False)
]),
sorted([
str(usage)
for usage in getattr(entity, 'usages', [])
# VM + MANAGEMENT is part of root network
if usage != otypes.NetworkUsage.VM and usage != otypes.NetworkUsage.MANAGEMENT
]),
)
)
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
data_center=dict(default=None, required=True),
name=dict(default=None, required=True),
description=dict(default=None),
comment=dict(default=None),
vlan_tag=dict(default=None, type='int'),
vm_network=dict(default=None, type='bool'),
mtu=dict(default=None, type='int'),
clusters=dict(default=None, type='list'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
check_sdk(module)
check_params(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
clusters_service = connection.system_service().clusters_service()
networks_service = connection.system_service().networks_service()
networks_module = NetworksModule(
connection=connection,
module=module,
service=networks_service,
)
state = module.params['state']
search_params = {
'name': module.params['name'],
'datacenter': module.params['data_center'],
}
if state == 'present':
ret = networks_module.create(search_params=search_params)
# Update clusters networks:
if module.params.get('clusters') is not None:
for param_cluster in module.params.get('clusters'):
cluster = search_by_name(clusters_service, param_cluster.get('name'))
if cluster is None:
raise Exception("Cluster '%s' was not found." % param_cluster.get('name'))
cluster_networks_service = clusters_service.service(cluster.id).networks_service()
cluster_networks_module = ClusterNetworksModule(
network_id=ret['id'],
cluster_network=param_cluster,
connection=connection,
module=module,
service=cluster_networks_service,
)
if param_cluster.get('assigned', True):
ret = cluster_networks_module.create()
else:
ret = cluster_networks_module.remove()
elif state == 'absent':
ret = networks_module.remove(search_params=search_params)
module.exit_json(**ret)
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
|
gpl-3.0
| -7,966,133,857,572,089,000 | 34.552239 | 120 | 0.592045 | false |
tanzaho/python-goose
|
tests/extractors.py
|
1
|
18282
|
# -*- coding: utf-8 -*-
"""\
This is a Python port of "Goose" originally licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by Xavier Grangier for Recrutae
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import json
import re
from base import BaseMockTests, MockResponse
from goose import Goose
from goose.utils import FileHelper
from goose.configuration import Configuration
from goose.text import StopWordsChinese
from goose.text import StopWordsArabic
from goose.text import StopWordsKorean
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
class MockResponseExtractors(MockResponse):
def content(self, req):
current_test = self.cls._get_current_testname()
path = os.path.join(CURRENT_PATH, "data", "extractors", "%s.html" % current_test)
path = os.path.abspath(path)
content = FileHelper.loadResourceFile(path)
return content
class TestExtractionBase(BaseMockTests):
"""\
Extraction test case
"""
callback = MockResponseExtractors
def getRawHtml(self):
return self.load_test_file('.html')
def loadData(self):
content = self.load_test_file('.json')
self.data = json.loads(content)
def load_content_html(self):
self.expected_content_html = self.load_test_file('.content.html')
def load_test_file(self, suffix):
suite, module, cls, func = self.id().split('.')
path = os.path.join(CURRENT_PATH, "data", module, "%s%s" % (func, suffix))
path = os.path.abspath(path)
try:
return FileHelper.loadResourceFile(path)
except IOError:
pass
def assert_cleaned_text(self, field, expected_value, result_value):
"""\
"""
# # TODO : handle verbose level in tests
# print "\n=======================::. ARTICLE REPORT %s .::======================\n" % self.id()
# print 'expected_value (%s) \n' % len(expected_value)
# print expected_value
# print "-------"
# print 'result_value (%s) \n' % len(result_value)
# print result_value
# cleaned_text is Null
msg = u"Resulting article text was NULL!"
self.assertNotEqual(result_value, None, msg=msg)
# cleaned_text length
msg = u"Article text was not as long as expected beginning!"
self.assertTrue(len(expected_value) <= len(result_value), msg=msg)
# clean_text value
result_value = result_value[0:len(expected_value)]
msg = u"The beginning of the article text was not as expected!"
self.assertEqual(expected_value, result_value, msg=msg)
def assert_tags(self, field, expected_value, result_value):
"""\
"""
# as we have a set in expected_value and a list in result_value
# make result_value a set
expected_value = set(expected_value)
# check if both have the same number of items
msg = (u"expected tags set and result tags set"
u"don't have the same number of items")
self.assertEqual(len(result_value), len(expected_value), msg=msg)
# check if each tag in result_value is in expected_value
for tag in result_value:
self.assertTrue(tag in expected_value)
def runArticleAssertions(self, article, fields):
"""\
"""
for field in fields:
expected_value = self.data['expected'][field]
result_value = getattr(article, field, None)
# custom assertion for a given field
assertion = 'assert_%s' % field
if hasattr(self, assertion):
getattr(self, assertion)(field, expected_value, result_value)
continue
# default assertion
msg = u"Error %s" % field
self.assertEqual(expected_value, result_value, msg=msg)
def assert_content_html(self, article):
        expected_content_html = re.sub(r'\s', '', self.expected_content_html)
        actual_content_html = re.sub(r'\s', '', article.content_html).decode("utf8")
msg = u"HTML content is incorrect\n\n"
msg += "Expected: %s\n\n" % self.expected_content_html
msg += "Actual: %s" % article.content_html.decode("utf8")
self.assertEqual(expected_content_html, actual_content_html, msg=msg)
def extract(self, instance):
article = instance.extract(url=self.data['url'])
return article
def getConfig(self):
config = Configuration()
config.enable_image_fetching = False
return config
def getArticle(self):
"""\
"""
# load test case data
self.loadData()
self.load_content_html()
# basic configuration
# no image fetching
config = self.getConfig()
self.parser = config.get_parser()
# target language
# needed for non english language most of the time
target_language = self.data.get('target_language')
if target_language:
config.target_language = target_language
config.use_meta_language = False
# run goose
g = Goose(config=config)
return self.extract(g)
class TestExtractions(TestExtractionBase):
def test_allnewlyrics1(self):
return 'pending'
article = self.getArticle()
fields = ['title', 'cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_cnn1(self):
return 'pending'
article = self.getArticle()
fields = ['title', 'cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_businessWeek1(self):
return 'pending'
article = self.getArticle()
fields = ['title', 'cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_businessWeek2(self):
return 'pending'
article = self.getArticle()
fields = ['title', 'cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_businessWeek3(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_cbslocal(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_elmondo1(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_elpais(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_liberation(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_lefigaro(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_techcrunch1(self):
return 'pending'
article = self.getArticle()
fields = ['title', 'cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_foxNews(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_aolNews(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_huffingtonPost2(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_testHuffingtonPost(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text', 'meta_description', 'title', ]
self.runArticleAssertions(article=article, fields=fields)
def test_espn(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_engadget(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_msn1(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
# #########################################
# # FAIL CHECK
# # UNICODE
# def test_guardian1(self):
# article = self.getArticle()
# fields = ['cleaned_text']
# self.runArticleAssertions(article=article, fields=fields)
def test_time(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text', 'title']
self.runArticleAssertions(article=article, fields=fields)
def test_time2(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_cnet(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_yahoo(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_politico(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_businessinsider1(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_businessinsider2(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_businessinsider3(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_cnbc1(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_marketplace(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_issue24(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_issue25(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_issue28(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_issue32(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_issue4(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_gizmodo1(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text', 'meta_description', 'meta_keywords']
self.runArticleAssertions(article=article, fields=fields)
def test_mashable_issue_74(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_usatoday_issue_74(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_okaymarketing(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
def test_bbc(self):
article = self.getArticle()
self.assert_content_html(article)
def test_huffingtonpost(self):
article = self.getArticle()
self.assert_content_html(article)
def test_theguardian(self):
article = self.getArticle()
self.assert_content_html(article)
def test_blockquotes(self):
article = self.getArticle()
self.assert_content_html(article)
def test_open_graph_content(self):
article = self.getArticle()
self.assert_content_html(article)
def test_clean_bad_tags(self):
article = self.getArticle()
self.assert_content_html(article)
def test_embedded_media_items(self):
article = self.getArticle()
self.assert_content_html(article)
class TestKnownHosts(TestExtractionBase):
def test_known_host_selectors(self):
article = self.getArticle()
self.assert_content_html(article)
def test_known_host_selectors_with_regexs_references(self):
article = self.getArticle()
self.assert_content_html(article)
class TestRelativePaths(TestExtractionBase):
def test_relative_paths(self):
article = self.getArticle()
self.assert_content_html(article)
def test_tags_with_no_path(self):
article = self.getArticle()
self.assert_content_html(article)
class TestReplacingAttributes(TestExtractionBase):
def test_replacing_attributes(self):
article = self.getArticle()
self.assert_content_html(article)
class TestPublishDate(TestExtractionBase):
def test_publish_date(self):
article = self.getArticle()
self.runArticleAssertions(article=article, fields=['publish_date'])
def test_publish_date_rnews(self):
article = self.getArticle()
self.runArticleAssertions(article=article, fields=['publish_date'])
def test_publish_date_article(self):
article = self.getArticle()
self.runArticleAssertions(article=article, fields=['publish_date'])
class TestMetaDescription(TestExtractionBase):
def test_meta_description(self):
article = self.getArticle()
self.runArticleAssertions(article=article, fields=['meta_description'])
class TestExtractWithUrl(TestExtractionBase):
def test_get_canonical_url(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text', 'canonical_link']
self.runArticleAssertions(article=article, fields=fields)
class TestExtractChinese(TestExtractionBase):
def getConfig(self):
config = super(TestExtractChinese, self).getConfig()
config.stopwords_class = StopWordsChinese
return config
def test_bbc_chinese(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
class TestExtractArabic(TestExtractionBase):
def getConfig(self):
config = super(TestExtractArabic, self).getConfig()
config.stopwords_class = StopWordsArabic
return config
def test_cnn_arabic(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text']
self.runArticleAssertions(article=article, fields=fields)
class TestExtractKorean(TestExtractionBase):
def getConfig(self):
config = super(TestExtractKorean, self).getConfig()
config.stopwords_class = StopWordsKorean
return config
def test_donga_korean(self):
return 'pending'
article = self.getArticle()
fields = ['cleaned_text', 'meta_description', 'meta_keywords']
self.runArticleAssertions(article=article, fields=fields)
class TestExtractionsRaw(TestExtractions):
def extract(self, instance):
article = instance.extract(raw_html=self.getRawHtml())
return article
def test_bbc(self):
return 'pending'
class TestArticleTags(TestExtractionBase):
def test_tags_kexp(self):
article = self.getArticle()
fields = ['tags']
self.runArticleAssertions(article=article, fields=fields)
def test_tags_deadline(self):
article = self.getArticle()
fields = ['tags']
self.runArticleAssertions(article=article, fields=fields)
def test_tags_wnyc(self):
article = self.getArticle()
fields = ['tags']
self.runArticleAssertions(article=article, fields=fields)
def test_tags_cnet(self):
article = self.getArticle()
fields = ['tags']
self.runArticleAssertions(article=article, fields=fields)
def test_tags_abcau(self):
"""
Test ABC Australia page with "topics" tags
"""
article = self.getArticle()
fields = ['tags']
self.runArticleAssertions(article=article, fields=fields)
|
apache-2.0
| -3,193,002,466,137,419,000 | 30.905759 | 104 | 0.639481 | false |
SotongDJ/SotongDJ-PythonLab
|
Exp6-wget-batch-tools/progress/main.py
|
1
|
1524
|
import os
def status():
print "----RESULT-----------"
os.system("ls -1>progress/file.tmp")
for wgetlog in open("progress/file.tmp").read().splitlines():
if "wget-log" in wgetlog:
percentage="0%"
for line in open(wgetlog).read().splitlines():
if "K ." in line or "100%" in line or "K =" in line:
if "100%" not in line:
#print "mark0:"+percentage
tpo=int(percentage.replace("%",""))
for sect in line.split(" "):
# print "rub:"+sect
if "%" in sect:
a=sect
#print "a:"+a
#print "mark1:"+percentage
tpn=int(a.replace("%",''))
if tpn > tpo:
percentage=a
# print "mark2:"+percentage
#print "mark3:"+percentage
elif "100%" in line:
percentage="Finished"
print wgetlog+":"+percentage
print "---------------------"
command="i"
while command!="n":
command=raw_input("Which you want?\n\"w\" for start a new wget process\n\"c\" for check the status and repeat this sctipt\n\"n\" for the end\nYour selection:\n")
if command=="w":
os.system("wget -bc \""+raw_input("Copy and paste your target url:\n")+"\"")
elif command=="c":
status()
|
gpl-3.0
| 8,098,244,853,867,098,000 | 42.542857 | 165 | 0.434383 | false |
onepercentclub/django-token-auth
|
setup.py
|
1
|
1301
|
#!/usr/bin/env python
import os
import setuptools
import token_auth
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setuptools.setup(
name="django-token-auth",
version=token_auth.__version__,
packages=setuptools.find_packages(),
include_package_data=True,
license='BSD',
description='Token Authentication for Bluebottle',
long_description=README,
url="http://onepercentclub.com",
author="1%Club Developers",
author_email="devteam@onepercentclub.com",
install_requires=[
'Django>=1.6.8',
'pycrypto>=2.6.1',
'python-saml==2.1.7'
],
tests_require=[
'django_nose>=1.4',
'factory-boy==2.3.1',
'django-setuptest==0.1.4',
'mock==1.0.1'
],
test_suite="token_auth.runtests.runtests",
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: None',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content'
]
)
|
gpl-2.0
| -2,578,384,992,987,437,000 | 27.911111 | 78 | 0.607225 | false |
Superjom/NeuralNetworks
|
models/mlp.py
|
1
|
5692
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Feb 24, 2014
@author: Chunwei Yan @ PKU
@mail: yanchunwei@outlook.com
'''
import sys
sys.path.append('..')
import numpy
import theano
from theano import tensor as T
from softmax_regression import SoftmaxRegression
class HiddenLayer(object):
''' a layer of neurons '''
def __init__(self, input, n_visible, n_output, rng,
activation=T.tanh, W=None, b=None, learning_rate=0.01):
if not rng:
rng = numpy.random.RandomState(1234)
self.rng = rng
#print 'n_output, n_visible', n_output, n_visible
if not W:
initial_W = numpy.asarray(
rng.uniform(
low=-4 * numpy.sqrt(6. / (n_output + n_visible)),
high=4 * numpy.sqrt(6. / (n_output + n_visible)),
size=(n_visible, n_output)), dtype=theano.config.floatX)
if activation == theano.tensor.nnet.sigmoid:
initial_W = numpy.asarray(
rng.uniform(
low=-16 * numpy.sqrt(6. / (n_output + n_visible)),
high=16 * numpy.sqrt(6. / (n_output + n_visible)),
size=(n_visible, n_output)), dtype=theano.config.floatX)
W = theano.shared(
value=initial_W,
name='W',
borrow=True,
)
T.unbroadcast(W)
if not b:
b_values = numpy.zeros((n_output,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.X = input
self.W = W
self.b = b
self.learning_rate = learning_rate
self.n_visible, self.n_output = n_visible, n_output
self.activation = activation
self.params = [self.W, self.b]
        # an output hook
self.output = self.activation(
T.dot(self.X, self.W) + self.b)
class MultiLayerPerceptron(object):
def __init__(self, rng=None, input=None, n_visible=100, n_hidden=50, n_output=10,
L1_reg=0.0, L2_reg=0.01, learning_rate=0.001):
'''
a network with two layers
:parameters:
n_visible: int
                number of visible (input) nodes
            n_hidden: int
                number of hidden nodes
            n_output: int
                number of output nodes
'''
self.x = input
self.learning_rate = learning_rate
self.L1_reg, self.L2_reg = L1_reg, L2_reg
if not input:
self.x = T.fvector('x')
# create two layers
self.hidden_layer = HiddenLayer(
rng = rng,
input = input,
n_visible = n_visible,
n_output = n_hidden,
activation = T.tanh
)
self.output_layer = SoftmaxRegression(
input = self.hidden_layer.output,
n_features = n_hidden,
n_states = n_output,
)
# methods mapper
self.negative_log_likelihood = self.output_layer.negative_log_likelihood
self.errors = self.output_layer.errors
def get_cost(self):
self.y = T.bscalar('y')
self.L1 = abs(self.hidden_layer.W).sum() \
+ abs(self.output_layer.W).sum()
self.L2_sqr = (self.hidden_layer.W ** 2).sum() \
+ (self.output_layer.W ** 2).sum()
self.params = self.hidden_layer.params + self.output_layer.params
self.cost = self.negative_log_likelihood(self.y) \
+ self.L2_reg * self.L2_sqr
#+ self.L1_reg * self.L1
return self.cost
def compile(self):
cost = self.get_cost()
# predict model
self.predict = theano.function(
inputs = [self.x],
outputs = self.output_layer.y_pred
)
gparams = []
for param in self.params:
gparam = T.grad(cost, param)
gparams.append(gparam)
updates = []
for param, gparam in zip(self.params, gparams):
up = T.cast(param - self.learning_rate * gparam,
theano.config.floatX)
updates.append(
(param, up))
#print 'updates', updates
# train model
self.trainer = theano.function(
inputs = [self.x, self.y],
outputs = self.errors(self.y),
updates = updates)
if __name__ == '__main__':
x = T.fvector('x')
mlp = MultiLayerPerceptron(
input = x,
n_visible = 50,
n_hidden = 20,
n_output = 5,
learning_rate = 0.03,
)
print 'type of W', type(mlp.hidden_layer.W)
mlp.compile()
rng = numpy.random
x_set = rng.randn(400, 50).astype(theano.config.floatX)
y_set = rng.randint(size=400, low=0, high=5).astype(theano.config.floatX)
n_rcds = x_set.shape[0]
#print 'hid.b:\t', mlp.hidden_layer.b.eval()
#print 'output.b:\t', mlp.output_layer.b.eval()
for no in xrange(100):
errors = []
y_preds = []
for i in xrange(n_rcds):
x = numpy.array(x_set[i]).astype(
theano.config.floatX)
y = y_set[i]
y_pred = mlp.predict(x)[0]
error = mlp.trainer(x, y)
#print 'error', error
errors.append(error)
y_preds.append(y_pred)
e = numpy.array(errors).mean()
print "%dth\t%f" % (no, e)
print "original:\t", y_set[:30]
print "predict:\t", y_preds[:30]
#print 'hid.b:\t', mlp.hidden_layer.b.eval()
#print 'output.b:\t', mlp.output_layer.b.eval()
if __name__ == "__main__":
pass
|
apache-2.0
| -4,254,905,750,425,097,700 | 29.276596 | 85 | 0.514055 | false |
cloud-fan/spark
|
python/pyspark/pandas/tests/data_type_ops/test_binary_ops.py
|
1
|
6682
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark import pandas as ps
from pyspark.pandas.config import option_context
from pyspark.pandas.tests.data_type_ops.testing_utils import TestCasesUtils
from pyspark.testing.pandasutils import PandasOnSparkTestCase
class BinaryOpsTest(PandasOnSparkTestCase, TestCasesUtils):
@property
def pser(self):
return pd.Series([b"1", b"2", b"3"])
@property
def psser(self):
return ps.from_pandas(self.pser)
def test_add(self):
psser = self.psser
pser = self.pser
self.assert_eq(psser + b"1", pser + b"1")
self.assert_eq(psser + psser, pser + pser)
self.assert_eq(psser + psser.astype("bytes"), pser + pser.astype("bytes"))
self.assertRaises(TypeError, lambda: psser + "x")
self.assertRaises(TypeError, lambda: psser + 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser + psser)
self.assert_eq(self.psser + self.psser, self.pser + self.pser)
def test_sub(self):
self.assertRaises(TypeError, lambda: self.psser - "x")
self.assertRaises(TypeError, lambda: self.psser - 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser - psser)
def test_mul(self):
self.assertRaises(TypeError, lambda: self.psser * "x")
self.assertRaises(TypeError, lambda: self.psser * 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser * psser)
def test_truediv(self):
self.assertRaises(TypeError, lambda: self.psser / "x")
self.assertRaises(TypeError, lambda: self.psser / 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser / psser)
def test_floordiv(self):
self.assertRaises(TypeError, lambda: self.psser // "x")
self.assertRaises(TypeError, lambda: self.psser // 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser // psser)
def test_mod(self):
self.assertRaises(TypeError, lambda: self.psser % "x")
self.assertRaises(TypeError, lambda: self.psser % 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser % psser)
def test_pow(self):
self.assertRaises(TypeError, lambda: self.psser ** "x")
self.assertRaises(TypeError, lambda: self.psser ** 1)
with option_context("compute.ops_on_diff_frames", True):
for psser in self.pssers:
self.assertRaises(TypeError, lambda: self.psser ** psser)
def test_radd(self):
self.assert_eq(b"1" + self.psser, b"1" + self.pser)
self.assertRaises(TypeError, lambda: "x" + self.psser)
self.assertRaises(TypeError, lambda: 1 + self.psser)
def test_rsub(self):
self.assertRaises(TypeError, lambda: "x" - self.psser)
self.assertRaises(TypeError, lambda: 1 - self.psser)
def test_rmul(self):
self.assertRaises(TypeError, lambda: "x" * self.psser)
self.assertRaises(TypeError, lambda: 2 * self.psser)
def test_rtruediv(self):
self.assertRaises(TypeError, lambda: "x" / self.psser)
self.assertRaises(TypeError, lambda: 1 / self.psser)
def test_rfloordiv(self):
self.assertRaises(TypeError, lambda: "x" // self.psser)
self.assertRaises(TypeError, lambda: 1 // self.psser)
def test_rmod(self):
self.assertRaises(TypeError, lambda: 1 % self.psser)
def test_rpow(self):
self.assertRaises(TypeError, lambda: "x" ** self.psser)
self.assertRaises(TypeError, lambda: 1 ** self.psser)
def test_and(self):
self.assertRaises(TypeError, lambda: self.psser & True)
self.assertRaises(TypeError, lambda: self.psser & False)
self.assertRaises(TypeError, lambda: self.psser & self.psser)
def test_rand(self):
self.assertRaises(TypeError, lambda: True & self.psser)
self.assertRaises(TypeError, lambda: False & self.psser)
def test_or(self):
self.assertRaises(TypeError, lambda: self.psser | True)
self.assertRaises(TypeError, lambda: self.psser | False)
self.assertRaises(TypeError, lambda: self.psser | self.psser)
def test_ror(self):
self.assertRaises(TypeError, lambda: True | self.psser)
self.assertRaises(TypeError, lambda: False | self.psser)
def test_from_to_pandas(self):
data = [b"1", b"2", b"3"]
pser = pd.Series(data)
psser = ps.Series(data)
self.assert_eq(pser, psser.to_pandas())
self.assert_eq(ps.from_pandas(pser), psser)
def test_astype(self):
pser = self.pser
psser = self.psser
self.assert_eq(pd.Series(["1", "2", "3"]), psser.astype(str))
self.assert_eq(pser.astype("category"), psser.astype("category"))
cat_type = CategoricalDtype(categories=[b"2", b"3", b"1"])
self.assert_eq(pser.astype(cat_type), psser.astype(cat_type))
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.data_type_ops.test_binary_ops import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
apache-2.0
| -3,714,611,770,325,020,700 | 38.305882 | 87 | 0.65669 | false |
z123/build-a-saas-app-with-flask
|
catwatch/tests/billing/test_views.py
|
1
|
7627
|
from flask import url_for, json
from flask_babel import gettext as _
from catwatch.tests.lib.util import ViewTestMixin
from catwatch.tests.lib.assertions import assert_status_with_message
class TestBilling(ViewTestMixin):
def test_pricing_page(self):
""" Pricing page renders successfully. """
response = self.client.get(url_for('billing.pricing'))
assert_status_with_message(200, response, 'Sign up')
def test_pricing_page_logged_in(self):
""" Pricing page renders successfully. """
self.login()
response = self.client.get(url_for('billing.pricing'))
assert_status_with_message(200, response, 'Continue')
def test_pricing_page_as_subscriber(self, subscriptions):
""" Pricing page for subscribers should redirect to update. """
self.login(identity='subscriber@localhost.com')
response = self.client.get(url_for('billing.pricing'),
follow_redirects=True)
assert_status_with_message(200, response, 'Change plan')
def test_coupon_code_not_valid(self):
""" Coupon code should not be processed, """
self.login()
params = {'coupon_code': None}
response = self.client.post(url_for('billing.coupon_code'),
data=params, follow_redirects=True)
data = json.loads(response.data)
assert response.status_code == 422
assert data['error'] == _('Discount code cannot be processed.')
def test_coupon_code_not_redeemable(self):
""" Coupon code should be redeemable. """
self.login()
params = {'coupon_code': 'foo'}
response = self.client.post(url_for('billing.coupon_code'),
data=params, follow_redirects=True)
data = json.loads(response.data)
assert response.status_code == 404
assert data['error'] == _('Discount code not found.')
def test_subscription_create_page(self):
""" Subscription create page renders successfully. """
self.login()
response = self.client.get(url_for('billing.create'),
follow_redirects=True)
assert response.status_code == 200
def test_subscription_create_as_subscriber(self, subscriptions):
""" Subscribers should not be allowed to create a subscription. """
self.login(identity='subscriber@localhost.com')
response = self.client.get(url_for('billing.create'),
follow_redirects=True)
assert_status_with_message(200, response,
_('You already have an active subscription'
'.'))
def test_subscription_create(self, users, mock_stripe):
""" Subscription create requires javascript. """
self.login()
params = {
'stripe_key': 'cus_000',
'plan': 'gold',
'name': 'Foobar Johnson'
}
response = self.client.post(url_for('billing.create'),
data=params, follow_redirects=True)
assert_status_with_message(200, response,
_('You must enable Javascript for this '
'request.'))
def test_subscription_update_page_without_subscription(self):
""" Subscription update page redirects to pricing page. """
self.login()
response = self.client.get(url_for('billing.update'),
follow_redirects=True)
assert_status_with_message(200, response, "You're moments away")
def test_subscription_update_page(self, subscriptions):
""" Subscription update page renders successfully. """
self.login(identity='subscriber@localhost.com')
response = self.client.get(url_for('billing.update'),
follow_redirects=True)
assert_status_with_message(200, response,
"You're about to change plans")
def test_subscription_update(self, subscriptions, mock_stripe):
""" Subscription create adds a new subscription. """
self.login(identity='subscriber@localhost.com')
params = {
'submit_gold': ''
}
response = self.client.post(url_for('billing.update'),
data=params, follow_redirects=True)
assert response.status_code == 200
def test_subscription_cancel_page_without_subscription(self):
""" Subscription cancel page redirects to settings. """
self.login()
response = self.client.get(url_for('billing.cancel'),
follow_redirects=True)
assert_status_with_message(200, response,
_('You do not have an active subscription'
'.'))
def test_subscription_cancel_page(self, subscriptions):
""" Subscription cancel page renders successfully. """
self.login(identity='subscriber@localhost.com')
response = self.client.get(url_for('billing.cancel'),
follow_redirects=True)
assert response.status_code == 200
def test_subscription_cancel(self, subscriptions, mock_stripe):
""" Subscription cancel is successful. """
self.login(identity='subscriber@localhost.com')
response = self.client.post(url_for('billing.cancel'),
data={}, follow_redirects=True)
assert_status_with_message(200, response,
_('Sorry to see you go, your subscription '
'has been cancelled.'))
def test_subscription_update_payment_method_without_card(self):
""" Subscription update method without card should fail. """
self.login()
response = self.client.post(url_for('billing.update_payment_method'),
data={}, follow_redirects=True)
assert_status_with_message(200, response,
_('You do not have a payment method on '
'file.'))
def test_subscription_update_payment_method(self, subscriptions,
mock_stripe):
""" Subscription update payment requires javascript. """
self.login(identity='subscriber@localhost.com')
response = self.client.post(url_for('billing.update_payment_method'),
data={}, follow_redirects=True)
assert_status_with_message(200, response,
_('You must enable Javascript for this '
'request.'))
def test_subscription_billing_history(self, subscriptions, mock_stripe):
""" Subscription billing history should render successfully. """
self.login(identity='subscriber@localhost.com')
response = self.client.get(url_for('billing.billing_history'))
assert_status_with_message(200, response,
'Billing details and history')
def test_subscription_billing_history_without_sub(self, mock_stripe):
""" Subscription billing history without sub should still work. """
self.login()
response = self.client.get(url_for('billing.billing_history'))
assert_status_with_message(200, response,
'Billing details and history')
|
mit
| 1,646,622,906,478,730,200 | 38.931937 | 78 | 0.57126 | false |
youknowone/instantauth
|
python/instantauth/cryptors/aes.py
|
1
|
2071
|
"""
Install 'pycrypto' package to use this module
"""
from Crypto.Cipher import AES
from . import BaseCryptor
def add_padding(data, block_size):
data_len = len(data)
pad_len = (block_size - data_len) % block_size
if pad_len == 0:
pad_len = block_size
padding = chr(pad_len)
return ''.join((data, padding * pad_len))
def strip_padding(data):
padding = data[-1]
pad_len = ord(padding)
return data[:-pad_len]
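# Worked example (values are illustrative): add_padding('abc', 8) returns
# 'abc' + chr(5) * 5, and strip_padding() on that result gives back 'abc'.
# Data that already fills a block gains one full extra block of padding.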
def cut_key(key, key_size):
    while len(key) < key_size:  # pad the key with NUL bytes until it is long enough
key += chr(0) * key_size
return key[:key_size]
class AESCryptor(BaseCryptor):
BLOCK_SIZE = 16
    def __init__(self, bits=128, iv=''):  # a fixed IV is usually acceptable for short-lived, temporary data
        if bits not in (128, 192, 256):
            raise ValueError(bits)  # only 128-, 192- or 256-bit keys are supported
self.key_size = bits / 8
self.iv = cut_key(iv, self.BLOCK_SIZE)
def encrypt_stream(self, stream, secret_key):
secret_key = cut_key(secret_key, self.key_size)
cipher = AES.new(secret_key, AES.MODE_CBC, self.iv)
padded = add_padding(stream, self.BLOCK_SIZE)
encrypted = cipher.encrypt(padded)
return encrypted
def decrypt_stream(self, stream, secret_key):
secret_key = cut_key(secret_key, self.key_size)
cipher = AES.new(secret_key, AES.MODE_CBC, self.iv)
decrypted = cipher.decrypt(stream)
return strip_padding(decrypted)
def encrypt_data(self, data, private_key, secret_key):
secret_key = cut_key(secret_key, self.key_size)
iv = cut_key(private_key, self.BLOCK_SIZE)
cipher = AES.new(secret_key, AES.MODE_CBC, iv)
padded = add_padding(data, self.BLOCK_SIZE)
encrypted = cipher.encrypt(padded)
return encrypted
def decrypt_data(self, data, private_key, secret_key):
secret_key = cut_key(secret_key, self.key_size)
iv = cut_key(private_key, self.BLOCK_SIZE)
cipher = AES.new(secret_key, AES.MODE_CBC, iv)
decrypted = cipher.decrypt(data)
return strip_padding(decrypted)
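# A minimal round-trip sketch; the key, IV and plaintext values below are
# illustrative assumptions, and pycrypto must be installed. Because of the
# relative import above, run this as a module rather than as a plain script.
if __name__ == '__main__':
    cryptor = AESCryptor(bits=128, iv='example-iv')
    secret = 'shared-secret-key'
    private = 'per-user-private-key'
    token = cryptor.encrypt_data('hello world', private, secret)
    assert cryptor.decrypt_data(token, private, secret) == 'hello world'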
|
bsd-2-clause
| 146,333,978,805,470,620 | 30.861538 | 89 | 0.62337 | false |
manasgarg/flask-sauth
|
flask_sauth/forms.py
|
1
|
3424
|
#!/usr/bin/env python
from datetime import datetime
from wtforms import Form, TextField, PasswordField, HiddenField, ValidationError
from wtforms import validators as v
from models import User, authenticate
class RegistrationForm( Form):
name = TextField( validators=[v.DataRequired(), v.Length(max=256)])
    email = TextField( validators=[v.DataRequired(), v.Email(), v.Length(max=256)])
password = PasswordField( validators=[v.DataRequired(), v.Length(max=256)])
next = HiddenField()
def validate_email( form, field):
email = field.data.lower().strip()
if( User.objects(email=email).count()):
raise ValidationError( "Hey! This email is already registered with us. Did you forget your password?")
def save( self):
user = User.create_user( self.name.data, self.email.data, self.password.data, email_verified=True)
user.save()
return user
class LoginForm( Form):
email = TextField(u"Email Address", validators=[v.DataRequired(), v.Email()])
password = PasswordField( validators=[v.DataRequired()])
next = HiddenField()
def validate_email( self, field):
if( not User.objects( email=field.data).count()):
raise ValidationError( "This email address is not registered.")
def validate_password( self, field):
self.user_cache = authenticate(email=self.email.data, password=field.data)
if self.user_cache is None:
raise ValidationError("Please enter correct information. Note that password is case-sensitive.")
elif not self.user_cache.is_email_activated:
raise ValidationError("This account is inactive.")
class ResetPasswordForm(Form):
email = TextField(u"Email Address", validators=[v.DataRequired(), v.Email()])
def validate_email( self, field):
email = field.data.lower().strip()
if( User.objects.filter( email=email).count() == 0):
raise ValidationError( "This email address is not registered with us.")
field.data = email
return True
class NewPasswordForm( Form):
password1 = PasswordField( "Please enter your password", validators=[v.DataRequired()])
password2 = PasswordField( "Please re-enter your password", validators=[v.DataRequired()])
def validate_password2( self, field):
password1 = self.password1.data
password2 = self.password2.data
if( password1 != password2):
raise ValidationError( "The passwords don't match.")
class ChangePasswordForm( Form):
password = PasswordField( "Current Password", validators=[v.DataRequired()])
password1 = PasswordField( "New Password", validators=[v.DataRequired()])
password2 = PasswordField( "Re-enter New Password", validators=[v.DataRequired()])
def __init__( self, *args, **kwargs):
super( ChangePasswordForm, self).__init__( *args, **kwargs)
def validate_password( self, field):
from flask_login import current_user
user_cache = authenticate(email=current_user.email, password=field.data)
if( not user_cache):
raise ValidationError( "The current password that you entered is wrong.")
def validate_password2( self, field):
password1 = self.password1.data
password2 = self.password2.data
if( password1 != password2):
raise ValidationError( "The passwords don't match.")
|
bsd-3-clause
| -6,195,643,495,887,408,000 | 39.282353 | 114 | 0.672897 | false |
k-zen/Ama
|
ama/file_listener.py
|
1
|
2550
|
# -*- coding: utf-8 -*-
"""
Class belonging to the Ama data-processing and inference module.
.. module:: file_listener
:platform: Unix
   :synopsis: Helper functions for detecting changes in directories, e.g. when a new radar file is added.
.. moduleauthor:: Andreas P. Koenzen <akc@apkc.net>
"""
import ama.utils as utils
import ama.processor as processor
import os
import time
from watchdog.events import FileSystemEventHandler
__author__ = "Andreas P. Koenzen"
__copyright__ = "Copyright 2016, Proyecto de Tesis / Universidad Católica de Asunción."
__credits__ = "Andreas P. Koenzen"
__license__ = "BSD"
__version__ = "0.1"
__maintainer__ = "Andreas P. Koenzen"
__email__ = "akc@apkc.net"
__status__ = "Prototype"
class FileListener(FileSystemEventHandler):
"""
    Handler for changes in a previously configured directory.
"""
layer = 0
"""
    int: The data layer to process.
"""
def __init__(self, layer):
self.layer = layer
def on_created(self, event):
        # sleep the thread for 15 seconds to give the file time to be copied completely.
time.sleep(15)
print(utils.Colors.BOLD + "INFO: Detectado archivo nuevo. Procesando..." + utils.Colors.ENDC)
try:
if utils.Utils.should_process_file(event.src_path, processor.Processor.FILE_SIZE_LIMIT, True):
print(utils.Colors.BOLD + "ARCHIVO: {0}".format(event.src_path) + utils.Colors.ENDC)
                # process the file.
processor.Processor().single_correlate_dbz_to_location_to_json(event.src_path, self.layer)
else:
print(utils.Colors.FAIL + "ERROR: El archivo detectado no cumple con los requisitos de procesamiento." + utils.Colors.ENDC)
print(utils.Colors.FAIL + "ARCHIVO: {0}".format(event.src_path) + utils.Colors.ENDC)
except Exception as e:
print(utils.Colors.FAIL + "ERROR: Procesando archivo nuevo." + utils.Colors.ENDC)
print(utils.Colors.FAIL + "DESC: {0}".format(e) + utils.Colors.ENDC)
finally:
            # always delete the file that was processed.
if processor.Processor.SHOULD_REMOVE_PROCESSED_FILES == 1:
try:
os.remove(event.src_path)
except Exception as e:
print(utils.Colors.FAIL + "ERROR: Borrando archivo original." + utils.Colors.ENDC)
print(utils.Colors.FAIL + "DESC: {0}".format(e) + utils.Colors.ENDC)
|
bsd-2-clause
| -5,032,125,557,542,773,000 | 37.560606 | 139 | 0.636935 | false |
ntoll/foox
|
foox/species/third.py
|
1
|
12608
|
"""
This module encapsulates the behaviour of third species counterpoint.
"""
import random
import foox.ga as ga
from .utils import is_parallel, make_generate_function, is_stepwise_motion
# Some sane defaults.
DEFAULT_POPULATION_SIZE = 4000
DEFAULT_MAX_GENERATION = 200
DEFAULT_MUTATION_RANGE = 3
DEFAULT_MUTATION_RATE = 0.4
# Intervals between notes that are allowed in third species counterpoint.
CONSONANCES = [2, 4, 5, 7, 9, 11]
DISSONANCES = [3, 6, 8, 10]
VALID_ODD_BEAT_INTERVALS = CONSONANCES
VALID_EVEN_BEAT_INTERVALS = CONSONANCES + DISSONANCES
# Various rewards and punishments used with different aspects of the solution.
# Reward / punishment to ensure the solution starts correctly (5th or 8ve).
REWARD_FIRST = 1
PUNISH_FIRST = 0.1
# Reward / punishment to ensure the solution finishes correctly (at an 8ve).
REWARD_LAST = 1
PUNISH_LAST = 0.1
# Reward / punishment to ensure the penultimate note is step wise onto the
# final note.
REWARD_LAST_STEP = 2
PUNISH_LAST_STEP = 0.7
# Reward / punish contrary motion onto the final note.
REWARD_LAST_MOTION = 3
PUNISH_LAST_MOTION = 0.1
# Punishment if the penultimate note is a repeated note.
PUNISH_REPEATED_PENULTIMATE = 0.1
# Make sure the movement to the penultimate note isn't from too
# far away (not greater than a third).
REWARD_PENULTIMATE_PREPARATION = 1
PUNISH_PENULTIMATE_PREPARATION = 0.7
# Punish parallel fifths or octaves.
PUNISH_PARALLEL_FIFTHS_OCTAVES = 0.5
# Punishment for too many parallel/similar movements.
PUNISH_PARALLEL = 0.1
# Reward / punish correct stepwise movement around dissonances.
REWARD_STEPWISE_MOTION = 0.5
PUNISH_STEPWISE_MOTION = 0.1
# Punishment for too many repeated notes.
PUNISH_REPEATS = 1
# Punishment for too many large leaps in the melody.
PUNISH_LEAPS = 0.7
# The highest score a candidate solution may achieve. (Hack!)
MAX_REWARD = (
REWARD_FIRST
+ REWARD_LAST
+ REWARD_LAST_STEP
+ REWARD_LAST_MOTION
+ REWARD_PENULTIMATE_PREPARATION
)
def create_population(number, cantus_firmus):
"""
Will create a new list of random candidate solutions of the specified
number given the context of the cantus_firmus.
"""
result = []
for i in range(number):
new_chromosome = []
for note in cantus_firmus:
valid_odd_beat_range = [
interval
for interval in VALID_ODD_BEAT_INTERVALS
if (interval + note) < 17
]
valid_even_beat_range = [
interval
for interval in VALID_EVEN_BEAT_INTERVALS
if (interval + note) < 17
]
first_beat_interval = random.choice(valid_odd_beat_range)
second_beat_interval = random.choice(valid_even_beat_range)
third_beat_interval = random.choice(valid_odd_beat_range)
fourth_beat_interval = random.choice(valid_even_beat_range)
new_chromosome.append(note + first_beat_interval)
new_chromosome.append(note + second_beat_interval)
new_chromosome.append(note + third_beat_interval)
new_chromosome.append(note + fourth_beat_interval)
# Remove the last three beats since they're surplus to requirements.
genome = Genome(new_chromosome[:-3])
result.append(genome)
return result
def make_fitness_function(cantus_firmus):
"""
Given the cantus firmus, will return a function that takes a single Genome
instance and returns a fitness score.
"""
# Melody wide measures.
repeat_threshold = len(cantus_firmus) * 0.5
jump_threshold = len(cantus_firmus) * 0.3
def fitness_function(genome):
"""
Given a candidate solution will return its fitness score assuming
the cantus_firmus in this closure. Caches the fitness score in the
genome.
"""
# Save some time!
if genome.fitness is not None:
return genome.fitness
# The fitness score to be returned.
fitness_score = 0.0
# Counts the number of repeated notes.
repeats = 0
# Counts the amount of parallel motion.
parallel_motion = 0
# Counts the number of jumps in the melodic contour.
jump_contour = 0
contrapunctus = genome.chromosome
# Make sure the solution starts correctly (at a 5th or octave).
first_interval = contrapunctus[0] - cantus_firmus[0]
if first_interval == 7 or first_interval == 4:
fitness_score += REWARD_FIRST
else:
fitness_score -= PUNISH_FIRST
# Make sure the solution finishes correctly (at an octave).
if contrapunctus[-1] - cantus_firmus[-1] == 7:
fitness_score += REWARD_LAST
else:
fitness_score -= PUNISH_LAST
# Ensure the penultimate note is step wise onto the final note.
if abs(contrapunctus[-1] - contrapunctus[-2]) == 1:
fitness_score += REWARD_LAST_STEP
else:
fitness_score -= PUNISH_LAST_STEP
# Reward contrary motion onto the final note.
cantus_firmus_motion = cantus_firmus[-1] - cantus_firmus[-2]
contrapunctus_motion = contrapunctus[-1] - contrapunctus[-2]
if (cantus_firmus_motion < 0 and contrapunctus_motion > 0) or (
cantus_firmus_motion > 0 and contrapunctus_motion < 0
):
fitness_score += REWARD_LAST_MOTION
else:
fitness_score -= PUNISH_LAST_MOTION
# Make sure the penultimate note isn't a repeated note.
penultimate_preparation = abs(contrapunctus[-2] - contrapunctus[-3])
if penultimate_preparation == 0:
fitness_score -= PUNISH_REPEATED_PENULTIMATE
else:
# Make sure the movement to the penultimate note isn't from too
# far away (not greater than a third).
if penultimate_preparation < 2:
fitness_score += REWARD_PENULTIMATE_PREPARATION
else:
fitness_score -= PUNISH_PENULTIMATE_PREPARATION
# Check the fitness of the body of the solution.
last_notes = (contrapunctus[0], cantus_firmus[0])
last_interval = last_notes[0] - last_notes[1]
for i in range(1, len(contrapunctus) - 1):
contrapunctus_note = contrapunctus[i]
cantus_firmus_note = cantus_firmus[i // 4]
current_notes = (contrapunctus_note, cantus_firmus_note)
current_interval = contrapunctus_note - cantus_firmus_note
# Punish parallel fifths or octaves.
if (current_interval == 4 or current_interval == 7) and (
last_interval == 4 or last_interval == 7
):
fitness_score -= PUNISH_PARALLEL_FIFTHS_OCTAVES
# Check for parallel motion.
if is_parallel(last_notes, current_notes):
parallel_motion += 1
# Check if the melody is a repeating note.
if contrapunctus_note == last_notes[0]:
repeats += 1
# Check the melodic contour.
contour_leap = abs(current_notes[0] - last_notes[0])
if contour_leap >= 2:
jump_contour += contour_leap - 2
# Ensure dissonances are part of a step-wise movement.
if i % 2 and current_interval in DISSONANCES:
# The current_note is a dissonance on the third beat of a bar.
# Check that both the adjacent notes are only a step away.
if is_stepwise_motion(contrapunctus, i):
fitness_score += REWARD_STEPWISE_MOTION
else:
fitness_score -= PUNISH_STEPWISE_MOTION
else:
if is_stepwise_motion(contrapunctus, i):
fitness_score += REWARD_STEPWISE_MOTION
last_notes = current_notes
last_interval = current_interval
        # Punish too many repeated notes (more than the repeat threshold).
if repeats > repeat_threshold:
fitness_score -= PUNISH_REPEATS
        # Punish too many parallel movements (more than the repeat threshold).
if parallel_motion > repeat_threshold:
fitness_score -= PUNISH_PARALLEL
# Punish too many large leaps in the melody.
if jump_contour > jump_threshold:
fitness_score -= PUNISH_LEAPS
genome.fitness = fitness_score
return fitness_score
return fitness_function
def make_halt_function(cantus_firmus):
"""
Returns a halt function for the given cantus firmus and third species
counterpoint.
"""
def halt(population, generation_count):
"""
Given a population of candidate solutions and generation count (the
number of epochs the algorithm has run) will return a boolean to
indicate if an acceptable solution has been found within the
referenced population.
"""
fittest = population[0]
max_fitness = MAX_REWARD
for i in range(len(fittest.chromosome)):
# Check for dissonances. Each dissonance should have incremented
# the fitness because it has been "placed" correctly.
cantus_firmus_note = cantus_firmus[i // 4]
melody_note = fittest.chromosome[i]
interval = melody_note - cantus_firmus_note
if interval in DISSONANCES:
max_fitness += REWARD_STEPWISE_MOTION
else:
if i > 0 and i < (len(fittest.chromosome) - 2):
if is_stepwise_motion(fittest.chromosome, i):
max_fitness += REWARD_STEPWISE_MOTION
return (
fittest.fitness >= max_fitness
or generation_count > DEFAULT_MAX_GENERATION
)
return halt
class Genome(ga.Genome):
"""
A class to represent a candidate solution for second species counterpoint.
"""
def mutate(self, mutation_range, mutation_rate, context):
"""
Mutates the genotypes no more than the mutation_range depending on the
mutation_rate given and the cantus_firmus passed in as the context (to
ensure the mutation is valid).
"""
odd_beat_mutation_intervals = [
interval
for interval in VALID_ODD_BEAT_INTERVALS
if interval <= mutation_range
]
even_beat_mutation_intervals = [
interval
for interval in VALID_EVEN_BEAT_INTERVALS
if interval <= mutation_range
]
chromosome_length = len(self.chromosome)
for locus in range(chromosome_length):
if mutation_rate >= random.random():
cantus_firmus_note = context[locus // 4]
# The pitch of the notes immediately before and after the
# current note (used to avoid mutations that result in a
# repeated pitch).
pitches_to_avoid = []
if locus > 0:
pre_pitch = self.chromosome[locus - 1]
pitches_to_avoid.append(pre_pitch)
if locus < chromosome_length - 2:
post_pitch = self.chromosome[locus + 1]
pitches_to_avoid.append(post_pitch)
if locus % 2:
# Current melody note is on an even beat of the bar
mutation_intervals = [
i
for i in even_beat_mutation_intervals
if cantus_firmus_note + i not in pitches_to_avoid
]
if not mutation_intervals:
mutation_intervals = even_beat_mutation_intervals
else:
# Current melody note is on an odd beat of the bar.
mutation_intervals = [
i
for i in odd_beat_mutation_intervals
if cantus_firmus_note + i not in pitches_to_avoid
]
if not mutation_intervals:
mutation_intervals = odd_beat_mutation_intervals
valid_mutation_range = [
interval
for interval in mutation_intervals
if (interval + cantus_firmus_note) < 17
]
mutation = random.choice(valid_mutation_range)
new_allele = cantus_firmus_note + mutation
self.chromosome[locus] = new_allele
# Resets fitness score
self.fitness = None
|
mit
| -8,580,951,361,316,207,000 | 37.206061 | 78 | 0.598509 | false |
denibertovic/django-client-certificates
|
client_certs/admin.py
|
1
|
1094
|
from django.contrib import admin
from .models import Cert
from .cert import revoke_certificates
class CertAdmin(admin.ModelAdmin):
list_display = ('user', 'install_link', 'is_valid', 'valid_until')
fields = ('user', 'country', 'state', 'locality', 'organization',
'organizational_unit', 'common_name', 'description', 'valid_until')
def install_link(self, obj):
return '<a href="%s">Install Link</a>' % obj.get_absolute_url()
install_link.allow_tags = True
def revoke_certificate(self, request, queryset):
        for_revocation = [cert.x509 for cert in queryset if cert.is_valid and cert.is_installed]
        revoke_certificates(for_revocation)
updated = queryset.update(is_valid=False)
if updated == 1:
message = '1 Certificate was revoked.'
else:
message = '%s Certificates were revoked.' % updated
self.message_user(request, message)
revoke_certificate.short_description = "Revoke selected Client Certificates"
actions = [revoke_certificate]
admin.site.register(Cert, CertAdmin)
|
bsd-2-clause
| -7,066,993,477,669,360,000 | 32.151515 | 96 | 0.667276 | false |
forgeservicelab/forge.insightly-sync
|
ldap_updater.py
|
1
|
27000
|
"""Push updates to LDAP."""
import ldap as _ldap
import ldap.modlist as _modlist
import logging
from __init__ import sanitize, fileToRedmine
from unidecode import unidecode
from canned_mailer import CannedMailer
from insightly_updater import InsightlyUpdater
from fuzzywuzzy.process import extractOne
class ForgeLDAP(object):
"""LDAP connection wrapper.
    Represents an LDAP connection and exposes LDAP CRUD operation functions.
"""
_c = None
_logger = None
_redmine_key = None
username = None
def __init__(self, user, pwd, host, redmine_key=None):
"""Initialize the LDAP connection.
Initialize an LDAP object and bind it to the specified host.
Args:
user (str): The cn attribute of the account to use for binding. Must have administrator rights.
pwd (str): The password for the specified user.
host (str): The FQDN or IP of the host running the LDAP server. Connection uses ldaps protocol.
"""
self._logger = logging.getLogger(self.__class__.__name__)
self._redmine_key = redmine_key
dn = 'cn=%s,%s' % (user, LDAPUpdater._LDAP_TREE['accounts'])
_ldap.set_option(_ldap.OPT_X_TLS_REQUIRE_CERT, _ldap.OPT_X_TLS_ALLOW)
self._c = _ldap.initialize("ldaps://%s" % host)
self.username = user
self._c.bind_s(dn, pwd, _ldap.AUTH_SIMPLE)
def destroy(self):
"""Unbind the underlying LDAP connection.
        Ensures that the LDAP connection does not remain open.
"""
self._c.unbind_s()
def ldap_search(self, *args, **kwargs):
"""Search LDAP.
Performs an LDAP search.
Args:
*args: positional arguments for ldap synchronous search, as per python ldap module.
            **kwargs: keyword arguments for ldap synchronous search, as per python ldap module.
Returns:
List: A list containing the results from the LDAP search.
None: If there are no results.
"""
try:
ldap_res = self._c.search_s(*args, **kwargs)
except _ldap.NO_SUCH_OBJECT:
return None
return ldap_res
def ldap_add(self, *args):
"""Add entries to LDAP.
Performs an LDAP add operation.
Args:
*args: positional arguments for ldap synchronous add, as per python ldap module.
*kwargs: keyword arguments for ldap synchronous add, as per python ldap module.
"""
try:
self._c.add_s(*args)
except _ldap.ALREADY_EXISTS, err:
self._logger.info('%s; %s' % (err, 'Ignoring.'))
except _ldap.LDAPError, err:
self._logger.error('Try LDAPadd: %s' % list(args))
self._logger.error(err)
if self._redmine_key:
fileToRedmine(key=self._redmine_key, subject=err.__class__.__name__, message='%s\nTry LDAPadd: %s'
% (err, args))
def ldap_update(self, *args):
"""Modify entries on LDAP.
Performs an LDAP modify operation.
Args:
*args: positional arguments for ldap synchronous modify, as per python ldap module.
*kwargs: keyword arguments for ldap synchronous modify, as per python ldap module.
"""
try:
self._c.modify_s(*args)
except _ldap.LDAPError, err:
self._logger.error('Try LDAPmodify: %s' % list(args))
self._logger.error(err)
if self._redmine_key:
fileToRedmine(key=self._redmine_key, subject=err.__class__.__name__, message='%s\nTry LDAPmodify: %s'
% (err, args))
def ldap_delete(self, *args):
"""Delete entries from LDAP.
Performs an LDAP delete operation.
Args:
*args: positional arguments for ldap synchronous delete, as per python ldap module.
*kwargs: keyword arguments for ldap synchronous delete, as per python ldap module.
"""
try:
self._c.delete_s(*args)
except _ldap.LDAPError, err:
self._logger.error('Try LDAPdelete: %s' % list(args))
self._logger.error(err)
if self._redmine_key:
fileToRedmine(key=self._redmine_key, subject=err.__class__.__name__, message='%s\nTry LDAPdelete: %s'
% (err, args))
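# Typical usage of ForgeLDAP, sketched with placeholder credentials and host
# (all values below are illustrative assumptions):
#
#     conn = ForgeLDAP('syncer', 'secret', 'ldap.example.org')
#     entries = conn.ldap_search(LDAPUpdater._LDAP_TREE['accounts'],
#                                _ldap.SCOPE_ONELEVEL,
#                                filterstr='employeeNumber=12345')
#     conn.destroy()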
class LDAPUpdater:
"""Update LDAP server to represent identity and membership relations stated on Insightly.
Attributes:
SDA: Constant representing the name of the SDA category on Insightly.
FPA: Constant representing the name of the FPA category on Insightly.
FPA_CRA: Constant representing the name of the FPA with CRA category on Insightly.
OS_TENANT: Constant representing the name of the OpenStack tenant category on Insightly.
PIPELINE_NAME: Constant representing the name of the Project execution pipeline on Insightly.
ACTION_CREATE: Constant representing the create key for the Action function.
ACTION_DELETE: Constant representing the delete key for the Action function.
ACTION_UPDATE: Constant representing the update key for the Action function.
"""
SDA = 'SDA'
FPA = 'FPA'
FPA_CRA = 'FPA (CRA)'
OS_TENANT = 'OpenStack Tenant'
PIPELINE_NAME = 'Project execution'
ACTION_CREATE = 'create'
ACTION_DELETE = 'delete'
ACTION_UPDATE = 'update'
_LDAP_TREE = {'accounts': "ou=accounts,dc=forgeservicelab,dc=fi",
'projects': "ou=projects,dc=forgeservicelab,dc=fi",
'admins': "cn=ldap_admins,ou=roles,dc=forgeservicelab,dc=fi"}
_PROTECTED_ACCOUNTS = ['admin', 'binder', 'pwdchanger', 'syncer']
_ALL_OTHER_GROUPS_FILTER = '(&(|(objectClass=groupOfNames)\
(objectClass=groupOfUniqueNames))\
(|(member=cn={user_cn},%(s)s)\
(uniqueMember=cn={user_cn},%(s)s))\
(!(cn:dn:={project_cn})))'.replace(' ', '') % {'s': _LDAP_TREE['accounts']}
_PLACEHOLDER_NAME = 'FirstName'
_PLACEHOLDER_SN = 'LastName'
def __init__(self, insightlyUpdater, args):
"""Initialize instance."""
self.mailer = CannedMailer(args)
self.updater = insightlyUpdater
def _parseName(self, name):
"""Return the first element of a compound name that is not a known particle.
Args:
name (str): The name to be parsed.
Returns:
str: The transliterated first non-particle element of a name, capped to 10 characters.
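        For example (illustrative): _parseName('von Neumann') would return 'neumann'.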
"""
PARTICLES = ['de', 'della', 'von', 'und']
SPECIAL_CHARS = ['\'', '.', '!']
splitName = reduce(list.__add__, map(lambda n: n.split('-'), name.split()))
try:
while splitName[0].lower() in PARTICLES:
splitName.pop(0)
except IndexError:
pass
return unidecode(filter(lambda c: c not in SPECIAL_CHARS,
splitName[0].decode('utf-8').lower()[:10])) if splitName else None
def _ldapCN(self, userID, ldap_conn):
return ldap_conn.ldap_search(self._LDAP_TREE['accounts'], _ldap.SCOPE_ONELEVEL,
filterstr='employeeNumber=%s' % userID,
attrsonly=1)[0][0]
def _createCN(self, user, ldap_conn):
firstName = None if user['givenName'] is self._PLACEHOLDER_NAME else self._parseName(user['givenName'])
lastName = None if user['sn'] is self._PLACEHOLDER_SN else self._parseName(user['sn'])
cn = '.'.join(filter(lambda n: n, [firstName, lastName]))
suffix = 0
while ldap_conn.ldap_search('cn=%s,%s' % (cn, self._LDAP_TREE['accounts']), _ldap.SCOPE_BASE, attrsonly=1):
cn = '%s.%s' % (cn[:-2], suffix)
suffix += 1
return cn
def _disableAndNotify(self, dn, ldap_conn):
account = ldap_conn.ldap_search(dn, _ldap.SCOPE_BASE, attrlist=['employeeType', 'cn', 'mail'])[0][1]
if account and ('employeeType' not in account or not extractOne(account['employeeType'][0],
['disabled'], score_cutoff=80)):
ldap_conn.ldap_update(dn, [(_ldap.MOD_REPLACE, 'employeeType', 'disabled')])
map(lambda e: self.mailer.sendCannedMail(e, self.mailer.CANNED_MESSAGES['disabled_account'],
account['cn'][0]), account['mail'])
def _pruneAccounts(self, ldap_conn):
# Disable orphans
map(lambda entry: self._disableAndNotify(entry, ldap_conn),
map(lambda dn: dn[0],
filter(lambda a: 'memberOf' not in a[1].keys() and not any(cn in a[0] for cn in
self._PROTECTED_ACCOUNTS),
ldap_conn.ldap_search(self._LDAP_TREE['accounts'],
_ldap.SCOPE_ONELEVEL,
attrlist=['memberOf']))))
# Re-enable non orphans
map(lambda entry: ldap_conn.ldap_update(entry, [(_ldap.MOD_REPLACE, 'employeeType', None)]),
map(lambda dn: dn[0],
filter(lambda a: 'memberOf' in a[1].keys(),
ldap_conn.ldap_search(self._LDAP_TREE['accounts'],
_ldap.SCOPE_ONELEVEL,
attrlist=['memberOf'],
filterstr='(employeeType=disabled)'))))
def _getLDAPCompatibleProject(self, project, objectClass, ldap_conn):
project = project.copy()
project['objectClass'] = objectClass
project['owner'] = [self._ldapCN(owner['employeeNumber'], ldap_conn) for owner in project.pop('owner', [])]
project['member'] = [self._ldapCN(member['employeeNumber'], ldap_conn) for member in project.pop('member', [])]
project['seeAlso'] = [self._ldapCN(seeAlso['employeeNumber'],
ldap_conn) for seeAlso in project.pop('seeAlso', [])]
project['uniqueMember'] = project['member']
project.pop('tenants')
project.pop('member' if objectClass is 'groupOfUniqueNames' else 'uniqueMember')
return project
def _getLDAPCompatibleAccount(self, account):
account = account.copy()
account['objectClass'] = 'inetOrgPerson'
if extractOne('True', account.pop('isHidden'), score_cutoff=75):
account['employeeType'] = 'hidden'
return account
# deprecated
def _createRecord(self, project, ldap_conn):
return filter(lambda r: len(r[1]), [
('objectClass', ['groupOfNames']),
('cn', [project['cn']]),
('o', project['o']),
('owner', map(lambda o: self._ldapCN(o['uid'], ldap_conn), project['owner'])),
('seeAlso', map(lambda a: self._ldapCN(a['uid'], ldap_conn), project['seeAlso'])),
('member', map(lambda m: self._ldapCN(m['uid'], ldap_conn), project['members'])),
('description', ['type:%s' % item for item in project['description']])
])
# deprecated
def _createTenantRecord(self, tenant, ldap_conn):
record = self._createRecord(tenant, ldap_conn)
record = map(lambda r: r if r[0] != 'objectClass' else (r[0], ['groupOfUniqueNames']), record)
if len(record) == 7:
record = map(lambda r: r if r[0] != 'owner' else ('uniqueMember', r[1]), record)
record.pop(4)
record.pop(4)
else:
record = map(lambda r: r if r[0] != 'member' else ('uniqueMember', r[1]), record)
return record
def _createOrUpdate(self, member_list, ldap_conn):
new_records = filter(lambda m: not ldap_conn.ldap_search(self._LDAP_TREE['accounts'],
_ldap.SCOPE_ONELEVEL,
filterstr='employeeNumber=%s' % m['employeeNumber'],
attrsonly=1),
member_list)
map(lambda c: ldap_conn.ldap_add('cn=%s,%s' % (self._createCN(c, ldap_conn), self._LDAP_TREE['accounts']),
_modlist.addModlist(self._getLDAPCompatibleAccount(c),
ignore_attr_types=['cn'])),
new_records)
map(lambda u: ldap_conn.ldap_update('%s' % self._ldapCN(u['employeeNumber'], ldap_conn),
_modlist.modifyModlist(ldap_conn.ldap_search(self._LDAP_TREE['accounts'],
_ldap.SCOPE_ONELEVEL,
filterstr='employeeNumber=%s'
% u['employeeNumber'])[0][1],
self._getLDAPCompatibleAccount(u),
ignore_attr_types=['userPassword', 'cn'])),
filter(lambda m: cmp(dict(self._getLDAPCompatibleAccount(m)),
ldap_conn.ldap_search(self._LDAP_TREE['accounts'],
_ldap.SCOPE_ONELEVEL,
filterstr='employeeNumber=%s' % m['employeeNumber'],
attrlist=['displayName', 'objectClass', 'employeeType',
'mobile', 'employeeNumber', 'sn',
'mail', 'givenName'])[0][1]),
member_list))
return new_records
def _sendNewAccountEmails(self, new_accounts, project_type, ldap_conn):
map(lambda d: map(lambda t: self.mailer.sendCannedMail(t,
self.mailer.CANNED_MESSAGES['new_devel_account'] if
project_type in [self.SDA, self.OS_TENANT] else
self.mailer.CANNED_MESSAGES['new_partner_account'],
d['cn'][0]),
d['mail']),
map(lambda a: ldap_conn.ldap_search('ou=accounts,dc=forgeservicelab,dc=fi',
_ldap.SCOPE_ONELEVEL,
filterstr='employeeNumber=%s' % a['employeeNumber'],
attrlist=['cn', 'mail'])[0][1],
new_accounts))
# deprecated
def _ensureButlerService(self, record):
if not any([member.startswith('cn=butler.service') for
member in filter(lambda attribute: attribute[0] == 'uniqueMember', record)[0][1]]):
record = map(lambda r: r if r[0] != 'uniqueMember'
else ('uniqueMember',
['cn=butler.service,ou=accounts,dc=forgeservicelab,dc=fi'] + r[1]), record)
return record
def _addAndNotify(self, dn, tenant, ldap_conn):
add_butler = False
if 'Digile.Platform' in dn:
self.updater\
.addUserToProject(ldap_conn.ldap_search('cn=butler.service,ou=accounts,dc=forgeservicelab,dc=fi',
_ldap.SCOPE_BASE,
attrlist=['employeeNumber'])[0][1]['employeeNumber'][0],
tenant)
add_butler = all([member['displayName'] != 'Butler Service' for member in tenant['member']])
ldap_tenant = self._getLDAPCompatibleProject(tenant, 'groupOfUniqueNames', ldap_conn)
if add_butler:
ldap_tenant['uniqueMember'] += ['cn=butler.service,ou=accounts,dc=forgeservicelab,dc=fi']
ldap_conn.ldap_add(dn, _modlist.addModlist(ldap_tenant))
map(lambda ml: map(lambda e: self.mailer.sendCannedMail(e,
self.mailer.CANNED_MESSAGES['added_to_tenant'],
ldap_tenant['cn']),
ml),
[ldap_conn.ldap_search(s, _ldap.SCOPE_BASE,
attrlist=['mail'])[0][1]['mail'] for s in ldap_tenant['uniqueMember']])
def _createTenants(self, tenant_list, project, ldap_conn):
if tenant_list:
map(lambda t: self._sendNewAccountEmails(self._createOrUpdate(t['member'], ldap_conn),
self.OS_TENANT, ldap_conn), tenant_list)
map(lambda c: self._addAndNotify('cn=%s,cn=%s,%s' % (c['cn'], project['cn'], self._LDAP_TREE['projects']),
c, ldap_conn),
tenant_list)
else:
insightly_tenant = self.updater.createDefaultTenantFor(project)
tenant = project.copy()
tenant['o'] = str(insightly_tenant['PROJECT_ID'])
tenant['uniqueMember'] = tenant.pop('owner', [])
tenant.pop('seeAlso')
self._sendNewAccountEmails(self._createOrUpdate(tenant['uniqueMember'], ldap_conn),
self.OS_TENANT, ldap_conn)
self._addAndNotify('cn=%(cn)s,cn=%(cn)s,%(sf)s' %
{'cn': project['cn'], 'sf': self._LDAP_TREE['projects']}, tenant, ldap_conn)
def _create(self, project, project_type, ldap_conn):
self._sendNewAccountEmails(self._createOrUpdate(project['member'], ldap_conn), project_type, ldap_conn)
ldap_conn.ldap_add(
'cn=%s,%s' % (project['cn'], self._LDAP_TREE['projects']),
_modlist.addModlist(self._getLDAPCompatibleProject(project, 'groupOfNames', ldap_conn)))
if project_type in [self.SDA, self.FPA_CRA]:
self._createTenants(project['tenants'], project, ldap_conn)
self.updater.updateProject(project, status=self.updater.STATUS_RUNNING)
map(lambda a: map(lambda m: self.mailer.sendCannedMail(m, self.mailer.CANNED_MESSAGES['notify_admin_contact'],
a['displayName']),
a['mail']),
project['seeAlso'])
map(lambda a: map(lambda m: self.mailer.sendCannedMail(m, self.mailer.CANNED_MESSAGES['added_to_project'],
project['cn']), a['mail']), project['member'])
def _updateAndNotify(self, dn, record, ldap_conn, is_tenant=False):
ldap_record = ldap_conn.ldap_search(dn, _ldap.SCOPE_BASE)[0][1]
dict_record = self._getLDAPCompatibleProject(record,
'groupOfUniqueNames' if is_tenant else 'groupOfNames',
ldap_conn)
if cmp(dict_record, ldap_record):
ldap_conn.ldap_update(dn, _modlist.modifyModlist(ldap_record, dict_record))
new_users = filter(lambda m: m not in (ldap_record['uniqueMember'] if 'uniqueMember' in ldap_record.keys()
else ldap_record['member']),
(dict_record['uniqueMember'] if 'uniqueMember' in dict_record.keys()
else dict_record['member']))
gone_users = filter(lambda m: m not in (dict_record['uniqueMember'] if 'uniqueMember' in dict_record.keys()
else dict_record['member']),
(ldap_record['uniqueMember'] if 'uniqueMember' in ldap_record.keys()
else ldap_record['member']))
if any(member_attribute in dict_record.keys() for member_attribute in ['member', 'uniqueMember']):
map(lambda email_list: map(lambda e: self.mailer
.sendCannedMail(e,
self.mailer.CANNED_MESSAGES['added_to_tenant']
if any(self.OS_TENANT in s for s in
dict_record['description']) else
self.mailer.CANNED_MESSAGES[
'added_to_project'],
dict_record['cn'][0]), email_list),
map(lambda s: ldap_conn.ldap_search(s, _ldap.SCOPE_BASE, attrlist=['mail'])[0][1]['mail'],
new_users))
map(lambda email_list: map(lambda e: self.mailer
.sendCannedMail(e,
self.mailer.CANNED_MESSAGES[
'deleted_from_tenant']
if any(self.OS_TENANT in s for s in
dict_record['description']) else
self.mailer.CANNED_MESSAGES[
'deleted_from_project'],
dict_record['cn'][0]), email_list),
map(lambda s: ldap_conn.ldap_search(s, _ldap.SCOPE_BASE, attrlist=['mail'])[0][1]['mail'],
gone_users))
def _updateTenants(self, tenant_list, project, ldap_conn):
map(lambda t: self._sendNewAccountEmails(self._createOrUpdate(t['member'], ldap_conn),
self.OS_TENANT, ldap_conn), tenant_list)
ldap_tenant_cns = [cn[1]['cn'][0] for cn in ldap_conn.ldap_search('cn=%s,%s' %
(project['cn'],
self._LDAP_TREE['projects']),
_ldap.SCOPE_ONELEVEL, attrlist=['cn'])]
new_tenants = filter(lambda t: t['cn'] not in ldap_tenant_cns, tenant_list)
removed_tenant_cns = filter(lambda cn: cn not in [tenant['cn'] for tenant in tenant_list], ldap_tenant_cns)
if new_tenants or not tenant_list:
self._createTenants(new_tenants, project, ldap_conn)
if removed_tenant_cns:
map(lambda cn: ldap_conn.ldap_delete('cn=%s,cn=%s,%s' % (cn, project['cn'], self._LDAP_TREE['projects'])),
removed_tenant_cns)
map(lambda u: self._updateAndNotify('cn=%s,cn=%s,%s' % (u['cn'], project['cn'], self._LDAP_TREE['projects']),
u, ldap_conn, is_tenant=True),
filter(lambda nonews: nonews not in new_tenants,
filter(lambda t: ldap_conn.ldap_search('cn=%s,cn=%s,%s' %
(t['cn'], project['cn'],
self._LDAP_TREE['projects']),
_ldap.SCOPE_BASE), tenant_list)))
def _update(self, project, project_type, ldap_conn):
ldap_record = ldap_conn.ldap_search('cn=%s,%s' % (project['cn'], self._LDAP_TREE['projects']),
_ldap.SCOPE_BASE)
if ldap_record:
self._sendNewAccountEmails(self._createOrUpdate(project['member'], ldap_conn), project_type, ldap_conn)
self._updateAndNotify('cn=%s,%s' % (project['cn'], self._LDAP_TREE['projects']),
project,
# map(lambda t: (_ldap.MOD_REPLACE, t[0], t[1]),
# self._createRecord(project, ldap_conn)),
ldap_conn)
if project_type in [self.SDA, self.FPA_CRA]:
self._updateTenants(project['tenants'], project, ldap_conn)
else:
self._create(project, project_type, ldap_conn)
def _deleteTenants(self, tenant_list, project, ldap_conn):
former_members = []
        map(lambda tenant: former_members.extend(ldap_conn.ldap_search(tenant, _ldap.SCOPE_BASE,
                                                            attrlist=['uniqueMember'])[0][1]['uniqueMember']),
tenant_list)
map(lambda tenant: ldap_conn.ldap_delete(tenant), tenant_list)
def _delete(self, project, project_type, ldap_conn):
tenant_list = ldap_conn.ldap_search('cn=%s,' % project['cn'] + self._LDAP_TREE['projects'],
_ldap.SCOPE_SUBORDINATE, attrlist=['o'])
for tenant in tenant_list or []:
tenant[1]['o'] = tenant[1]['o'][0]
map(lambda tenant: ldap_conn.ldap_delete(tenant[0]), tenant_list or [])
ldap_conn.ldap_delete('cn=%s,%s' % (project['cn'], self._LDAP_TREE['projects']))
map(lambda tenant: self.updater.updateProject(tenant[1], updateStage=False,
status=self.updater.STATUS_COMPLETED), tenant_list or [])
self.updater.updateProject(project, updateStage=False, status=self.updater.STATUS_COMPLETED)
_actions = {
ACTION_CREATE: _create,
ACTION_DELETE: _delete,
ACTION_UPDATE: _update
}
def Action(self, action, data_list, ldap_conn):
"""Perform a CRUD action against LDAP.
Triggers the generation of LDAP payload and executes the requested action against the LDAP connection.
Args:
action (str): The action to perform, one of ACTION_CREATE, ACTION_DELETE or ACTION_UPDATE.
data_list (List): A list of the elements to use as payload for the CRUD action against LDAP.
ldap_conn (ForgeLDAP): An initialized LDAP connection to perform actions against.
"""
map(lambda k: map(lambda p: self._actions[action](self, p, k, ldap_conn), data_list[k]), data_list.keys())
self._pruneAccounts(ldap_conn)
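    # Hedged usage sketch (assumption, not part of the original source): with
    # `syncer` an instance of this class, `conn` an initialized ForgeLDAP
    # connection and ACTION_CREATE the module-level action constant, a creation
    # run over SDA projects could look like
    #     syncer.Action(ACTION_CREATE, {syncer.SDA: sda_projects}, conn)
    # where sda_projects is a list of project dicts in the shape produced by
    # the Insightly updater.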
|
mit
| 6,728,499,487,768,978,000 | 51.023121 | 119 | 0.505963 | false |
hlange/LogSoCR
|
pysc/usi/log/console_reporter.py
|
1
|
4184
|
import json
import colorlog
import logging
import logging.handlers
from elloghandler.handler import ElLogHandler
from socr_streamhandler.handler import SoCR_StreamHandler
from socr_filehandler.handler import SoCR_FileHandler
from datetime import datetime, date, time
def logger_conf(loglevel,index=None):
#Formatter
COLORLOG_FORMAT = '@%(time)s ns /%(delta_count)s (%(blue)s%(filename)s%(white)s): %(log_color)s%(levelname)s%(reset)s: %(message)s %(parameters)s'
STD_FORMAT = '@%(time)s ns /%(delta_count)s (%(filename)s): %(levelname)s: %(message)s %(parameters)s '
FILE_FORMAT = '%(asctime)s @%(time)s ns /%(delta_count)s (%(filename)s): %(levelname)s: %(message)s %(parameters)s'
#Logger initialization
log_root = logging.getLogger()
log_root.setLevel(logging.DEBUG)
#disable elastic search logger, makes problem when activated
es_log = logging.getLogger("elasticsearch")
es_log.propagate = False
#ElasticSearch Handler
if index is not None:
eh = ElLogHandler(index)
else:
eh = ElLogHandler()
#console handler
ch = SoCR_StreamHandler()
ch.setFormatter(colorlog.ColoredFormatter(
COLORLOG_FORMAT,
datefmt=None,
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red,bg_white',
}))
#file handler
if loglevel[2] != 'x':
fh = SoCR_FileHandler(datetime.now().strftime('log_%H:%M_%d_%m_%Y.log'))
fh.setFormatter(logging.Formatter(FILE_FORMAT))
#setting the priorities of the handler
#Elasticsearch
if loglevel[0] == '0':
eh.setLevel(logging.INFO)
elif loglevel[0] == '1':
eh.setLevel(logging.WARNING)
elif loglevel[0] == '2':
eh.setLevel(logging.ERROR)
elif loglevel[0] == '3':
eh.setLevel(logging.CRITICAL)
if loglevel[0] == 'x':
pass
else:
log_root.addHandler(eh)
#File
if loglevel[2] == '0':
fh.setLevel(logging.INFO)
elif loglevel[2] == '1':
fh.setLevel(logging.WARNING)
elif loglevel[2] == '2':
fh.setLevel(logging.ERROR)
elif loglevel[2] == '3':
fh.setLevel(logging.CRITICAL)
if loglevel[2] == 'x':
pass
else:
log_root.addHandler(fh)
#Terminal
if loglevel[4] == '0':
ch.setLevel(logging.INFO)
elif loglevel[4] == '1':
ch.setLevel(logging.WARNING)
elif loglevel[4] == '2':
ch.setLevel(logging.ERROR)
elif loglevel[4] == '3':
ch.setLevel(logging.CRITICAL)
if loglevel[4] == 'x':
pass
else:
log_root.addHandler(ch)
logger = logging.getLogger(__name__)
#function for log messages
def report(
message_type=None,
message_text=None,
severity=None,
file_name=None,
line_number=None,
time=None,
delta_count=None,
process_name=None,
verbosity=None,
what=None,
actions=None,
phase=None,
**kwargs):
parameters = " "
for value in kwargs:
if isinstance(kwargs[value], int):
parameters += "{0}={1:#x} ".format(value, kwargs[value])
else:
parameters += "{0}={1} ".format(value, kwargs[value])
extra={
'message_type':message_type,
'severity': severity,
'file_name': file_name,
'line_number': line_number,
'time': time,
'delta_count': delta_count,
'process_name': process_name,
'verbosity': verbosity,
'what': what,
'actions': actions,
'phase': phase,
'parameters':parameters}
#Collection log information
extra.update(kwargs)
if severity == 0:
logger.info(message_text, extra=extra)
elif severity == 1:
logger.warning(message_text, extra=extra)
elif severity == 2:
logger.error(message_text, extra=extra)
elif severity >= 3:
logger.critical(message_text, extra=extra)
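# Hedged usage sketch (assumption, not part of the original source). The
# loglevel argument is indexed at positions 0, 2 and 4, so it is read as a
# separator-delimited triple such as "0-x-1": char 0 selects the Elasticsearch
# handler level, char 2 the file handler level and char 4 the terminal level
# ('0'=INFO, '1'=WARNING, '2'=ERROR, '3'=CRITICAL, 'x' disables that handler).
#     logger_conf('0-x-1', index='simulation_run')
#     report(message_type='info', message_text='transaction finished',
#            severity=0, file_name='router.py', line_number=42, time=120,
#            delta_count=3, process_name='router', verbosity=300,
#            what=None, actions=None, phase='run', address=0x4000)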
|
agpl-3.0
| -30,429,728,282,431,344 | 26.526316 | 150 | 0.573853 | false |
cstrap/python-vuejs
|
docs/conf.py
|
1
|
8556
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# python_vuejs documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import python_vuejs
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Python and Vue.js integration'
copyright = u"2017, Christian Strappazzon"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = python_vuejs.__version__
# The full version, including alpha/beta/rc tags.
release = python_vuejs.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'python_vuejsdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'python_vuejs.tex',
u'Python and Vue.js integration Documentation',
u'Christian Strappazzon', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'python_vuejs',
u'Python and Vue.js integration Documentation',
[u'Christian Strappazzon'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'python_vuejs',
u'Python and Vue.js integration Documentation',
u'Christian Strappazzon',
'python_vuejs',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
mit
| -3,259,803,063,698,980,000 | 30.112727 | 76 | 0.706989 | false |
jvkersch/hsmmlearn
|
docs/conf.py
|
1
|
9948
|
# -*- coding: utf-8 -*-
#
# hsmmlearn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 1 17:33:24 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Avoid using C libraries on RTD
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = ['numpy', 'scipy', 'scipy.stats', 'matplotlib',
'matplotlib.pyplot', 'hsmmlearn.base']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'hsmmlearn'
copyright = u'2016, Joris Vankerschaver'
author = u'Joris Vankerschaver'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from
# docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify
# it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'hsmmlearndoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'hsmmlearn.tex', u'hsmmlearn Documentation',
u'Joris Vankerschaver', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'hsmmlearn', u'hsmmlearn Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'hsmmlearn', u'hsmmlearn Documentation',
author, 'hsmmlearn', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
gpl-3.0
| 8,598,572,443,615,208,000 | 31.298701 | 79 | 0.702151 | false |
hguemar/cinder
|
cinder/tests/test_drbdmanagedrv.py
|
1
|
11637
|
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
from oslo.utils import importutils
from oslo.utils import timeutils
from cinder import context
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
class mock_dbus():
def __init__(self):
pass
@staticmethod
def Array(defaults, signature=None):
return defaults
class mock_dm_utils():
@staticmethod
def dict_to_aux_props(x):
return x
class mock_dm_const():
TQ_GET_PATH = "get_path"
class mock_dm_exc():
DM_SUCCESS = 0
DM_EEXIST = 1
DM_ENOENT = 2
DM_ERROR = 1000
pass
import sys
sys.modules['dbus'] = mock_dbus
sys.modules['drbdmanage'] = collections.namedtuple(
'module', ['consts', 'exceptions', 'utils'])
sys.modules['drbdmanage.utils'] = collections.namedtuple(
'module', ['dict_to_aux_props'])
sys.modules['drbdmanage.consts'] = collections.namedtuple(
'module', [])
sys.modules['drbdmanage.exceptions'] = collections.namedtuple(
'module', ['DM_EEXIST'])
from cinder.volume.drivers.drbdmanagedrv import DrbdManageDriver
LOG = logging.getLogger(__name__)
def create_configuration():
configuration = mock.MockObject(conf.Configuration)
configuration.san_is_local = False
configuration.append_config_values(mock.IgnoreArg())
return configuration
class DrbdManageFakeDriver():
resources = {}
def __init__(self):
self.calls = []
def list_resources(self, res, serial, prop, req):
self.calls.append(["list_resources", res, prop, req])
if 'cinder-id' in prop and prop['cinder-id'].startswith("deadbeef"):
return ([mock_dm_exc.DM_ENOENT, "none", []],
[])
else:
return ([[mock_dm_exc.DM_SUCCESS, "ACK", []]],
[("res", dict(prop))])
def create_resource(self, res, props):
self.calls.append(["create_resource", res, props])
return [[mock_dm_exc.DM_SUCCESS, "ack", []]]
def create_volume(self, res, size, props):
self.calls.append(["create_volume", res, size, props])
return [[mock_dm_exc.DM_SUCCESS, "ack", []]]
def auto_deploy(self, res, red, delta, site_clients):
self.calls.append(["auto_deploy", res, red, delta, site_clients])
return [[mock_dm_exc.DM_SUCCESS, "ack", []] * red]
def list_volumes(self, res, ser, prop, req):
self.calls.append(["list_volumes", res, ser, prop, req])
if 'cinder-id' in prop and prop['cinder-id'].startswith("deadbeef"):
return ([mock_dm_exc.DM_ENOENT, "none", []],
[])
else:
return ([[mock_dm_exc.DM_SUCCESS, "ACK", []]],
[("res", dict(), [(2, dict(prop))])
])
def remove_volume(self, res, nr, force):
self.calls.append(["remove_volume", res, nr, force])
return [[mock_dm_exc.DM_SUCCESS, "ack", []]]
def text_query(self, cmd):
self.calls.append(["text_query", cmd])
if cmd[0] == mock_dm_const.TQ_GET_PATH:
return ([(mock_dm_exc.DM_SUCCESS, "ack", [])], ['/dev/drbd0'])
return ([(mock_dm_exc.DM_ERROR, 'unknown command', [])], [])
def list_assignments(self, nodes, res, ser, prop, req):
self.calls.append(["list_assignments", nodes, res, ser, prop, req])
if 'cinder-id' in prop and prop['cinder-id'].startswith("deadbeef"):
return ([mock_dm_exc.DM_ENOENT, "none", []],
[])
else:
return ([[mock_dm_exc.DM_SUCCESS, "ACK", []]],
[("node", "res", dict(), [(2, dict(prop))])
])
def create_snapshot(self, res, snap, nodes, props):
self.calls.append(["create_snapshot", res, snap, nodes, props])
return [[mock_dm_exc.DM_SUCCESS, "ack", []]]
def list_snapshots(self, res, sn, prop, req):
self.calls.append(["list_snapshots", res, sn, prop, req])
if 'cinder-id' in prop and prop['cinder-id'].startswith("deadbeef"):
return ([mock_dm_exc.DM_ENOENT, "none", []],
[])
else:
return ([[mock_dm_exc.DM_SUCCESS, "ACK", []]],
[("res", [("snap", dict(prop))])
])
def remove_snapshot(self, res, snap, force):
self.calls.append(["remove_snapshot", res, snap, force])
return [[mock_dm_exc.DM_SUCCESS, "ack", []]]
def resize_volume(self, res, vol, ser, size, delta):
self.calls.append(["resize_volume", res, vol, ser, size, delta])
return [[mock_dm_exc.DM_SUCCESS, "ack", []]]
def restore_snapshot(self, res, snap, new, rprop, vprops):
self.calls.append(["restore_snapshot", res, snap, new, rprop, vprops])
return [[mock_dm_exc.DM_SUCCESS, "ack", []]]
class DrbdManageTestCase(test.TestCase):
def setUp(self):
self.ctxt = context.get_admin_context()
self._mock = mock.Mock()
self.configuration = mock.Mock(conf.Configuration)
self.configuration.san_is_local = True
self.configuration.reserved_percentage = 1
super(DrbdManageTestCase, self).setUp()
self.stubs.Set(importutils, 'import_object',
self.fake_import_object)
self.stubs.Set(DrbdManageDriver, 'call_or_reconnect',
self.fake_issue_dbus_call)
self.stubs.Set(DrbdManageDriver, 'dbus_connect',
self.fake_issue_dbus_connect)
sys.modules['cinder.volume.drivers.drbdmanagedrv'].dm_const \
= mock_dm_const
sys.modules['cinder.volume.drivers.drbdmanagedrv'].dm_utils \
= mock_dm_utils
sys.modules['cinder.volume.drivers.drbdmanagedrv'].dm_exc \
= mock_dm_exc
self.configuration.safe_get = lambda x: 'fake'
# Infrastructure
def fake_import_object(self, what, configuration, db, executor):
return None
def fake_issue_dbus_call(self, fn, *args):
return apply(fn, args)
def fake_issue_dbus_connect(self):
self.odm = DrbdManageFakeDriver()
def call_or_reconnect(self, method, *params):
return apply(method, params)
# Tests per se
def test_create_volume(self):
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
'id': 'ba253fd0-8068-11e4-98c0-5254008ea111',
'volume_type_id': 'drbdmanage',
'created_at': timeutils.utcnow()}
dmd = DrbdManageDriver(configuration=self.configuration)
dmd.odm = DrbdManageFakeDriver()
dmd.create_volume(testvol)
self.assertEqual(dmd.odm.calls[0][0], "create_resource")
self.assertEqual(dmd.odm.calls[1][0], "create_volume")
self.assertEqual(dmd.odm.calls[1][2], 1048576)
self.assertEqual(dmd.odm.calls[2][0], "auto_deploy")
def test_delete_volume(self):
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
'id': 'ba253fd0-8068-11e4-98c0-5254008ea111',
'volume_type_id': 'drbdmanage',
'created_at': timeutils.utcnow()}
dmd = DrbdManageDriver(configuration=self.configuration)
dmd.odm = DrbdManageFakeDriver()
dmd.delete_volume(testvol)
self.assertEqual(dmd.odm.calls[0][0], "list_volumes")
self.assertEqual(dmd.odm.calls[0][3]["cinder-id"], testvol['id'])
self.assertEqual(dmd.odm.calls[1][0], "remove_volume")
def test_local_path(self):
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
'id': 'ba253fd0-8068-11e4-98c0-5254008ea111',
'volume_type_id': 'drbdmanage',
'created_at': timeutils.utcnow()}
dmd = DrbdManageDriver(configuration=self.configuration)
dmd.odm = DrbdManageFakeDriver()
data = dmd.local_path(testvol)
self.assertTrue(data.startswith("/dev/drbd"))
def test_create_snapshot(self):
testsnap = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111',
'volume_id': 'ba253fd0-8068-11e4-98c0-5254008ea111'}
dmd = DrbdManageDriver(configuration=self.configuration)
dmd.odm = DrbdManageFakeDriver()
dmd.create_snapshot(testsnap)
self.assertEqual(dmd.odm.calls[0][0], "list_volumes")
self.assertEqual(dmd.odm.calls[1][0], "list_assignments")
self.assertEqual(dmd.odm.calls[2][0], "create_snapshot")
self.assertTrue('node' in dmd.odm.calls[2][3])
def test_delete_snapshot(self):
testsnap = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111'}
dmd = DrbdManageDriver(configuration=self.configuration)
dmd.odm = DrbdManageFakeDriver()
dmd.delete_snapshot(testsnap)
self.assertEqual(dmd.odm.calls[0][0], "list_snapshots")
self.assertEqual(dmd.odm.calls[1][0], "remove_snapshot")
def test_extend_volume(self):
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
'id': 'ba253fd0-8068-11e4-98c0-5254008ea111',
'volume_type_id': 'drbdmanage',
'created_at': timeutils.utcnow()}
dmd = DrbdManageDriver(configuration=self.configuration)
dmd.odm = DrbdManageFakeDriver()
dmd.extend_volume(testvol, 5)
self.assertEqual(dmd.odm.calls[0][0], "list_volumes")
self.assertEqual(dmd.odm.calls[0][3]["cinder-id"], testvol['id'])
self.assertEqual(dmd.odm.calls[1][0], "resize_volume")
self.assertEqual(dmd.odm.calls[1][1], "res")
self.assertEqual(dmd.odm.calls[1][2], 2)
self.assertEqual(dmd.odm.calls[1][3], -1)
self.assertEqual(dmd.odm.calls[1][4]['size'], 5242880)
def test_create_cloned_volume(self):
srcvol = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
'id': 'ba253fd0-8068-11e4-98c0-5254008ea111',
'volume_type_id': 'drbdmanage',
'created_at': timeutils.utcnow()}
newvol = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111'}
dmd = DrbdManageDriver(configuration=self.configuration)
dmd.odm = DrbdManageFakeDriver()
dmd.create_cloned_volume(newvol, srcvol)
self.assertEqual(dmd.odm.calls[0][0], "list_volumes")
self.assertEqual(dmd.odm.calls[1][0], "list_assignments")
self.assertEqual(dmd.odm.calls[2][0], "create_snapshot")
self.assertEqual(dmd.odm.calls[3][0], "list_snapshots")
self.assertEqual(dmd.odm.calls[4][0], "restore_snapshot")
self.assertEqual(dmd.odm.calls[5][0], "list_snapshots")
self.assertEqual(dmd.odm.calls[6][0], "remove_snapshot")
self.assertEqual(dmd.odm.calls[6][0], "remove_snapshot")
|
apache-2.0
| 8,359,004,270,935,753,000 | 35.942857 | 78 | 0.592679 | false |
thewizardplusplus/micro
|
micro/utilities.py
|
1
|
1209
|
import os.path
from . import function_type
MICRO_VERSION = '2.3'
HEXADECIMAL_NUMBER = '[A-Fa-f0-9]'
def extract_and_add_function(entity, functions):
entity_type = function_type.FunctionType(
len(entity.children[0].children[1].children),
function_type.make_type(entity.children[0].children[2].children[0]),
)
_add_to_functions(
functions,
entity.children[0].children[0].value,
entity_type,
)
return entity_type
def extract_and_add_assignment(entity, functions):
entity_type = function_type.make_type(entity.children[0].children[1])
_add_to_functions(
functions,
entity.children[0].children[0].value,
entity_type,
)
return entity_type
def make_arguments_processor(argument_handler):
return lambda function: \
lambda *arguments: \
function(*list(map(argument_handler, arguments)))
def get_base_path(filename):
base_path = None
if filename is not None and filename != '-':
base_path = os.path.dirname(filename)
return base_path
def _add_to_functions(functions, entity_name, entity_type):
if entity_name != '':
functions[entity_name] = entity_type
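# Hedged usage sketch (assumption, not part of the original source):
# make_arguments_processor(handler) wraps a function so that every positional
# argument is passed through `handler` before the call, e.g.
#     ints = make_arguments_processor(int)
#     add = ints(lambda a, b: a + b)
#     add('2', '3')  # -> 5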
|
mit
| 8,061,258,927,986,879,000 | 25.866667 | 76 | 0.65426 | false |
deerwalk/voltdb
|
tests/sqlcoverage/schema/sql-grammar-gen-schema.py
|
1
|
14110
|
#!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2017 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# This file contains the schema for running SqlCoverage tests against a
# file of SQL statements randomly generated by the SQL-grammar-generator
# test application; as such, it is consistent with:
# voltdb/tests/sqlcoverage/ddl/sql-grammar-gen-DDL.sql, i.e.,
# voltdb/tests/sqlgrammar/DDL.sql.
{
"P0": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ()
},
"R0": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ()
},
"P1": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ("ID")
},
"R1": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ("ID")
},
"P2": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ("ID")
},
"R2": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ("ID")
},
"P3": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ("ID")
},
"R3": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ("ID")
},
"P4": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ("ID")
},
"R4": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ("ID")
},
"P5": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ("ID")
},
"R5": {
"columns": (("ID", FastSerializer.VOLTTYPE_INTEGER),
("TINY", FastSerializer.VOLTTYPE_TINYINT),
("SMALL", FastSerializer.VOLTTYPE_SMALLINT),
("INT", FastSerializer.VOLTTYPE_INTEGER),
("BIG", FastSerializer.VOLTTYPE_BIGINT),
("NUM", FastSerializer.VOLTTYPE_FLOAT),
("DEC", FastSerializer.VOLTTYPE_DECIMAL),
("VCHAR_INLINE", FastSerializer.VOLTTYPE_STRING),
("VCHAR_INLINE_MAX", FastSerializer.VOLTTYPE_STRING),
("VCHAR", FastSerializer.VOLTTYPE_STRING),
("VCHAR_JSON", FastSerializer.VOLTTYPE_STRING),
("TIME", FastSerializer.VOLTTYPE_TIMESTAMP),
("VARBIN", FastSerializer.VOLTTYPE_VARBINARY),
("POINT", FastSerializer.VOLTTYPE_GEOGRAPHY_POINT),
("POLYGON", FastSerializer.VOLTTYPE_GEOGRAPHY)),
"partitions": (),
"indexes": ("ID")
}
}
|
agpl-3.0
| -9,008,785,377,411,091,000 | 53.061303 | 73 | 0.549043 | false |
davidbgk/udata
|
udata/tests/frontend/test_territories.py
|
1
|
1165
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from flask import url_for
from udata.tests.features.territories.test_territories_process import (
create_geozones_fixtures
)
from udata.tests.frontend import FrontTestCase
class TerritoriesTest(FrontTestCase):
modules = ['features.territories', 'admin', 'search', 'core.dataset',
'core.reuse', 'core.site', 'core.organization']
def setUp(self):
super(TerritoriesTest, self).setUp()
self.paca, self.bdr, self.arles = create_geozones_fixtures()
def test_towns(self):
response = self.client.get(
url_for('territories.territory', territory=self.arles))
self.assert404(response) # By default towns are deactivated.
def test_counties(self):
response = self.client.get(
url_for('territories.territory', territory=self.bdr))
self.assert404(response) # By default counties are deactivated.
def test_regions(self):
response = self.client.get(
url_for('territories.territory', territory=self.paca))
self.assert404(response) # By default regions are deactivated.
|
agpl-3.0
| -1,281,052,889,574,804,500 | 34.30303 | 73 | 0.672961 | false |
kpbochenek/empireofcode
|
call_base.py
|
1
|
1180
|
# kpbochenek@gmail.com
import math
def total_cost(calls):
    # Billing rule implied by the test cases below: calls are grouped by day,
    # each call is rounded up to whole minutes, the first 100 minutes in a day
    # cost 1 coin per minute and every additional minute that day costs 2.
    minutes_per_day = {}
    for c in calls:
        date, _, length = c.split(' ')
        minutes_per_day[date] = minutes_per_day.get(date, 0) + int(math.ceil(int(length) / 60.0))
    result = 0
    for mins in minutes_per_day.values():
        result += min(mins, 100) + max(mins - 100, 0) * 2
    print(result)
    return result
if __name__ == '__main__':
# These "asserts" using for checking and not necessary for auto-testing
assert total_cost(("2014-01-01 01:12:13 181",
"2014-01-02 20:11:10 600",
"2014-01-03 01:12:13 6009",
"2014-01-03 12:13:55 200")) == 124, "Base example"
assert total_cost(("2014-02-05 01:00:00 1",
"2014-02-05 02:00:00 1",
"2014-02-05 03:00:00 1",
"2014-02-05 04:00:00 1")) == 4, "Short calls but money"
assert total_cost(("2014-02-05 01:00:00 60",
"2014-02-05 02:00:00 60",
"2014-02-05 03:00:00 60",
"2014-02-05 04:00:00 6000")) == 106, "Precise calls"
print("All set? Click 'Check' to review your code and earn rewards!")
|
apache-2.0
| 791,751,094,155,661,700 | 34.757576 | 78 | 0.482203 | false |
uclouvain/OSIS-Louvain
|
learning_unit/tests/api/serializers/test_component.py
|
1
|
2250
|
##############################################################################
#
# OSIS stands for Open Student Information System. It's an application
# designed to manage the core business of higher education institutions,
# such as universities, faculties, institutes and professional schools.
# The core business involves the administration of students, teachers,
# courses, programs and so on.
#
# Copyright (C) 2015-2019 Université catholique de Louvain (http://www.uclouvain.be)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.test import TestCase
from base.tests.factories.learning_component_year import LearningComponentYearFactory
from learning_unit.api.serializers.component import LearningUnitComponentSerializer
class LearningUnitComponentSerializerTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.component = LearningComponentYearFactory()
cls.serializer = LearningUnitComponentSerializer(cls.component)
def test_contains_expected_fields(self):
expected_fields = [
'type',
'type_text',
'planned_classes',
'hourly_volume_total_annual',
'hourly_volume_total_annual_computed'
]
self.assertListEqual(list(self.serializer.data.keys()), expected_fields)
def test_ensure_compute_correct_volume(self):
self.assertEqual(
self.serializer.data['hourly_volume_total_annual_computed'],
str(self.component.vol_global)
)
|
agpl-3.0
| 6,898,525,427,319,093,000 | 42.25 | 87 | 0.663851 | false |
waristo/opensource
|
tensorflow/tensorflow.py
|
1
|
1806
|
import tensorflow as tf #import for using TensorFlow
from tensorflow.examples.tutorials.mnist import input_data
# Dataset loading
mnist = input_data.read_data_sets("./samples/MNIST_data/", one_hot=True)
# Set up model
x = tf.placeholder(tf.float32, [None, 784]) #describe the interacting operations in terms of symbolic variables
W = tf.Variable(tf.zeros([784, 10])) #example of using tf.Variable
b = tf.Variable(tf.zeros([10])) #example of using tf.Variable
y = tf.nn.softmax(tf.matmul(x, W) + b) #implement the model using the W and b defined above
y_ = tf.placeholder(tf.float32, [None, 10]) #add a new placeholder for the correct answers
cross_entropy = -tf.reduce_sum(y_*tf.log(y)) #implement the cross-entropy −∑y′log(y)
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy) #compute how much each variable contributes to minimizing the cost
# Session
init = tf.initialize_all_variables() #operation that initializes the created variables
sess = tf.Session() #start the model in a session and initialize the variables
sess.run(init) #run it
# Learning
for i in range(1000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
# Validation
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1)) #gives the index of the largest element along the given axis
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) #check what fraction of the predictions are correct
# Result should be approximately 91%.
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels})) #accuracy on the test data
|
gpl-3.0
| -6,607,569,596,838,577,000 | 41.411765 | 110 | 0.660194 | false |
jgorgulho/installs
|
workstationInstall.py
|
1
|
8596
|
#!/bin/env python
import os
import sys
import shlex
import subprocess
import shutil
#
# Constants
#
WELCOME_STRING = """
########################################################
# Running Installation Script for Workstation #
########################################################\n\n\n
"""
RAN_SCRIP_STRING = """\n
########################################################
# Finished running Installation Script for Workstation #
########################################################
"""
GNOME_SHELL_EXTENSIONS_FOLDER = "gnomeShellExtentionsToInstall"
caffeineInstallScriptFile =(GNOME_SHELL_EXTENSIONS_FOLDER +
"/" + "caffeineInstallScript.sh")
caffeineInstallScriptFileContents = """
#!/bin/env bash
ROOT=$(pwd)
mkdir tempGnomeExtensionsInstallFolder &&
cd tempGnomeExtensionsInstallFolder &&
rm -rf gnome-shell-extension-caffeine &&
git clone git://github.com/eonpatapon/gnome-shell-extension-caffeine.git &&
cd gnome-shell-extension-caffeine &&
./update-locale.sh &&
glib-compile-schemas --strict --targetdir=caffeine@patapon.info/schemas/ caffeine@patapon.info/schemas &&
cp -r caffeine@patapon.info ~/.local/share/gnome-shell/extensions
"""
RUN_SCRIPT_AS_ROOT_STRING = "\n\nPlease run this script as root or equivalent.\n\n"
DNF_CONST_FILE = "/etc/dnf/dnf.conf"
DNF_DELTARPM_CONFIG_STRING = "deltarpm=1"
OS_UPDATE_SYSTEM = "sudo dnf update -y"
SUDO_GET_PASSWORD = "sudo touch /tmp/tempFileForInstallationScript"
SUDO_FORGET_PASSWORD = "sudo -k"
SUDO_FORGET_PASSWORD_STRING = "\n\nForgetting sudo password.\n\n"
INSTALL_PACKAGE_CMD = "sudo dnf install -y "
FEDORA_VERSION_NUMBER = subprocess.check_output(['rpm','-E %fedora'])
RPM_FUSION_FREE_DOWNLOAD_URL = ("\"https://download1.rpmfusion.org/free/fedora"
"/rpmfusion-free-release-" + FEDORA_VERSION_NUMBER.strip() +
".noarch.rpm\"")
RPM_FUSION_NONFREE_DOWNLOAD_URL = ("\"https://download1.rpmfusion.org/nonfree"
"/fedora/rpmfusion-nonfree-release-" + FEDORA_VERSION_NUMBER.strip() +
".noarch.rpm\"")
ATOM_EDITOR_DOWNLOAD_URL = "https://atom.io/download/rpm"
PACKAGES_FILE = "gnomeShell3Packages.txt"
PACKAGE_TO_INSTALL_LIST = " "
FILES_IN_FOLDER = " "
LIST_OF_FILES_TO_KEEP_AFTER_RUNNING_FILE = "filesToKeep.txt"
ERROR_OPENING_PACKAGES_TO_KEEP_FILE = ("\n\nPlease make sure that the file "
+ LIST_OF_FILES_TO_KEEP_AFTER_RUNNING_FILE + " exists.\n\n")
FILES_TO_KEEP_AFTER_RUNNING = " "
ERROR_OPENING_PACKAGES_FILE = ("\n\nPlease make sure that the file "
+ PACKAGES_FILE + " exists.\n\n")
ERROR_GETTING_LIST_OF_FILES_IN_FOLDER = ("\n\nCouldn't get list of files from" +
"folder.\n\n ")
ERROR_RUNNING_COMMAND = "\n\n Error running the command: "
ERROR_OPENING_FILE = "\n\n Error opening the command: "
COMMAND_GET_FILES_TO_KEEP = "cat filesToKeep.txt"
KEEP_PASSWORD = 0
TEMP_POST_INSTALL_SCRIPT_FILE = "tempPostInstallScript.sh"
#
# Functions
#
def getListOfFilesToKeepAfterRunning():
global FILES_TO_KEEP_AFTER_RUNNING
try:
with open(LIST_OF_FILES_TO_KEEP_AFTER_RUNNING_FILE) as f:
FILES_TO_KEEP_AFTER_RUNNING = f.readlines()
FILES_TO_KEEP_AFTER_RUNNING = [x.strip() for x in FILES_TO_KEEP_AFTER_RUNNING]
except:
print(ERROR_OPENING_PACKAGES_TO_KEEP_FILE)
exitScript(KEEP_PASSWORD)
print("Finished getting files to keep after install.")
def writeContentsToFile(localFileToWriteTo, localContentsToWrite):
try:
localTempFileToWriteContents = open(localFileToWriteTo,"w")
localTempFileToWriteContents.write(localContentsToWrite)
localTempFileToWriteContents.close()
except:
fileNotOpenSuccessfully(localFileToWriteTo)
exitScript(KEEP_PASSWORD)
def executeFile(localFileToRun):
runCommand("sh ./" + localFileToRun)
def makeFileExecutable(localFileToTurnExecutable):
runCommand("chmod +x " + localFileToTurnExecutable)
def runCommand(localCommandToRun):
try:
subprocess.call(shlex.split(localCommandToRun))
except:
commandNotRanSuccessfully(localCommandToRun)
exitScript(KEEP_PASSWORD)
def fileNotOpenSuccessfully(localFileNotOpen):
print(ERROR_OPENING_FILE + localFileNotOpen +" \n\n\n")
def commandNotRanSuccessfully(commandRan):
print(ERROR_RUNNING_COMMAND + commandRan +" \n\n\n")
def exitScript(forgetPass):
if(forgetPass == 0):
makeSudoForgetPass()
printEndString()
exit()
def setDeltaRpm():
fobj = open(DNF_CONST_FILE)
dnfConfFile = fobj.read().strip().split()
stringToSearch = DNF_DELTARPM_CONFIG_STRING
if stringToSearch in dnfConfFile:
print("Delta rpm already configured.\n")
else:
print('Setting delta rpm...\n')
fobj.close()
commandToRun = "sudo sh -c 'echo " + DNF_DELTARPM_CONFIG_STRING + " >> " + DNF_CONST_FILE +"'"
runCommand(commandToRun)
def performUpdate():
print("\nUpdating system...\n")
runCommand(OS_UPDATE_SYSTEM)
print("\nUpdated system.\n")
def performInstallFirstStage():
setDeltaRpm()
def installPackage(localPackageToInstall):
commandToRun = INSTALL_PACKAGE_CMD + localPackageToInstall
runCommand(commandToRun)
def installRpmFusion():
print("\nInstalling rpmfusion...\n")
installPackage(RPM_FUSION_FREE_DOWNLOAD_URL)
installPackage(RPM_FUSION_NONFREE_DOWNLOAD_URL)
    print("\nInstalled rpmfusion.\n")
def installAtomEditor():
print("\nInstalling Atom editor...\n")
installPackage(ATOM_EDITOR_DOWNLOAD_URL)
    print("\nInstalled Atom editor.\n")
def getListOfPackagesToInstall():
print("Getting list of packages to install from " + PACKAGES_FILE + " ...")
global PACKAGE_TO_INSTALL_LIST
try:
PACKAGE_TO_INSTALL_LIST = subprocess.check_output(['cat',PACKAGES_FILE])
except:
print(ERROR_OPENING_PACKAGES_FILE)
exitScript(KEEP_PASSWORD)
print("Finished getting package list.")
def installPackagesFromFile():
print("Installing packages from list...")
installPackage(PACKAGE_TO_INSTALL_LIST)
print("Finished installing package list.")
def getListOfFilesInFolder():
print("Getting list of files in folder ...")
global FILES_IN_FOLDER
tempCurrentFolder = os.getcwd()
FILES_IN_FOLDER = os.listdir(tempCurrentFolder)
print("Finished getting list of files in folder.")
def cleanAfterInstall():
getListOfFilesToKeepAfterRunning()
getListOfFilesInFolder()
FILES_IN_FOLDER.sort()
FILES_TO_KEEP_AFTER_RUNNING.sort()
for fileInFolder in FILES_IN_FOLDER:
#for fileToKeep in FILES_TO_KEEP_AFTER_RUNNING:
if(fileInFolder not in FILES_TO_KEEP_AFTER_RUNNING):
print(fileInFolder + " is not in files to keep.")
try:
os.remove(fileInFolder)
except OSError, e:
try:
shutil.rmtree(fileInFolder)
except OSError, e:
print ("Error: %s - %s." % (e.filename,e.strerror))
def installCaffeineGnomeExtention():
# Caffeine Gnome Shell Extension
print("Installing Caffeine Gnome Shell Extensions...")
writeContentsToFile(caffeineInstallScriptFile,caffeineInstallScriptFileContents)
makeFileExecutable(caffeineInstallScriptFile)
executeFile(caffeineInstallScriptFile)
    print("Installed Caffeine Gnome Shell Extensions.")
def performInstallFourthStage():
installCaffeineGnomeExtention()
def performInstallThirdStage():
getListOfPackagesToInstall()
installPackagesFromFile()
def performInstallSecondtStage():
installRpmFusion()
def performInstall():
performInstallFirstStage()
performUpdate()
performInstallSecondtStage()
performUpdate()
performInstallThirdStage()
performInstallFourthStage()
cleanAfterInstall()
makeFileExecutable(TEMP_POST_INSTALL_SCRIPT_FILE)
executeFile(TEMP_POST_INSTALL_SCRIPT_FILE)
def checkIfUserHasRootRights():
return(os.geteuid())
def printWelcomeString():
print(WELCOME_STRING)
def printNeedRootRightsString():
print(RUN_SCRIPT_AS_ROOT_STRING)
def printEndString():
print(RAN_SCRIP_STRING)
def getSudoPass():
runCommand(SUDO_GET_PASSWORD)
def makeSudoForgetPass():
print(SUDO_FORGET_PASSWORD_STRING)
runCommand(SUDO_FORGET_PASSWORD)
def main():
printWelcomeString()
if(checkIfUserHasRootRights() == 0):
performInstall()
else:
try:
getSudoPass()
except:
printNeedRootRightsString()
exitScript(KEEP_PASSWORD)
performInstall()
exitScript(KEEP_PASSWORD)
#
# Run Main Script
#
main()
|
gpl-3.0
| -5,450,493,732,590,814,000 | 31.684411 | 105 | 0.684388 | false |
justinvforvendetta/electrum-rby
|
gui/qt/qrcodewidget.py
|
1
|
3721
|
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
import PyQt4.QtGui as QtGui
import os
import qrcode
import electrum_rby
from electrum_rby import bmp
from electrum_rby.i18n import _
class QRCodeWidget(QWidget):
def __init__(self, data = None, fixedSize=False):
QWidget.__init__(self)
self.data = None
self.qr = None
self.fixedSize=fixedSize
if fixedSize:
self.setFixedSize(fixedSize, fixedSize)
self.setData(data)
def setData(self, data):
if self.data != data:
self.data = data
if self.data:
self.qr = qrcode.QRCode()
self.qr.add_data(self.data)
if not self.fixedSize:
k = len(self.qr.get_matrix())
self.setMinimumSize(k*5,k*5)
else:
self.qr = None
self.update()
def paintEvent(self, e):
if not self.data:
return
black = QColor(0, 0, 0, 255)
white = QColor(255, 255, 255, 255)
if not self.qr:
qp = QtGui.QPainter()
qp.begin(self)
qp.setBrush(white)
qp.setPen(white)
r = qp.viewport()
qp.drawRect(0, 0, r.width(), r.height())
qp.end()
return
matrix = self.qr.get_matrix()
k = len(matrix)
qp = QtGui.QPainter()
qp.begin(self)
r = qp.viewport()
margin = 10
framesize = min(r.width(), r.height())
boxsize = int( (framesize - 2*margin)/k )
size = k*boxsize
left = (r.width() - size)/2
top = (r.height() - size)/2
# Make a white margin around the QR in case of dark theme use
qp.setBrush(white)
qp.setPen(white)
qp.drawRect(left-margin, top-margin, size+(margin*2), size+(margin*2))
for r in range(k):
for c in range(k):
if matrix[r][c]:
qp.setBrush(black)
qp.setPen(black)
else:
qp.setBrush(white)
qp.setPen(white)
qp.drawRect(left+c*boxsize, top+r*boxsize, boxsize, boxsize)
qp.end()
class QRDialog(QDialog):
def __init__(self, data, parent=None, title = "", show_text=False):
QDialog.__init__(self, parent)
d = self
d.setWindowTitle(title)
vbox = QVBoxLayout()
qrw = QRCodeWidget(data)
vbox.addWidget(qrw, 1)
if show_text:
text = QTextEdit()
text.setText(data)
text.setReadOnly(True)
vbox.addWidget(text)
hbox = QHBoxLayout()
hbox.addStretch(1)
config = electrum_rby.get_config()
if config:
filename = os.path.join(config.path, "qrcode.bmp")
def print_qr():
bmp.save_qrcode(qrw.qr, filename)
QMessageBox.information(None, _('Message'), _("QR code saved to file") + " " + filename, _('OK'))
def copy_to_clipboard():
bmp.save_qrcode(qrw.qr, filename)
QApplication.clipboard().setImage(QImage(filename))
QMessageBox.information(None, _('Message'), _("QR code saved to clipboard"), _('OK'))
b = QPushButton(_("Copy"))
hbox.addWidget(b)
b.clicked.connect(copy_to_clipboard)
b = QPushButton(_("Save"))
hbox.addWidget(b)
b.clicked.connect(print_qr)
b = QPushButton(_("Close"))
hbox.addWidget(b)
b.clicked.connect(d.accept)
b.setDefault(True)
vbox.addLayout(hbox)
d.setLayout(vbox)
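# Usage sketch (not part of the original module; assumes a running QApplication
# and an importable electrum_rby package; the data string below is made up):
#   app = QApplication([])
#   dialog = QRDialog("bitcoin:EXAMPLE_ADDRESS?amount=0.1", title="Payment request", show_text=True)
#   dialog.exec_()
# A QRCodeWidget can also be embedded directly in any layout, e.g. QRCodeWidget(data, fixedSize=200).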
|
gpl-3.0
| 1,245,174,242,556,951,300 | 26.768657 | 113 | 0.52244 | false |
panmari/tensorflow
|
tensorflow/examples/skflow/boston.py
|
1
|
1485
|
# Copyright 2015-present Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sklearn import datasets, cross_validation, metrics
from sklearn import preprocessing
from tensorflow.contrib import skflow
# Load dataset
boston = datasets.load_boston()
X, y = boston.data, boston.target
# Split dataset into train / test
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y,
test_size=0.2, random_state=42)
# scale data (training set) to 0 mean and unit Std. dev
scaler = preprocessing.StandardScaler()
X_train = scaler.fit_transform(X_train)
# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = skflow.TensorFlowDNNRegressor(hidden_units=[10, 10],
steps=5000, learning_rate=0.1, batch_size=1)
# Fit
regressor.fit(X_train, y_train)
# Predict and score
score = metrics.mean_squared_error(regressor.predict(scaler.transform(X_test)), y_test)  # transform (not re-fit) the test data with the training-set scaler
print('MSE: {0:f}'.format(score))
|
apache-2.0
| -4,287,292,242,645,421,000 | 34.357143 | 91 | 0.751515 | false |
aliaksandrb/anydo_api
|
docs/conf.py
|
1
|
8486
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# anydo_api documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import anydo_api
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'AnyDo API Python'
copyright = u'2015, Aliaksandr Buhayeu'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = anydo_api.__version__
# The full version, including alpha/beta/rc tags.
release = anydo_api.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'anydo_apidoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'anydo_api.tex',
u'AnyDo API Python Documentation',
u'Aliaksandr Buhayeu', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'anydo_api',
u'AnyDo API Python Documentation',
[u'Aliaksandr Buhayeu'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'anydo_api',
u'AnyDo API Python Documentation',
u'Aliaksandr Buhayeu',
'anydo_api',
'Unofficial AnyDo API client in object-oriented style.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
mit
| 8,169,311,527,095,483,000 | 29.858182 | 76 | 0.705161 | false |
jmrozanec/white-bkg-classification
|
scripts/preprocessing.py
|
1
|
1441
|
#https://github.com/tflearn/tflearn/issues/180
from __future__ import division, print_function, absolute_import
import tflearn
from tflearn.data_utils import shuffle, to_categorical
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization, batch_normalization
from tflearn.layers.estimator import regression
from tflearn.data_utils import image_preloader
import skimage
from skimage import data
from skimage import filters
import os
from skimage import io
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
reds="../images/pictures/red/"
greens="../images/pictures/green/"
redshist="../images/histograms/red/"
greenshist="../images/histograms/green/"
directory=reds
histdirectory=redshist
for filename in os.listdir(directory):
if filename.endswith(".jpg"):
img = io.imread(os.path.join(directory, filename))
hist, bin_edges = np.histogram(img, bins=255)
bin_centers = 0.5*(bin_edges[:-1] + bin_edges[1:])
binary_img = img > 0.8
plt.figure(figsize=(1,1))
fig, ax = plt.subplots(nrows=1, ncols=1) #http://stackoverflow.com/questions/9622163/save-plot-to-image-file-instead-of-displaying-it-using-matplotlib-so-it-can-be
plt.plot(bin_centers, hist, lw=2)
fig.savefig(os.path.join(histdirectory, filename), bbox_inches='tight')
plt.close()
else:
continue
|
apache-2.0
| 3,670,213,307,102,543,000 | 36.921053 | 164 | 0.76891 | false |
dimonaks/siman
|
siman/functions.py
|
1
|
29689
|
from __future__ import division, unicode_literals, absolute_import
import os, tempfile, copy, math, itertools, sys, traceback, datetime  # traceback and datetime are needed by calc_ac()
import numpy as np
from operator import itemgetter
from itertools import product
try:
import scipy
except:
print('functions.py: no scipy, smoother() will not work()')
from siman import header
from siman.header import print_and_log, printlog, runBash, eV_A_to_J_m
from siman.small_functions import is_list_like, is_string_like, gunzip_file, makedir, grep_file, setting_sshpass
def unique_elements(seq, idfun=None):
# return only unique_elements order preserving
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
# in old Python versions:
# if seen.has_key(marker)
# but in new ones:
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
def smoother(x, n, mul = 1, align = 1):
"""
mul - additionally multiplies values
#align - find first non-zero point and return it to zero
#n - smooth value,
    if algo = 'gaus' then it is sigma
use something like 0.8
if algo = 'my'
n of 10-15 is good
"""
algo = 'gaus'
# algo = 'my'
if algo == 'my':
x_smooth = []
L = len(x)
store = np.zeros((n,1),float)
for u in range(L-n):
for v in range(n):
store[v] = x[u+v]
av = float(sum(store)) / n
x_smooth.append(av*mul)
for u in range(L-n,L):
for v in range(L-u-1):
store[v] = x[u+v]
av = float(sum(store)) / n
x_smooth.append(av*mul)
elif algo == 'gaus':
x_smooth =x
# x_smooth = scipy.ndimage.filters.median_filter(x,size =4)
# print('sigma is ', n)
x_smooth = scipy.ndimage.filters.gaussian_filter1d(x_smooth, n, order =0)
# x_smooth = scipy.ndimage.interpolation.spline_filter1d(x, 4)
else:
x_smooth = x
if align:
# print(x_smooth[0])
x_smooth[0] = 0
# sys.exit()
return np.asarray(x_smooth)
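# Usage sketch for smoother() (illustrative only; the signal and the sigma value are made up):
#   noisy = np.sin(np.linspace(0, 10, 200)) + np.random.normal(0, 0.1, 200)
#   smooth = smoother(noisy, 0.8)   # Gaussian filtering with sigma = 0.8, first point pinned to 0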
def run_on_server(command, addr = None):
printlog('Running', command, 'on server ...')
command = command.replace('\\', '/') # make sure is POSIX
# sys.exit()
# print(header.sshpass)
# sys.exit()
if addr is None:
addr = header.cluster_address
if header.ssh_object:
# printlog('Using paramiko ...', imp = 'y')
# if 'ne' in header.warnings:
# sys.exit()
out = header.ssh_object.run(command, noerror = True, printout = 'ne' in header.warnings)
elif header.sshpass and header.sshpass == 'proxy':
com = 'ssh -tt sdv sshpass -f '+ header.path2pass +' ssh '+addr+' "'+command+'"'
# print(com)
# sys.exit()
out = runBash(com)
# print(out)
out = out.split('Connection to')[0] # remove last message Connection to ipaddress closed
# sys.exit()
elif header.sshpass:
com = 'sshpass -f '+header.path2pass+' ssh '+addr+' "'+command+'"'
# print(com)
# sys.exit()
out = runBash(com)
# sys.exit()
else:
bash_comm = 'ssh '+addr+' "'+command+'"'
# print(bash_comm)
# sys.exit()
out = runBash(bash_comm)
out = out.split('#')[-1].strip()
printlog(out)
# print(out)
# sys.exit()
return out
def push_to_server(files = None, to = None, addr = None):
"""
if header.ssh_object then use paramiko
to (str) - path to remote folder !
"""
if not is_list_like(files):
files = [files]
to = to.replace('\\', '/') # make sure is POSIX
files_str = ' '.join(np.array(files ))
command = ' mkdir -p {:}'.format( to )
# print('asfsadfdsf', to)
printlog('push_to_server():', command, run_on_server(command, addr))
# sys.exit()
printlog('push_to_server(): uploading files ', files, 'to', addr, to)
if header.ssh_object:
for file in files:
# print(file, to)
header.ssh_object.put(file, to+'/'+os.path.basename(file) )
out = ''
elif header.sshpass and header.sshpass == 'proxy':
com = 'tar cf - '+ files_str + ' | ssh sdv "sshpass -f ~/.ssh/p ssh '+addr+' \\"cd '+header.cluster_home+' && tar xvf -\\"" '
# print(com)
# sys.exit()
out = runBash(com)
# print(out)
# sys.exit()
elif header.sshpass:
# if '@' not in addr:
# printlog('Error! Please provide address in the form user@address')
# l = addr.split('@')
# print(l)
# user = l[0]
# ad = l[1]
# com = 'rsync --rsh='+"'sshpass -f /home/aksenov/.ssh/p ssh' " +' -uaz '+files_str+ ' '+addr+':'+to
com = 'rsync --rsh='+"'sshpass -f "+header.path2pass+" ssh' " +' -uaz '+files_str+ ' '+addr+':'+to
# print(com)
# sys.exit()
out = runBash(com)
else:
out = runBash('rsync -uaz '+files_str+ ' '+addr+':'+to)
printlog(out)
return out
def file_exists_on_server(file, addr):
file = file.replace('\\', '/') # make sure is POSIX
printlog('Checking existence of file', file, 'on server', addr )
exist = run_on_server(' ls '+file, addr)
# if header.ssh_object:
# exist = header.ssh_object.fexists(file)
# else:
# exist = runBash('ssh '+addr+' ls '+file)
if 'No such file' in exist:
exist = ''
else:
exist = 'file exists'
if exist:
res = True
else:
res = False
printlog('File exist? ', res)
return res
def get_from_server(files = None, to = None, to_file = None, addr = None, trygz = True):
"""
Download files using either paramiko (higher priority) or rsync;
For paramiko header.ssh_object should be defined
files (list of str) - files on cluster to download
to (str) - path to local folder !
to_file (str) - path to local file (if name should be changed); in this case len(files) should be 1
The gz file is also checked
RETURN
result of download
TODO:
        currently a new connection is opened for each file;
        they could be copied over a single connection
"""
# print(addr)
# sys.exit()
def download(file, to_file):
# print(header.sshpass)
if header.ssh_object:
exist = file_exists_on_server(file, addr)
# try:
if exist:
printlog('Using paramiko: ssh_object.get(): from to ', file, to_file)
header.ssh_object.get(file, to_file )
out = ''
# except FileNotFoundError:
else:
out = 'error, file not found'
elif header.sshpass and header.sshpass == 'proxy':
# com = 'ssh sdv "sshpass -f ~/.ssh/p ssh ' + addr + ' \\"tar zcf - '+ file +'\\"" | tar zxf - '+to_file # does not work?
com = 'ssh sdv "sshpass -f ~/.ssh/p ssh ' + addr + ' \\"tar cf - '+ file +'\\"" > '+to_file
# print('sshpass',com)
# sys.exit()
out = runBash(com)
elif header.sshpass:
#com = 'rsync --rsh='+"'sshpass -f /home/aksenov/.ssh/p ssh' " +' -uaz '+addr+':'+file+ ' '+to_file
com = 'rsync --rsh='+"'sshpass -f "+header.path2pass+" ssh' " +' -uaz '+addr+':'+file+ ' '+to_file
out = runBash(com)
# print(addr)
# sys.exit()
else:
# print(addr,file,to_file)
out = runBash('rsync -uaz '+addr+':'+file+ ' '+to_file)
if 'error' in out:
res = out
else:
res = 'OK'
out = ''
printlog('Download result is ', res)
return out
if '*' in files:
printlog('get_from_server(): get by template')
files = run_on_server('ls '+files, addr).splitlines()
# print(files)
# sys.exit()
printlog('get_from_server(): I download', files)
elif not is_list_like(files):
files = [files]
files = [file.replace('\\', '/') for file in files] #make sure the path is POSIX
files_str = ', '.join(np.array(files ))
printlog('Trying to download', files_str, 'from server', imp = 'n')
for file in files:
if not to and not to_file: #use temporary file
with tempfile.NamedTemporaryFile() as f:
to_file_l = f.name #system independent filename
elif not to_file: #obtain filename
to_file_l = os.path.join(to, os.path.basename(file) )
else:
to_file_l = to_file
makedir(to_file_l)
out = download(file, to_file_l)
if out and trygz:
printlog('File', file, 'does not exist, trying gz', imp = 'n')
# run_on_server
files = run_on_server(' ls '+file+'*', addr)
file = files.split()[-1]
# print(file)
nz = file.count('gz')
ext = '.gz'*nz
# file+='.gz'
to_file_l+=ext
if file:
out = download(file, to_file_l)
printlog(' gz found with multiplicity', ext, imp = 'n')
for i in range(nz):
printlog('unzipping', to_file_l)
gunzip_file(to_file_l)
to_file_l = to_file_l[:-3]
else:
printlog(' No gz either!', imp = 'n')
# if '5247' in file:
# sys.exit()
return out
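# Usage sketch for get_from_server() (hypothetical paths and address):
#   get_from_server(files='calc/OUTCAR', to='local_results', addr='user@cluster.example.org')
# downloads OUTCAR (falling back to OUTCAR.gz and unpacking it) into local_results/.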
def salary_inflation():
"""Calculate salary growth in Russia taking into account inflation"""
inflation2000_2014 = [
5.34,
6.45,
6.58,
6.10,
8.78,
8.80,
13.28,
11.87,
9.00 ,
10.91,
11.74,
11.99,
15.06,
18.8,
20.1]
init_salary = 1500 # in jan 2000; other sources 2000 - very important
for i, l in enumerate( reversed(inflation2000_2014) ):
init_salary = (1+l/100)*init_salary
print( init_salary, i+2000)
salary2014 = 30000
increase = salary2014/init_salary
print( increase)
# salary_inflation()
def element_name_inv(el):
el_dict = header.el_dict
nu_dict = header.nu_dict
# print type(el), el, type(str('sdf') )
if is_string_like(el):
try:
elinv = el_dict[el]
except:
print_and_log("Error! Unknown element: " +str(el))
raise RuntimeError
else:
el = int(el)
try:
elinv = nu_dict[el]
except:
print_and_log("Error! Unknown element: "+str(el))
raise RuntimeError
return elinv # inversed notion of element
invert = element_name_inv
def return_atoms_to_cell(st):
st = st.return_atoms_to_cell()
return st
def calc_ac(a1, c1, a2, c2, a_b = 0.1, c_b = 0.1, type = "two_atoms"):
"""
Calculate values of hexagonal lattice parameters for cell with two different atoms.
The used assumption is:
    1. Provided lattice constants are for large enough cells, in which the excess volume (dV) of the impurity does not depend on the size of the cell.
    2. Two atoms do not interact with each other, which allows one to use dV(CO) = dV(C) + dV(O)
Two regimes:
two_atoms - calculate cell sizes if additional atom was added
double_cell - if cell was doubled; only first cell and second_cell are needed
Input:
a1, c1 - lattice constants of cell with first impurity atom (first cell)
a2, c2 - lattice constants of cell with second impurity atom (second cell)
    a_b, c_b - lattice constants of cell with pure hexagonal metal
Output:
a, c - lattice constants of cell with two atoms
"""
hstring = ("%s #on %s"% (traceback.extract_stack(None, 2)[0][3], datetime.date.today() ) )
if hstring != header.history[-1]: header.history.append( hstring )
A = (a1**2 * c1) + (a2**2 * c2) - (a_b**2 * c_b)
B = 0.5 * (c1/a1 + c2/a2)
C = ( (a1**2 * c1) + (a2**2 * c2) ) * 0.5 #sum of cell volumes divided by 2 since during the construction of new cell we will use multiplication by 2
# print "A,B=",A,B
a = (A/B)**(1./3)
c = a * B
a = round(a,5)
c = round(c,5)
print_and_log( "a, c, c/a for cell with pure hcp ", a_b, c_b, round(c_b/a_b,4), imp ='y' )
print_and_log( "a, c, c/a for cell with first atom ", a1, c1, round(c1/a1,4), imp ='y' )
print_and_log( "a, c, c/a for cell with second atom ", a2, c2, round(c2/a2,4), imp ='y' )
#for double cell
a3 = (C/B)**(1./3)
c3 = a3 * B
a3 = round(a3,5)
c3 = round(c3,5)
if type == "two_atoms":
print_and_log( "a, c, c/a for cell with two atoms ", a, c, round(c/a,4), "# the same cell but with two atoms\n", imp ='y')
elif type == "double_cell":
print_and_log( "a, c, c/a for new cell ", a3, c3, round(c3/a3,4), "# for cell with V = V(first_cell) + V(second cell), but only for the case if V(second cell) == V(first_cell)", imp ='y')
return a, c
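# Usage sketch for calc_ac() (hypothetical hcp lattice constants, in Angstrom):
#   a, c = calc_ac(2.93, 4.68, 2.94, 4.70, a_b=2.92, c_b=4.67, type="two_atoms")
# returns the estimated a and c of a cell containing both impurity atoms.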
def read_charge_den_vasp():
"""
Read CHG vasp file and return ChargeDen object
"""
class ChargeDen():
"""docstring for ChargeDen"""
def __init__(self, ):
# self.arg = arg
pass
def rotation_matrix(axis,theta):
axis = axis/math.sqrt(np.dot(axis,axis))
a = math.cos(theta/2)
b,c,d = -axis*math.sin(theta/2)
return np.array([[a*a+b*b-c*c-d*d, 2*(b*c-a*d), 2*(b*d+a*c)],
[2*(b*c+a*d), a*a+c*c-b*b-d*d, 2*(c*d-a*b)],
[2*(b*d-a*c), 2*(c*d+a*b), a*a+d*d-b*b-c*c]])
def rotate():
v = np.array([3,5,0])
axis = np.array([4,4,1])
theta = 1.2
print(np.dot(rotation_matrix(axis,theta),v))
# [ 2.74911638 4.77180932 1.91629719]
def rotation_matrix_from_vectors(vec1, vec2):
""" Find the rotation matrix that aligns vec1 to vec2
:param vec1: A 3d "source" vector
:param vec2: A 3d "destination" vector
:return mat: A transform matrix (3x3) which when applied to vec1, aligns it with vec2.
"""
a, b = (vec1 / np.linalg.norm(vec1)).reshape(3), (vec2 / np.linalg.norm(vec2)).reshape(3)
v = np.cross(a, b)
c = np.dot(a, b)
s = np.linalg.norm(v)
kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
rotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))
return rotation_matrix
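# Usage sketch for rotation_matrix_from_vectors() (the example vectors are arbitrary):
#   R = rotation_matrix_from_vectors(np.array([1, 0, 0]), np.array([0, 0, 1]))
#   np.dot(R, np.array([1, 0, 0]))   # -> approximately [0, 0, 1]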
def plot_charge_den():
"""Test function; Was not used"""
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from matplotlib import cm
fig = plt.figure()
ax = fig.gca(projection='3d')
X, Y, Z = axes3d.get_test_data(0.05)
# print X
# print Y
# print Z
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3)
# cset = ax.contourf(X, Y, Z, zdir='z', offset=-100, cmap=cm.coolwarm)
# cset = ax.contourf(X, Y, Z, zdir='x', offset=-40, cmap=cm.coolwarm)
# cset = ax.contourf(X, Y, Z, zdir='y', offset=40, cmap=cm.coolwarm)
ax.set_xlabel('X')
ax.set_xlim(-40, 40)
ax.set_ylabel('Y')
ax.set_ylim(-40, 40)
ax.set_zlabel('Z')
ax.set_zlim(-100, 100)
plt.show()
return
def plot_interaction(calclist, calc):
"""
    For calculation of the interaction parameter alpha;
    keep in mind that this parameter is obtained under the approximation of a regular solution
"""
e_seg = []
dX = []
for id in calclist:
Xgb = calc[id].Xgb
X = calc[id].X
dX.append(Xgb/1 - X)
e_seg.append(calc[id].e_seg)
# print calc[id].e_seg
# print calc[id].X
#print dX
coeffs1 = np.polyfit(dX, e_seg, 1)
fit_func1 = np.poly1d(coeffs1)
print( "list of seg energies: ", e_seg )
print( "list of dX : ", dX )
print( "Fitting using linear function:" )
print( fit_func1 )
    print( "E_seg0 = {0:0.0f} meV, standard enthalpy of segregation".format(fit_func1[0]) )
print( "alpha = {0:0.0f} meV, interaction coefficient".format(-fit_func1[1]/2) )
return
def calculate_voronoi(self, state = 'end'):
# By default two quantities per atom are calculated by this compute.
# The first is the volume of the Voronoi cell around each atom.
# Any point in an atom's Voronoi cell is closer to that atom than any other.
# The second is the number of faces of the Voronoi cell, which
# is also the number of nearest neighbors of the atom in the middle of the cell.
# state - init or end; if init then saved in self.init.vorovol; if end than saved in self.vorovol
write_lammps(self, state, filepath = 'voronoi_analysis/structure.lammps') #write structure for lammps
runBash("rm voronoi_analysis/dump.voro; /home/aksenov/installed/lammps-1Feb14/src/lmp_serial < voronoi_analysis/voronoi.in > voronoi_analysis/log")
if state == 'end':
self.vorovol = []
self.vorofaces = []
vorovol = self.vorovol
vorofaces = self.vorofaces
elif state == 'init':
self.init.vorovol = []
self.init.vorofaces = []
vorovol = self.init.vorovol
vorofaces = self.init.vorofaces
vsum=0
wlist = []
with open('voronoi_analysis/dump.voro','r') as volfile: #analyze dump.voro
for line in volfile:
if 'ITEM: ATOMS ' in line:
break
for line in volfile:
ll = line.split()
if int(ll[1]) > 1:
wlist.append( [ll[0], ll[5], ll[6], ll[2]] )
# print 'Volume of atom ',ll[0],'is', ll[5]
vsum= vsum+float(ll[5])
print_and_log( 'Check total volume ', vsum, self.end.vol)
wlist.sort(key = itemgetter(0)) #sort according to the position of atoms
print_and_log( "atom #, voronoi vol, voronoi faces, x coordinate: ", )
print_and_log( wlist)
for w in wlist:
vorovol.append(float(w[1]))
vorofaces.append(int(w[2]))
# print 'Voro vol ',self.end.vorovol
# print 'Voro faces',self.end.vorofaces
# print len(wlist)
if hasattr(self, 'vorovol'):
voro = ''
if len(vorovol) == 2: #C and O
voro = " {0:5.2f} & {1:2d} & {2:5.2f} & {3:2d} ".format(vorovol[0], vorofaces[0], vorovol[1], vorofaces[1] ).center(25)
else:
voro = " {0:5.2f} & {1:2d} ".format(vorovol[0], vorofaces[0] ).center(25)
voro+='&'
else:
voro = ""
print_and_log( "Voronoi volume = ", voro, imp = 'y')
return voro
def log_history(hstring):
try:
if hstring != header.history[-1]: header.history.append( hstring )
except:
header.history.append( hstring )
return
def gb_energy_volume(gb,bulk):
if (gb.end.rprimd[1] != bulk.end.rprimd[1]).any() or (gb.end.rprimd[2] != bulk.end.rprimd[2]).any():
print_and_log("Warning! You are trying to calculate gb_energy from cells with different lateral sizes:"+str(gb.end.rprimd)+" "+str(bulk.end.rprimd)+"\n")
#print bulk.vol
V_1at = bulk.vol / bulk.natom #* to_ang**3
E_1at = bulk.energy_sigma0 / bulk.natom
A = np.linalg.norm( np.cross(gb.end.rprimd[1], gb.end.rprimd[2]) ) #surface area of gb
#print A
gb.v_gb = ( gb.vol - V_1at * gb.natom) / A / 2. * 1000
gb.e_gb = ( gb.energy_sigma0 - E_1at * gb.natom) / A / 2. * eV_A_to_J_m * 1000
gb.e_gb_init = ( gb.list_e_sigma0[0] - E_1at * gb.natom) / A / 2. * eV_A_to_J_m * 1000
gb.bulk_extpress = bulk.extpress
#print "Calc %s; e_gb_init = %.3f J/m^2; e_gb = %.3f J/m; v_gb = %.3f angstrom "%(gb.name, gb.e_gb_init, gb.e_gb, gb.v_gb )
outst = "%15s&%7.0f&%7.0f"%(gb.name, gb.e_gb, gb.v_gb)
return outst
def headers():
j = (7,12,14,7,8,9,9,5,5,20,5,20,8,12,20,8,5,8,8)
d="&"
header_for_bands= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"nband"+d+"Added, \%"+"\\\\"
header_for_ecut= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"Ecut,eV"+"\\\\"
header_for_npar= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"NPAR".center(j[16])+d+"LPLANE".center(j[17])+"\\\\"
header_for_kpoints= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"k-mesh".center(j[8])+d+"k-spacings".center(j[9])+d+"nkpt".center(j[10])+"\\\\"
header_for_tsmear= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"k-mesh".center(j[8])+d+"tsmear, meV".center(j[13])+d+"Smearing error, meV/atom".center(j[14])+"\\\\"
header_for_stress= "Set".ljust(j[0])+d+"Etot".center(j[1])+d+"a1,a2".center(j[2])+d+"c".center(j[3])\
+d+"time, m".center(j[4])+d+"ittime, s".center(j[5])+d+"Nmd,Avr.".rjust(j[6])+d\
+"Warn!"+d+"Stress, intr u.*1000".center(j[11])+d+"Pressure, MPa".center(j[12])
#print "\\hline"
return header_for_kpoints
def read_vectors(token, number_of_vectors, list_of_words, type_func = None, lists = False):
"""Returns the list of numpy vectors for the last match"""
# lists - return list of lists instead list of vectors
if type_func is None:
type_func = lambda a : float(a)
number_of_matches = list_of_words.count( token )
if number_of_matches == 0:
#print_and_log("Warning token '"+token+"' was not found! return empty\n")
return [None]
if number_of_matches > 1:
print_and_log("Warning token '"+token+"' was found more than one times\n")
raise RuntimeError
index = list_of_words.index(token, number_of_matches - 1 ) #Return the index of the last match
#print list_of_words[index]
list_of_vectors = []
list_of_lists = []
vector = np.zeros((3))
for i in range(number_of_vectors):
vector[0] = type_func(list_of_words[index + 1])
vector[1] = type_func(list_of_words[index + 2])
vector[2] = type_func(list_of_words[index + 3])
list3 = []
for j in 1,2,3:
list3.append(type_func(list_of_words[index + j]) )
index+=3
list_of_vectors.append(vector.copy())
list_of_lists.append(list3)
if lists:
out = list_of_lists
else:
out = list_of_vectors
return out
def read_string(token, length, string):
    sh = len(token)+1
    i = string.find(token)
    # print('length', i, i+length)
    # sys.exit()
    if i == -1:
        return ''
    else:
        return string[i+sh:i+sh+length]
def read_list(token, number_of_elements, ttype, list_of_words):
"""Input is token to find, number of elements to read, type of elements and list of words,
where to search
Returns the list of elements for the last match"""
number_of_matches = list_of_words.count( token )
#if number_of_elements == 0: raise RuntimeError
if number_of_matches > 1:
print_and_log("Warning token '"+token+"' was found more than one times\n")
raise RuntimeError
if number_of_matches == 0 or number_of_elements == 0:
#print_and_log("Warning token '"+token+"' was not found or asked number of elements is zero! set to [None]\n")
#if ttype == str:
# return ['']*number_of_elements
#else:
# return [0]*number_of_elements
return [None]
try:
index = list_of_words.index(token, number_of_matches - 1 ) #Return the index of the last match
except ValueError:
print_and_log("Warning!, token "+token+" was not found. I return [None]!\n")
return [None]
index+=1 #the position of token value
list_of_elements = []
    #define function depending on type:
if ttype == int :
def convert(a):
return int(a)
elif ttype == float:
def convert(a):
# print a
return float(a)
elif ttype == str :
def convert(a):
return str(a)
#print list_of_words[index], type(list_of_words[index])
if list_of_words[index] == "None" :
def convert(a):
return [None]
#Make convertion
for i in range(number_of_elements):
if 'None' in list_of_words[index]:
list_of_elements.append(None)
else:
list_of_elements.append( convert( list_of_words[index] ) )
index+=1
return list_of_elements
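# Usage sketch for read_list() (the token and word list below are made up):
#   words_list = 'natom 4 typat 1 1 2'.split()
#   read_list('natom', 1, int, words_list)    # -> [4]
#   read_list('typat', 2, int, words_list)    # -> [1, 1]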
def words(fileobj):
    """Generator of words. However, it does not allow using list methods on the returned generator"""
for line in fileobj:
for word in line.split():
yield word
def server_cp(copy_file, to, gz = True, scratch = False, new_filename = None):
if scratch:
if not header.PATH2ARCHIVE:
printlog('Warning! PATH2ARCHIVE is empty! Please put path archive in ~/simanrc.py or ./project_conf.py ')
copy_file = header.PATH2ARCHIVE + '/' + copy_file
else:
copy_file = header.project_path_cluster + '/' + copy_file
filename = os.path.basename(copy_file)
if new_filename is None:
new_filename = filename
if gz:
command = 'cp '+copy_file + ' ' + to +'/'+new_filename + '.gz ; gunzip -f '+ to+ '/'+new_filename+'.gz'
else:
command = 'cp '+copy_file + ' ' + to +'/'+new_filename
printlog('Running on server', command, imp = '')
if file_exists_on_server(copy_file, header.cluster_address):
out = run_on_server(command, addr = header.cluster_address)
printlog('Output of run_on_server', out, imp = '')
else:
out = 'error, file does not exist on server: '+copy_file
return out
def wrapper_cp_on_server(file, to, new_filename = None):
"""
    tries scratch and gz combinations iteratively
"""
copy_to = to
copy_file = file
filename = os.path.basename(file)
if new_filename:
app = 'with new name '+new_filename
else:
app = ''
for s, gz in product([0,1], ['', '.gz']):
printlog('scratch, gz:', s, gz)
out = server_cp(copy_file+gz, to = to, gz = gz, scratch = s, new_filename = new_filename)
if out == '':
            printlog('File', filename, 'was successfully copied to',to, app, imp = 'y')
break
# else:
else:
printlog('Warning! File was not copied, probably it does not exist. Try using header.warnings = "neyY" for more details', imp = 'y')
return
def update_incar(parameter = None, value = None, u_ramp_step = None, write = True, f = None, run = False, st = None):
    """Modifications of INCAR. Note that *parameter* will be changed to the new *value*
    only if it already exists in INCAR. *u_ramp_step* - current step used to determine U,
    *write* - sometimes just the return value is needed.
Returns U value corresponding to *u_ramp_step*.
"""
self = st
u_step = None
if parameter == 'LDAUU':
#Update only non-zero elements of LDAUU with value
set_LDAUU_list = self.set.vasp_params['LDAUU']
new_LDAUU_list = copy.deepcopy(set_LDAUU_list)
# print set_LDAUU_list
u_step = 0.0
for i, u in enumerate(set_LDAUU_list):
if u == 0:
continue
u_step = np.linspace(0, u, self.set.u_ramping_nstep)[u_ramp_step]
u_step = np.round(u_step, 1)
# new_LDAUU_list[i] = value
new_LDAUU_list[i] = u_step
new_LDAUU = 'LDAUU = '+' '.join(['{:}']*len(new_LDAUU_list)).format(*new_LDAUU_list)
command = "sed -i.bak '/LDAUU/c\\" + new_LDAUU + "' INCAR\n"
#print('u_step',u_step)
#sys.exit()
elif parameter == 'MAGMOM':
new_incar_string = parameter + ' = ' + ' '.join(['{:}']*len(value)).format(*value)
command = "sed -i.bak '/"+parameter+"/c\\" + new_incar_string + "' INCAR\n"
# elif parameter in ['IMAGES', 'ISPIN']:
else:
new_incar_string = parameter + ' = ' + str(value)
command = "sed -i.bak '/"+parameter+"/c\\" + new_incar_string + "' INCAR\n"
if write and f:
f.write(command)
if run:
runBash(command)
return u_step #for last element
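# Usage sketch for update_incar() (hypothetical; `calc` stands for a calculation object
# with a .set attribute, as the function expects via its `st` argument):
#   with open('run.sh', 'a') as f:
#       update_incar(parameter='ISPIN', value=2, write=True, f=f, st=calc)
# appends a sed command that rewrites the ISPIN line of an existing INCAR.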
def check_output(filename, check_string, load):
"""
Check if file exist and it is finished by search for check_string
"""
if filename and os.path.exists(filename):
out = grep_file(check_string, filename, reverse = True)
printlog('The grep result of',filename, 'is:', out)
# sys.exit()
if check_string in out or 'un' in load:
state = '4. Finished'
else:
state = '5. Broken outcar'
else:
state = '5. no OUTCAR'
return state
|
gpl-2.0
| -4,048,599,918,254,212,600 | 27.993164 | 209 | 0.548688 | false |
lukecampbell/compliance-checker
|
compliance_checker/tests/test_ioos_sos.py
|
1
|
2367
|
import unittest
from compliance_checker.suite import CheckSuite
from compliance_checker.runner import ComplianceChecker
import os
import httpretty
# TODO: Use inheritance to eliminate redundant code in test setup, etc
class TestIOOSSOSGetCapabilities(unittest.TestCase):
def setUp(self):
with open(os.path.join(os.path.dirname(__file__),
'data/http_mocks/ncsos_getcapabilities.xml')) as f:
self.resp = f.read()
# need to monkey patch checkers prior to running tests, or no checker
# classes will show up
CheckSuite().load_all_available_checkers()
@httpretty.activate
def test_retrieve_getcaps(self):
"""Method that simulates retrieving SOS GetCapabilities"""
url = "http://data.oceansmap.com/thredds/sos/caricoos_ag/VIA/VIA.ncml"
httpretty.register_uri(httpretty.GET, url, content_type="text/xml", body=self.resp)
# need to mock out the HEAD response so that compliance checker
# recognizes this as some sort of XML doc instead of an OPeNDAP
# source
ComplianceChecker.run_checker(url, ['ioos_sos'], 1, 'normal')
class TestIOOSSOSDescribeSensor(unittest.TestCase):
def setUp(self):
with open(os.path.join(os.path.dirname(__file__),
'data/http_mocks/ncsos_describesensor.xml')) as f:
self.resp = f.read()
# need to monkey patch checkers prior to running tests, or no checker
# classes will show up
CheckSuite().load_all_available_checkers()
@httpretty.activate
def test_retrieve_describesensor(self):
"""Method that simulates retrieving SOS DescribeSensor"""
url = ("http://data.oceansmap.com/thredds/sos/caricoos_ag/VIA/VIA.ncml?"
"request=describesensor"
"&service=sos"
"&procedure=urn:ioos:station:ncsos:VIA"
"&outputFormat=text/xml%3Bsubtype%3D%22sensorML/1.0.1/profiles/ioos_sos/1.0%22"
"&version=1.0.0")
httpretty.register_uri(httpretty.GET, url, content_type="text/xml", body=self.resp)
# need to mock out the HEAD response so that compliance checker
# recognizes this as some sort of XML doc instead of an OPeNDAP
# source
ComplianceChecker.run_checker(url, ['ioos_sos'], 1, 'normal')
|
apache-2.0
| -7,692,406,608,126,215,000 | 44.519231 | 95 | 0.652725 | false |
steabert/molpy
|
molpy/molden.py
|
1
|
5280
|
# molden.py -- Molden format
# Implements the Molden file format, specification can be found here:
# http://www.cmbi.ru.nl/molden/molden_format.html
#
# Copyright (c) 2016 Steven Vancoillie
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Written by Steven Vancoillie.
#
import copy
import numpy as np
from . import export
@export
class MolcasMOLDEN:
mx_angmom = 4
def __init__(self, filename, mode, strict=True):
self.f = open(filename, mode)
self.strict = strict
def close(self):
self.f.close()
def write(self, wfn):
"""
write wavefunction data to file
"""
if wfn.basis_set is None:
raise DataNotAvailable('The fchk format needs basis set info.')
n_atoms, nuclear_charge = wfn.nuclear_info()
n_electrons, n_a, n_b, spinmult, electronic_charge = wfn.electronic_info()
if np.isnan(spinmult):
spinmult = 1
if np.isnan(electronic_charge):
charge = 0
n_electrons = int(nuclear_charge)
n_b = (n_electrons - (spinmult - 1)) // 2
n_a = n_electrons - n_b
else:
charge = nuclear_charge + electronic_charge
self.f.write('[Molden Format]\n')
if not self.strict:
self.f.write('[N_ATOMS]\n')
self.write_natoms(n_atoms)
self.f.write('[Atoms] (AU)\n')
basis = wfn.basis_set
labels = basis.center_labels
charges = basis.center_charges
coords = basis.center_coordinates
self.write_atoms(labels, charges, coords)
self.f.write('[5D]\n')
self.f.write('[7F]\n')
self.f.write('[9G]\n')
if not self.strict:
self.f.write('[CHARGE] (MULLIKEN)\n')
mulliken_charges = wfn.mulliken_charges()
if np.logical_or.reduce(np.isnan(mulliken_charges)):
mulliken_charges.fill(0)
self.write_mulliken(mulliken_charges)
self.f.write('[GTO] (AU)\n')
self.write_gto(wfn.basis_set.primitive_tree)
self.f.write('[MO]\n')
for kind, orbitals in wfn.mo.items():
orbitals = orbitals.sort_basis(order='molden')
orbitals = orbitals.limit_basis(limit=self.mx_angmom)
orbitals.sanitize()
self.write_mo(orbitals, kind=kind)
def write_natoms(self, natoms):
self.f.write('{:12d}\n'.format(natoms))
def write_atoms(self, labels, charges, coords):
center_properties = zip(labels, charges, coords)
template = '{:s} {:7d} {:7d} {:14.7f} {:14.7f} {:14.7f}\n'
for i, (label, charge, coord,) in enumerate(center_properties):
label_nospaces = label.replace(' ','')
self.f.write(template.format(label_nospaces, i+1, int(charge), *coord))
def write_mulliken(self, charges):
for charge in charges:
self.f.write('{:f}\n'.format(charge))
def write_gto(self, basisset):
l = ['s', 'p', 'd', 'f', 'g', 'h', 'i', 'k', 'l', 'm', 'n']
for center in basisset:
self.f.write('{:4d}\n'.format(center['id']))
for angmom in center['angmoms']:
if angmom['value'] > self.mx_angmom:
continue
for shell in angmom['shells']:
pgto_selection = np.where(shell['coefficients'] > 1.0e-15)
exponents = shell['exponents'][pgto_selection]
coefficients = shell['coefficients'][pgto_selection]
self.f.write(' {:1s}{:4d}\n'.format(l[angmom['value']], len(exponents)))
for exp, coef, in zip(exponents, coefficients):
self.f.write('{:17.9e} {:17.9e}\n'.format(exp, coef))
self.f.write('\n')
def write_mo(self, orbitals, kind='restricted'):
if kind == 'restricted':
spin = 'alpha'
else:
spin = kind
for irrep, ene, occ, mo in zip(
orbitals.irreps,
orbitals.energies,
orbitals.occupations,
orbitals.coefficients.T):
self.f.write('Sym = {:d}\n'.format(irrep))
self.f.write('Ene = {:10.4f}\n'.format(ene))
self.f.write('Spin = {:s}\n'.format(spin))
self.f.write('Occup = {:10.5f}\n'.format(occ))
for idx, coef, in enumerate(mo):
self.f.write('{:4d} {:16.8f}\n'.format(idx+1, coef))
@export
class MolcasMOLDENGV(MolcasMOLDEN):
def __init__(self, filename, mode):
super().__init__(filename, mode, strict=False)
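# Usage sketch (not part of the original module; assumes a molpy wavefunction object
# `wfn` exposing basis_set, mo, nuclear_info() and electronic_info(), as write() expects):
#   molden = MolcasMOLDEN('orbitals.molden', 'w')
#   molden.write(wfn)
#   molden.close()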
|
gpl-2.0
| 4,687,063,611,598,560,000 | 35.413793 | 94 | 0.575568 | false |
SMALLplayer/smallplayer-image-creator
|
storage/.xbmc/addons/script.cu.lrclyrics/resources/lib/culrcscrapers/alsong/lyricsScraper.py
|
1
|
3106
|
# -*- Mode: python; coding: utf-8; tab-width: 8; indent-tabs-mode: t; -*-
"""
Scraper for http://lyrics.alsong.co.kr/
edge
"""
import sys
import socket
import hashlib
import urllib2
import xml.dom.minidom as xml
from utilities import *
from audiofile import AudioFile
__title__ = "Alsong"
__priority__ = '115'
__lrc__ = True
socket.setdefaulttimeout(10)
ALSONG_URL = "http://lyrics.alsong.net/alsongwebservice/service1.asmx"
ALSONG_TMPL = '''\
<?xml version="1.0" encoding="UTF-8"?>
<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://www.w3.org/2003/05/soap-envelope" xmlns:SOAP-ENC="http://www.w3.org/2003/05/soap-encoding" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:ns2="ALSongWebServer/Service1Soap" xmlns:ns1="ALSongWebServer" xmlns:ns3="ALSongWebServer/Service1Soap12">
<SOAP-ENV:Body>
<ns1:GetLyric5>
<ns1:stQuery>
<ns1:strChecksum>%s</ns1:strChecksum>
<ns1:strVersion>2.2</ns1:strVersion>
<ns1:strMACAddress />
<ns1:strIPAddress />
</ns1:stQuery>
</ns1:GetLyric5>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
class alsongClient(object):
'''
    provide alsong-specific functions, such as deriving the lookup key from an mp3 file
'''
@staticmethod
def GetKeyFromFile(file):
musf = AudioFile()
musf.Open(file)
ext = file[file.rfind('.'):].lower()
if ext == '.ogg':
buf = musf.ReadAudioStream(160*1024,11) # 160KB excluding header
elif ext == '.wma':
buf = musf.ReadAudioStream(160*1024,24) # 160KB excluding header
else:
buf = musf.ReadAudioStream(160*1024) # 160KB from audio data
musf.Close()
# calculate hashkey
m = hashlib.md5(); m.update(buf);
return m.hexdigest()
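# A minimal usage sketch (the file path is hypothetical): the checksum
# returned here is what gets substituted into ALSONG_TMPL, as done in
# LyricsFetcher.get_lyrics below.
#   checksum = alsongClient.GetKeyFromFile('/path/to/track.mp3')
#   soap_body = ALSONG_TMPL % checksum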
class LyricsFetcher:
def __init__( self ):
self.base_url = "http://lyrics.alsong.co.kr/"
def get_lyrics(self, song):
log( "%s: searching lyrics for %s - %s" % (__title__, song.artist, song.title))
lyrics = Lyrics()
lyrics.song = song
lyrics.source = __title__
lyrics.lrc = __lrc__
try:
key = alsongClient.GetKeyFromFile( song.filepath )
if not key:
return None
headers = { 'Content-Type' : 'text/xml; charset=utf-8' }
request = urllib2.Request(ALSONG_URL, ALSONG_TMPL % key, headers)
response = urllib2.urlopen(request)
Page = response.read()
except:
log( "%s: %s::%s (%d) [%s]" % (
__title__, self.__class__.__name__,
sys.exc_info()[ 2 ].tb_frame.f_code.co_name,
sys.exc_info()[ 2 ].tb_lineno,
sys.exc_info()[ 1 ]
))
return None
tree = xml.parseString( Page )
if tree.getElementsByTagName("strInfoID")[0].childNodes[0].data == '-1':
return None
lyr = tree.getElementsByTagName("strLyric")[0].childNodes[0].data.replace('<br>','\n')
lyrics.lyrics = lyr.encode('utf-8')
return lyrics
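# A minimal usage sketch (assuming a `song` object exposing `artist`,
# `title` and `filepath`, as used above):
#   fetcher = LyricsFetcher()
#   result = fetcher.get_lyrics(song)
#   if result:
#       print(result.lyrics)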
|
gpl-2.0
| -7,442,588,133,466,447,000 | 32.021277 | 344 | 0.585052 | false |
dderevjanik/agescx
|
agescx/scenario.py
|
1
|
3376
|
from .models import *
from .controller import *
from .utilities import *
# utilities
from .decompress import *
from .compress import *
class Scenario:
""" Scenario class """
def __init__(self, filename=None, ver=1.21):
"""create scenario with defaults values,
check default.txt for more information
Args:
filename (str, optional): load scenario from file
            ver (float, optional): specific version"""
self.version = ver
if filename:
self.load(filename)
else:
self._clear()
def __repr__(self):
name = "SCENARIO:{}\n".format(self.filename)
info1 = "\tWIDTH:{} HEIGHT:{}\n".format(self.tiles.width, self.tiles.height)
info2 = "\tUNITS:{}\n".format(len(self.units))
info3 = "\tTRIGGERS:{}".format(len(self.triggers))
return name + info1 + info2 + info3
def load(self, filename, ver=1.21):
"""
        load a scenario from file;
        it does not save the current scenario first
Args:
filename (str): scenario filename
ver (float, optional): version of scenario
Raises:
            IOError: if file doesn't exist or is broken
"""
self._clear()
try:
f = open(filename, 'rb')
except:
            raise IOError("File is broken or doesn't exist")
b = f.read() # get bytes from file
Decompress(self, b, ver, False) # load data
def save(self, filename=None, ver=1.21):
"""
save scenario as scx format
Args:
filename (str, optional): if set, it will create new
scenario file, otherwise rewrite current
            ver (float, optional): save with a specific version
Todo:
finish this section
"""
if filename is None:
filename = self.filename # save to this scenario file
Compress(self, filename, ver)
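    # A minimal usage sketch (file names are hypothetical):
    #   scn = Scenario('empires.scx')      # load an existing scenario
    #   scn.save('empires_copy.scx')       # write it back under a new name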
def new(self, filename):
"""create whole new blank scenario
Args:
terrainType (int, optional): starting terrain type, 0
            elevation (int, optional): starting elevation, 0
            filename (str, optional): if set, it will create
new scenario file
ver (float, optional): create version specific scenario
Todo:
finish starting terrainType and elevation
"""
self._clear()
self.version = "1.21"
self.version2 = 1.22
self.filename = filename
def _clear(self):
"""clear all scenario data"""
self.filename = None # scenario filename
self.version = None # scenario version
self.instructions = ""
self.plrnumb = 8
self.players = Players() # initialize players
        self.messages = Messages()
self.cinematics = Cinematics() # movies
self.background = Background() # pre-game image
self.map = Map()
self.tiles = self.map.tiles
self.goals = Goals()
self.units = Units()
self.triggers = Triggers()
self.debug = Debug()
for i in range(len(self.players)):
self.players[i].units = self.units[i]
self.timestamp = 0 # last save
|
mit
| 6,019,703,332,485,331,000 | 28.876106 | 84 | 0.544135 | false |
deanishe/alfred-packal-search
|
src/workflow/workflow.py
|
1
|
98456
|
# encoding: utf-8
#
# Copyright (c) 2014 Dean Jackson <deanishe@deanishe.net>
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2014-02-15
#
"""The :class:`Workflow` object is the main interface to this library.
:class:`Workflow` is targeted at Alfred 2. Use
:class:`~workflow.Workflow3` if you want to use Alfred 3's new
features, such as :ref:`workflow variables <workflow-variables>` or
more powerful modifiers.
See :ref:`setup` in the :ref:`user-manual` for an example of how to set
up your Python script to best utilise the :class:`Workflow` object.
"""
from __future__ import print_function, unicode_literals
import atexit
import binascii
from contextlib import contextmanager
import cPickle
from copy import deepcopy
import errno
import json
import logging
import logging.handlers
import os
import pickle
import plistlib
import re
import shutil
import signal
import string
import subprocess
import sys
import time
import unicodedata
try:
import xml.etree.cElementTree as ET
except ImportError: # pragma: no cover
import xml.etree.ElementTree as ET
#: Sentinel for properties that haven't been set yet (that might
#: correctly have the value ``None``)
UNSET = object()
####################################################################
# Standard system icons
####################################################################
# These icons are default macOS icons. They are super-high quality, and
# will be familiar to users.
# This library uses `ICON_ERROR` when a workflow dies in flames, so
# in my own workflows, I use `ICON_WARNING` for less fatal errors
# (e.g. bad user input, no results etc.)
# The system icons are all in this directory. There are many more than
# are listed here
ICON_ROOT = '/System/Library/CoreServices/CoreTypes.bundle/Contents/Resources'
ICON_ACCOUNT = os.path.join(ICON_ROOT, 'Accounts.icns')
ICON_BURN = os.path.join(ICON_ROOT, 'BurningIcon.icns')
ICON_CLOCK = os.path.join(ICON_ROOT, 'Clock.icns')
ICON_COLOR = os.path.join(ICON_ROOT, 'ProfileBackgroundColor.icns')
ICON_COLOUR = ICON_COLOR # Queen's English, if you please
ICON_EJECT = os.path.join(ICON_ROOT, 'EjectMediaIcon.icns')
# Shown when a workflow throws an error
ICON_ERROR = os.path.join(ICON_ROOT, 'AlertStopIcon.icns')
ICON_FAVORITE = os.path.join(ICON_ROOT, 'ToolbarFavoritesIcon.icns')
ICON_FAVOURITE = ICON_FAVORITE
ICON_GROUP = os.path.join(ICON_ROOT, 'GroupIcon.icns')
ICON_HELP = os.path.join(ICON_ROOT, 'HelpIcon.icns')
ICON_HOME = os.path.join(ICON_ROOT, 'HomeFolderIcon.icns')
ICON_INFO = os.path.join(ICON_ROOT, 'ToolbarInfo.icns')
ICON_NETWORK = os.path.join(ICON_ROOT, 'GenericNetworkIcon.icns')
ICON_NOTE = os.path.join(ICON_ROOT, 'AlertNoteIcon.icns')
ICON_SETTINGS = os.path.join(ICON_ROOT, 'ToolbarAdvanced.icns')
ICON_SWIRL = os.path.join(ICON_ROOT, 'ErasingIcon.icns')
ICON_SWITCH = os.path.join(ICON_ROOT, 'General.icns')
ICON_SYNC = os.path.join(ICON_ROOT, 'Sync.icns')
ICON_TRASH = os.path.join(ICON_ROOT, 'TrashIcon.icns')
ICON_USER = os.path.join(ICON_ROOT, 'UserIcon.icns')
ICON_WARNING = os.path.join(ICON_ROOT, 'AlertCautionIcon.icns')
ICON_WEB = os.path.join(ICON_ROOT, 'BookmarkIcon.icns')
####################################################################
# non-ASCII to ASCII diacritic folding.
# Used by `fold_to_ascii` method
####################################################################
ASCII_REPLACEMENTS = {
'À': 'A',
'Á': 'A',
'Â': 'A',
'Ã': 'A',
'Ä': 'A',
'Å': 'A',
'Æ': 'AE',
'Ç': 'C',
'È': 'E',
'É': 'E',
'Ê': 'E',
'Ë': 'E',
'Ì': 'I',
'Í': 'I',
'Î': 'I',
'Ï': 'I',
'Ð': 'D',
'Ñ': 'N',
'Ò': 'O',
'Ó': 'O',
'Ô': 'O',
'Õ': 'O',
'Ö': 'O',
'Ø': 'O',
'Ù': 'U',
'Ú': 'U',
'Û': 'U',
'Ü': 'U',
'Ý': 'Y',
'Þ': 'Th',
'ß': 'ss',
'à': 'a',
'á': 'a',
'â': 'a',
'ã': 'a',
'ä': 'a',
'å': 'a',
'æ': 'ae',
'ç': 'c',
'è': 'e',
'é': 'e',
'ê': 'e',
'ë': 'e',
'ì': 'i',
'í': 'i',
'î': 'i',
'ï': 'i',
'ð': 'd',
'ñ': 'n',
'ò': 'o',
'ó': 'o',
'ô': 'o',
'õ': 'o',
'ö': 'o',
'ø': 'o',
'ù': 'u',
'ú': 'u',
'û': 'u',
'ü': 'u',
'ý': 'y',
'þ': 'th',
'ÿ': 'y',
'Ł': 'L',
'ł': 'l',
'Ń': 'N',
'ń': 'n',
'Ņ': 'N',
'ņ': 'n',
'Ň': 'N',
'ň': 'n',
'Ŋ': 'ng',
'ŋ': 'NG',
'Ō': 'O',
'ō': 'o',
'Ŏ': 'O',
'ŏ': 'o',
'Ő': 'O',
'ő': 'o',
'Œ': 'OE',
'œ': 'oe',
'Ŕ': 'R',
'ŕ': 'r',
'Ŗ': 'R',
'ŗ': 'r',
'Ř': 'R',
'ř': 'r',
'Ś': 'S',
'ś': 's',
'Ŝ': 'S',
'ŝ': 's',
'Ş': 'S',
'ş': 's',
'Š': 'S',
'š': 's',
'Ţ': 'T',
'ţ': 't',
'Ť': 'T',
'ť': 't',
'Ŧ': 'T',
'ŧ': 't',
'Ũ': 'U',
'ũ': 'u',
'Ū': 'U',
'ū': 'u',
'Ŭ': 'U',
'ŭ': 'u',
'Ů': 'U',
'ů': 'u',
'Ű': 'U',
'ű': 'u',
'Ŵ': 'W',
'ŵ': 'w',
'Ŷ': 'Y',
'ŷ': 'y',
'Ÿ': 'Y',
'Ź': 'Z',
'ź': 'z',
'Ż': 'Z',
'ż': 'z',
'Ž': 'Z',
'ž': 'z',
'ſ': 's',
'Α': 'A',
'Β': 'B',
'Γ': 'G',
'Δ': 'D',
'Ε': 'E',
'Ζ': 'Z',
'Η': 'E',
'Θ': 'Th',
'Ι': 'I',
'Κ': 'K',
'Λ': 'L',
'Μ': 'M',
'Ν': 'N',
'Ξ': 'Ks',
'Ο': 'O',
'Π': 'P',
'Ρ': 'R',
'Σ': 'S',
'Τ': 'T',
'Υ': 'U',
'Φ': 'Ph',
'Χ': 'Kh',
'Ψ': 'Ps',
'Ω': 'O',
'α': 'a',
'β': 'b',
'γ': 'g',
'δ': 'd',
'ε': 'e',
'ζ': 'z',
'η': 'e',
'θ': 'th',
'ι': 'i',
'κ': 'k',
'λ': 'l',
'μ': 'm',
'ν': 'n',
'ξ': 'x',
'ο': 'o',
'π': 'p',
'ρ': 'r',
'ς': 's',
'σ': 's',
'τ': 't',
'υ': 'u',
'φ': 'ph',
'χ': 'kh',
'ψ': 'ps',
'ω': 'o',
'А': 'A',
'Б': 'B',
'В': 'V',
'Г': 'G',
'Д': 'D',
'Е': 'E',
'Ж': 'Zh',
'З': 'Z',
'И': 'I',
'Й': 'I',
'К': 'K',
'Л': 'L',
'М': 'M',
'Н': 'N',
'О': 'O',
'П': 'P',
'Р': 'R',
'С': 'S',
'Т': 'T',
'У': 'U',
'Ф': 'F',
'Х': 'Kh',
'Ц': 'Ts',
'Ч': 'Ch',
'Ш': 'Sh',
'Щ': 'Shch',
'Ъ': "'",
'Ы': 'Y',
'Ь': "'",
'Э': 'E',
'Ю': 'Iu',
'Я': 'Ia',
'а': 'a',
'б': 'b',
'в': 'v',
'г': 'g',
'д': 'd',
'е': 'e',
'ж': 'zh',
'з': 'z',
'и': 'i',
'й': 'i',
'к': 'k',
'л': 'l',
'м': 'm',
'н': 'n',
'о': 'o',
'п': 'p',
'р': 'r',
'с': 's',
'т': 't',
'у': 'u',
'ф': 'f',
'х': 'kh',
'ц': 'ts',
'ч': 'ch',
'ш': 'sh',
'щ': 'shch',
'ъ': "'",
'ы': 'y',
'ь': "'",
'э': 'e',
'ю': 'iu',
'я': 'ia',
# 'ᴀ': '',
# 'ᴁ': '',
# 'ᴂ': '',
# 'ᴃ': '',
# 'ᴄ': '',
# 'ᴅ': '',
# 'ᴆ': '',
# 'ᴇ': '',
# 'ᴈ': '',
# 'ᴉ': '',
# 'ᴊ': '',
# 'ᴋ': '',
# 'ᴌ': '',
# 'ᴍ': '',
# 'ᴎ': '',
# 'ᴏ': '',
# 'ᴐ': '',
# 'ᴑ': '',
# 'ᴒ': '',
# 'ᴓ': '',
# 'ᴔ': '',
# 'ᴕ': '',
# 'ᴖ': '',
# 'ᴗ': '',
# 'ᴘ': '',
# 'ᴙ': '',
# 'ᴚ': '',
# 'ᴛ': '',
# 'ᴜ': '',
# 'ᴝ': '',
# 'ᴞ': '',
# 'ᴟ': '',
# 'ᴠ': '',
# 'ᴡ': '',
# 'ᴢ': '',
# 'ᴣ': '',
# 'ᴤ': '',
# 'ᴥ': '',
'ᴦ': 'G',
'ᴧ': 'L',
'ᴨ': 'P',
'ᴩ': 'R',
'ᴪ': 'PS',
'ẞ': 'Ss',
'Ỳ': 'Y',
'ỳ': 'y',
'Ỵ': 'Y',
'ỵ': 'y',
'Ỹ': 'Y',
'ỹ': 'y',
}
####################################################################
# Smart-to-dumb punctuation mapping
####################################################################
DUMB_PUNCTUATION = {
'‘': "'",
'’': "'",
'‚': "'",
'“': '"',
'”': '"',
'„': '"',
'–': '-',
'—': '-'
}
####################################################################
# Used by `Workflow.filter`
####################################################################
# Anchor characters in a name
#: Characters that indicate the beginning of a "word" in CamelCase
INITIALS = string.ascii_uppercase + string.digits
#: Split on non-letters, numbers
split_on_delimiters = re.compile('[^a-zA-Z0-9]').split
# Match filter flags
#: Match items that start with ``query``
MATCH_STARTSWITH = 1
#: Match items whose capital letters start with ``query``
MATCH_CAPITALS = 2
#: Match items with a component "word" that matches ``query``
MATCH_ATOM = 4
#: Match items whose initials (based on atoms) start with ``query``
MATCH_INITIALS_STARTSWITH = 8
#: Match items whose initials (based on atoms) contain ``query``
MATCH_INITIALS_CONTAIN = 16
#: Combination of :const:`MATCH_INITIALS_STARTSWITH` and
#: :const:`MATCH_INITIALS_CONTAIN`
MATCH_INITIALS = 24
#: Match items if ``query`` is a substring
MATCH_SUBSTRING = 32
#: Match items if all characters in ``query`` appear in the item in order
MATCH_ALLCHARS = 64
#: Combination of all other ``MATCH_*`` constants
MATCH_ALL = 127
####################################################################
# Used by `Workflow.check_update`
####################################################################
# Number of days to wait between checking for updates to the workflow
DEFAULT_UPDATE_FREQUENCY = 1
####################################################################
# Lockfile and Keychain access errors
####################################################################
class AcquisitionError(Exception):
"""Raised if a lock cannot be acquired."""
class KeychainError(Exception):
"""Raised for unknown Keychain errors.
Raised by methods :meth:`Workflow.save_password`,
:meth:`Workflow.get_password` and :meth:`Workflow.delete_password`
when ``security`` CLI app returns an unknown error code.
"""
class PasswordNotFound(KeychainError):
"""Password not in Keychain.
Raised by method :meth:`Workflow.get_password` when ``account``
is unknown to the Keychain.
"""
class PasswordExists(KeychainError):
"""Raised when trying to overwrite an existing account password.
You should never receive this error: it is used internally
by the :meth:`Workflow.save_password` method to know if it needs
to delete the old password first (a Keychain implementation detail).
"""
####################################################################
# Helper functions
####################################################################
def isascii(text):
"""Test if ``text`` contains only ASCII characters.
:param text: text to test for ASCII-ness
:type text: ``unicode``
:returns: ``True`` if ``text`` contains only ASCII characters
:rtype: ``Boolean``
"""
try:
text.encode('ascii')
except UnicodeEncodeError:
return False
return True
####################################################################
# Implementation classes
####################################################################
class SerializerManager(object):
"""Contains registered serializers.
.. versionadded:: 1.8
A configured instance of this class is available at
:attr:`workflow.manager`.
Use :meth:`register()` to register new (or replace
existing) serializers, which you can specify by name when calling
:class:`~workflow.Workflow` data storage methods.
See :ref:`guide-serialization` and :ref:`guide-persistent-data`
for further information.
"""
def __init__(self):
"""Create new SerializerManager object."""
self._serializers = {}
def register(self, name, serializer):
"""Register ``serializer`` object under ``name``.
        Raises :class:`AttributeError` if ``serializer`` is invalid.
.. note::
``name`` will be used as the file extension of the saved files.
:param name: Name to register ``serializer`` under
:type name: ``unicode`` or ``str``
:param serializer: object with ``load()`` and ``dump()``
methods
"""
# Basic validation
getattr(serializer, 'load')
getattr(serializer, 'dump')
self._serializers[name] = serializer
def serializer(self, name):
"""Return serializer object for ``name``.
:param name: Name of serializer to return
:type name: ``unicode`` or ``str``
:returns: serializer object or ``None`` if no such serializer
is registered.
"""
return self._serializers.get(name)
def unregister(self, name):
"""Remove registered serializer with ``name``.
Raises a :class:`ValueError` if there is no such registered
serializer.
:param name: Name of serializer to remove
:type name: ``unicode`` or ``str``
:returns: serializer object
"""
if name not in self._serializers:
raise ValueError('No such serializer registered : {0}'.format(
name))
serializer = self._serializers[name]
del self._serializers[name]
return serializer
@property
def serializers(self):
"""Return names of registered serializers."""
return sorted(self._serializers.keys())
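# A minimal sketch of registering a custom serializer with the module-level
# ``manager`` instance created further down. The YAML serializer is purely
# illustrative (it assumes PyYAML is importable as ``yaml``); any object
# exposing ``load()`` and ``dump()`` will do:
#
#   class YAMLSerializer(object):
#       @classmethod
#       def load(cls, file_obj):
#           return yaml.safe_load(file_obj)
#
#       @classmethod
#       def dump(cls, obj, file_obj):
#           return yaml.safe_dump(obj, file_obj)
#
#   manager.register('yaml', YAMLSerializer)
#   manager.serializer('yaml')    # -> YAMLSerializer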
class JSONSerializer(object):
"""Wrapper around :mod:`json`. Sets ``indent`` and ``encoding``.
.. versionadded:: 1.8
Use this serializer if you need readable data files. JSON doesn't
support Python objects as well as ``cPickle``/``pickle``, so be
careful which data you try to serialize as JSON.
"""
@classmethod
def load(cls, file_obj):
"""Load serialized object from open JSON file.
.. versionadded:: 1.8
:param file_obj: file handle
:type file_obj: ``file`` object
:returns: object loaded from JSON file
:rtype: object
"""
return json.load(file_obj)
@classmethod
def dump(cls, obj, file_obj):
"""Serialize object ``obj`` to open JSON file.
.. versionadded:: 1.8
:param obj: Python object to serialize
:type obj: JSON-serializable data structure
:param file_obj: file handle
:type file_obj: ``file`` object
"""
return json.dump(obj, file_obj, indent=2, encoding='utf-8')
class CPickleSerializer(object):
"""Wrapper around :mod:`cPickle`. Sets ``protocol``.
.. versionadded:: 1.8
This is the default serializer and the best combination of speed and
flexibility.
"""
@classmethod
def load(cls, file_obj):
"""Load serialized object from open pickle file.
.. versionadded:: 1.8
:param file_obj: file handle
:type file_obj: ``file`` object
:returns: object loaded from pickle file
:rtype: object
"""
return cPickle.load(file_obj)
@classmethod
def dump(cls, obj, file_obj):
"""Serialize object ``obj`` to open pickle file.
.. versionadded:: 1.8
:param obj: Python object to serialize
:type obj: Python object
:param file_obj: file handle
:type file_obj: ``file`` object
"""
return cPickle.dump(obj, file_obj, protocol=-1)
class PickleSerializer(object):
"""Wrapper around :mod:`pickle`. Sets ``protocol``.
.. versionadded:: 1.8
Use this serializer if you need to add custom pickling.
"""
@classmethod
def load(cls, file_obj):
"""Load serialized object from open pickle file.
.. versionadded:: 1.8
:param file_obj: file handle
:type file_obj: ``file`` object
:returns: object loaded from pickle file
:rtype: object
"""
return pickle.load(file_obj)
@classmethod
def dump(cls, obj, file_obj):
"""Serialize object ``obj`` to open pickle file.
.. versionadded:: 1.8
:param obj: Python object to serialize
:type obj: Python object
:param file_obj: file handle
:type file_obj: ``file`` object
"""
return pickle.dump(obj, file_obj, protocol=-1)
# Set up default manager and register built-in serializers
manager = SerializerManager()
manager.register('cpickle', CPickleSerializer)
manager.register('pickle', PickleSerializer)
manager.register('json', JSONSerializer)
class Item(object):
"""Represents a feedback item for Alfred.
Generates Alfred-compliant XML for a single item.
You probably shouldn't use this class directly, but via
:meth:`Workflow.add_item`. See :meth:`~Workflow.add_item`
for details of arguments.
"""
def __init__(self, title, subtitle='', modifier_subtitles=None,
arg=None, autocomplete=None, valid=False, uid=None,
icon=None, icontype=None, type=None, largetext=None,
copytext=None, quicklookurl=None):
"""Same arguments as :meth:`Workflow.add_item`."""
self.title = title
self.subtitle = subtitle
self.modifier_subtitles = modifier_subtitles or {}
self.arg = arg
self.autocomplete = autocomplete
self.valid = valid
self.uid = uid
self.icon = icon
self.icontype = icontype
self.type = type
self.largetext = largetext
self.copytext = copytext
self.quicklookurl = quicklookurl
@property
def elem(self):
"""Create and return feedback item for Alfred.
:returns: :class:`ElementTree.Element <xml.etree.ElementTree.Element>`
instance for this :class:`Item` instance.
"""
# Attributes on <item> element
attr = {}
if self.valid:
attr['valid'] = 'yes'
else:
attr['valid'] = 'no'
# Allow empty string for autocomplete. This is a useful value,
# as TABing the result will revert the query back to just the
# keyword
if self.autocomplete is not None:
attr['autocomplete'] = self.autocomplete
# Optional attributes
for name in ('uid', 'type'):
value = getattr(self, name, None)
if value:
attr[name] = value
root = ET.Element('item', attr)
ET.SubElement(root, 'title').text = self.title
ET.SubElement(root, 'subtitle').text = self.subtitle
# Add modifier subtitles
for mod in ('cmd', 'ctrl', 'alt', 'shift', 'fn'):
if mod in self.modifier_subtitles:
ET.SubElement(root, 'subtitle',
{'mod': mod}).text = self.modifier_subtitles[mod]
# Add arg as element instead of attribute on <item>, as it's more
# flexible (newlines aren't allowed in attributes)
if self.arg:
ET.SubElement(root, 'arg').text = self.arg
# Add icon if there is one
if self.icon:
if self.icontype:
attr = dict(type=self.icontype)
else:
attr = {}
ET.SubElement(root, 'icon', attr).text = self.icon
if self.largetext:
ET.SubElement(root, 'text',
{'type': 'largetype'}).text = self.largetext
if self.copytext:
ET.SubElement(root, 'text',
{'type': 'copy'}).text = self.copytext
if self.quicklookurl:
ET.SubElement(root, 'quicklookurl').text = self.quicklookurl
return root
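# A minimal sketch of building feedback XML from an Item directly (values
# are illustrative; normally you would go through Workflow.add_item() and
# Workflow.send_feedback() instead):
#   item = Item('Title', subtitle='Subtitle', arg='value', valid=True)
#   xml_bytes = ET.tostring(item.elem)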
class LockFile(object):
"""Context manager to protect filepaths with lockfiles.
.. versionadded:: 1.13
Creates a lockfile alongside ``protected_path``. Other ``LockFile``
instances will refuse to lock the same path.
>>> path = '/path/to/file'
>>> with LockFile(path):
>>> with open(path, 'wb') as fp:
>>> fp.write(data)
Args:
protected_path (unicode): File to protect with a lockfile
timeout (int, optional): Raises an :class:`AcquisitionError`
if lock cannot be acquired within this number of seconds.
If ``timeout`` is 0 (the default), wait forever.
delay (float, optional): How often to check (in seconds) if
lock has been released.
"""
def __init__(self, protected_path, timeout=0, delay=0.05):
"""Create new :class:`LockFile` object."""
self.lockfile = protected_path + '.lock'
self.timeout = timeout
self.delay = delay
self._locked = False
atexit.register(self.release)
@property
def locked(self):
"""`True` if file is locked by this instance."""
return self._locked
def acquire(self, blocking=True):
"""Acquire the lock if possible.
If the lock is in use and ``blocking`` is ``False``, return
``False``.
        Otherwise, check every `self.delay` seconds until the lock is
        acquired, raising an `~AcquisitionError` if `self.timeout` is exceeded.
"""
start = time.time()
while True:
self._validate_lockfile()
try:
fd = os.open(self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)
with os.fdopen(fd, 'w') as fd:
fd.write(str(os.getpid()))
break
except OSError as err:
if err.errno != errno.EEXIST: # pragma: no cover
raise
if self.timeout and (time.time() - start) >= self.timeout:
raise AcquisitionError('lock acquisition timed out')
if not blocking:
return False
time.sleep(self.delay)
self._locked = True
return True
def _validate_lockfile(self):
"""Check existence and validity of lockfile.
If the lockfile exists, but contains an invalid PID
        or the PID of a non-existent process, it is removed.
"""
try:
with open(self.lockfile) as fp:
s = fp.read()
except Exception:
return
try:
pid = int(s)
except ValueError:
return self.release()
from background import _process_exists
if not _process_exists(pid):
self.release()
def release(self):
"""Release the lock by deleting `self.lockfile`."""
self._locked = False
try:
os.unlink(self.lockfile)
except (OSError, IOError) as err: # pragma: no cover
if err.errno != 2:
raise err
def __enter__(self):
"""Acquire lock."""
self.acquire()
return self
def __exit__(self, typ, value, traceback):
"""Release lock."""
self.release()
def __del__(self):
"""Clear up `self.lockfile`."""
if self._locked: # pragma: no cover
self.release()
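# A non-blocking usage sketch (the path is hypothetical): try to take the
# lock, and simply skip the write if another process already holds it.
#   lock = LockFile('/path/to/file', timeout=2)
#   if lock.acquire(blocking=False):
#       try:
#           pass  # write the protected file here
#       finally:
#           lock.release()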
@contextmanager
def atomic_writer(file_path, mode):
"""Atomic file writer.
.. versionadded:: 1.12
Context manager that ensures the file is only written if the write
succeeds. The data is first written to a temporary file.
:param file_path: path of file to write to.
:type file_path: ``unicode``
    :param mode: same as for :func:`open`
:type mode: string
"""
temp_suffix = '.aw.temp'
temp_file_path = file_path + temp_suffix
with open(temp_file_path, mode) as file_obj:
try:
yield file_obj
os.rename(temp_file_path, file_path)
finally:
try:
os.remove(temp_file_path)
except (OSError, IOError):
pass
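# A minimal usage sketch (path and data are hypothetical): the temporary
# file only replaces settings.json if the write completes without error.
#   with atomic_writer('/path/to/settings.json', 'wb') as fp:
#       json.dump({'key': 'value'}, fp)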
class uninterruptible(object):
"""Decorator that postpones SIGTERM until wrapped function returns.
.. versionadded:: 1.12
.. important:: This decorator is NOT thread-safe.
As of version 2.7, Alfred allows Script Filters to be killed. If
your workflow is killed in the middle of critical code (e.g.
writing data to disk), this may corrupt your workflow's data.
Use this decorator to wrap critical functions that *must* complete.
If the script is killed while a wrapped function is executing,
the SIGTERM will be caught and handled after your function has
finished executing.
Alfred-Workflow uses this internally to ensure its settings, data
and cache writes complete.
"""
def __init__(self, func, class_name=''):
"""Decorate `func`."""
self.func = func
self._caught_signal = None
def signal_handler(self, signum, frame):
"""Called when process receives SIGTERM."""
self._caught_signal = (signum, frame)
def __call__(self, *args, **kwargs):
"""Trap ``SIGTERM`` and call wrapped function."""
self._caught_signal = None
# Register handler for SIGTERM, then call `self.func`
self.old_signal_handler = signal.getsignal(signal.SIGTERM)
signal.signal(signal.SIGTERM, self.signal_handler)
self.func(*args, **kwargs)
# Restore old signal handler
signal.signal(signal.SIGTERM, self.old_signal_handler)
# Handle any signal caught during execution
if self._caught_signal is not None:
signum, frame = self._caught_signal
if callable(self.old_signal_handler):
self.old_signal_handler(signum, frame)
elif self.old_signal_handler == signal.SIG_DFL:
sys.exit(0)
def __get__(self, obj=None, klass=None):
"""Decorator API."""
return self.__class__(self.func.__get__(obj, klass),
klass.__name__)
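# A minimal usage sketch (the function name is illustrative): SIGTERM is
# deferred until the critical write below has finished.
#   @uninterruptible
#   def save_index(data):
#       with atomic_writer('index.cpickle', 'wb') as fp:
#           cPickle.dump(data, fp, protocol=-1)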
class Settings(dict):
"""A dictionary that saves itself when changed.
Dictionary keys & values will be saved as a JSON file
at ``filepath``. If the file does not exist, the dictionary
(and settings file) will be initialised with ``defaults``.
:param filepath: where to save the settings
:type filepath: :class:`unicode`
:param defaults: dict of default settings
:type defaults: :class:`dict`
An appropriate instance is provided by :class:`Workflow` instances at
:attr:`Workflow.settings`.
"""
def __init__(self, filepath, defaults=None):
"""Create new :class:`Settings` object."""
super(Settings, self).__init__()
self._filepath = filepath
self._nosave = False
self._original = {}
if os.path.exists(self._filepath):
self._load()
elif defaults:
for key, val in defaults.items():
self[key] = val
self.save() # save default settings
def _load(self):
"""Load cached settings from JSON file `self._filepath`."""
self._nosave = True
d = {}
with open(self._filepath, 'rb') as file_obj:
for key, value in json.load(file_obj, encoding='utf-8').items():
d[key] = value
self.update(d)
self._original = deepcopy(d)
self._nosave = False
@uninterruptible
def save(self):
"""Save settings to JSON file specified in ``self._filepath``.
If you're using this class via :attr:`Workflow.settings`, which
you probably are, ``self._filepath`` will be ``settings.json``
in your workflow's data directory (see :attr:`~Workflow.datadir`).
"""
if self._nosave:
return
data = {}
data.update(self)
# for key, value in self.items():
# data[key] = value
with LockFile(self._filepath):
with atomic_writer(self._filepath, 'wb') as file_obj:
json.dump(data, file_obj, sort_keys=True, indent=2,
encoding='utf-8')
# dict methods
def __setitem__(self, key, value):
"""Implement :class:`dict` interface."""
if self._original.get(key) != value:
super(Settings, self).__setitem__(key, value)
self.save()
def __delitem__(self, key):
"""Implement :class:`dict` interface."""
super(Settings, self).__delitem__(key)
self.save()
def update(self, *args, **kwargs):
"""Override :class:`dict` method to save on update."""
super(Settings, self).update(*args, **kwargs)
self.save()
def setdefault(self, key, value=None):
"""Override :class:`dict` method to save on update."""
ret = super(Settings, self).setdefault(key, value)
self.save()
return ret
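# A minimal usage sketch (path and keys are hypothetical). Every mutation
# below is written straight back to the JSON file:
#   settings = Settings('/path/to/settings.json', defaults={'count': 0})
#   settings['count'] += 1                  # triggers save()
#   settings.setdefault('theme', 'dark')    # also triggers save()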
class Workflow(object):
"""The ``Workflow`` object is the main interface to Alfred-Workflow.
It provides APIs for accessing the Alfred/workflow environment,
storing & caching data, using Keychain, and generating Script
Filter feedback.
``Workflow`` is compatible with both Alfred 2 and 3. The
:class:`~workflow.Workflow3` subclass provides additional,
Alfred 3-only features, such as workflow variables.
:param default_settings: default workflow settings. If no settings file
exists, :class:`Workflow.settings` will be pre-populated with
``default_settings``.
:type default_settings: :class:`dict`
:param update_settings: settings for updating your workflow from
GitHub releases. The only required key is ``github_slug``,
whose value must take the form of ``username/repo``.
If specified, ``Workflow`` will check the repo's releases
for updates. Your workflow must also have a semantic version
number. Please see the :ref:`User Manual <user-manual>` and
`update API docs <api-updates>` for more information.
:type update_settings: :class:`dict`
:param input_encoding: encoding of command line arguments. You
should probably leave this as the default (``utf-8``), which
is the encoding Alfred uses.
:type input_encoding: :class:`unicode`
:param normalization: normalisation to apply to CLI args.
See :meth:`Workflow.decode` for more details.
:type normalization: :class:`unicode`
:param capture_args: Capture and act on ``workflow:*`` arguments. See
:ref:`Magic arguments <magic-arguments>` for details.
:type capture_args: :class:`Boolean`
:param libraries: sequence of paths to directories containing
libraries. These paths will be prepended to ``sys.path``.
:type libraries: :class:`tuple` or :class:`list`
:param help_url: URL to webpage where a user can ask for help with
the workflow, report bugs, etc. This could be the GitHub repo
or a page on AlfredForum.com. If your workflow throws an error,
this URL will be displayed in the log and Alfred's debugger. It can
also be opened directly in a web browser with the ``workflow:help``
:ref:`magic argument <magic-arguments>`.
:type help_url: :class:`unicode` or :class:`str`
"""
# Which class to use to generate feedback items. You probably
# won't want to change this
item_class = Item
def __init__(self, default_settings=None, update_settings=None,
input_encoding='utf-8', normalization='NFC',
capture_args=True, libraries=None,
help_url=None):
"""Create new :class:`Workflow` object."""
self._default_settings = default_settings or {}
self._update_settings = update_settings or {}
self._input_encoding = input_encoding
self._normalizsation = normalization
self._capture_args = capture_args
self.help_url = help_url
self._workflowdir = None
self._settings_path = None
self._settings = None
self._bundleid = None
self._debugging = None
self._name = None
self._cache_serializer = 'cpickle'
self._data_serializer = 'cpickle'
self._info = None
self._info_loaded = False
self._logger = None
self._items = []
self._alfred_env = None
# Version number of the workflow
self._version = UNSET
# Version from last workflow run
self._last_version_run = UNSET
# Cache for regex patterns created for filter keys
self._search_pattern_cache = {}
# Magic arguments
#: The prefix for all magic arguments. Default is ``workflow:``
self.magic_prefix = 'workflow:'
#: Mapping of available magic arguments. The built-in magic
#: arguments are registered by default. To add your own magic arguments
#: (or override built-ins), add a key:value pair where the key is
#: what the user should enter (prefixed with :attr:`magic_prefix`)
#: and the value is a callable that will be called when the argument
#: is entered. If you would like to display a message in Alfred, the
#: function should return a ``unicode`` string.
#:
#: By default, the magic arguments documented
#: :ref:`here <magic-arguments>` are registered.
self.magic_arguments = {}
self._register_default_magic()
if libraries:
sys.path = libraries + sys.path
####################################################################
# API methods
####################################################################
# info.plist contents and alfred_* environment variables ----------
@property
def alfred_version(self):
"""Alfred version as :class:`~workflow.update.Version` object."""
from update import Version
return Version(self.alfred_env.get('version'))
@property
def alfred_env(self):
"""Dict of Alfred's environmental variables minus ``alfred_`` prefix.
.. versionadded:: 1.7
The variables Alfred 2.4+ exports are:
============================ =========================================
Variable Description
============================ =========================================
debug Set to ``1`` if Alfred's debugger is
open, otherwise unset.
preferences Path to Alfred.alfredpreferences
(where your workflows and settings are
stored).
preferences_localhash Machine-specific preferences are stored
in ``Alfred.alfredpreferences/preferences/local/<hash>``
(see ``preferences`` above for
the path to ``Alfred.alfredpreferences``)
theme ID of selected theme
theme_background Background colour of selected theme in
format ``rgba(r,g,b,a)``
theme_subtext Show result subtext.
``0`` = Always,
``1`` = Alternative actions only,
``2`` = Selected result only,
``3`` = Never
version Alfred version number, e.g. ``'2.4'``
version_build Alfred build number, e.g. ``277``
workflow_bundleid Bundle ID, e.g.
``net.deanishe.alfred-mailto``
workflow_cache Path to workflow's cache directory
workflow_data Path to workflow's data directory
workflow_name Name of current workflow
workflow_uid UID of workflow
workflow_version The version number specified in the
workflow configuration sheet/info.plist
============================ =========================================
**Note:** all values are Unicode strings except ``version_build`` and
``theme_subtext``, which are integers.
:returns: ``dict`` of Alfred's environmental variables without the
``alfred_`` prefix, e.g. ``preferences``, ``workflow_data``.
"""
if self._alfred_env is not None:
return self._alfred_env
data = {}
for key in (
'alfred_debug',
'alfred_preferences',
'alfred_preferences_localhash',
'alfred_theme',
'alfred_theme_background',
'alfred_theme_subtext',
'alfred_version',
'alfred_version_build',
'alfred_workflow_bundleid',
'alfred_workflow_cache',
'alfred_workflow_data',
'alfred_workflow_name',
'alfred_workflow_uid',
'alfred_workflow_version'):
value = os.getenv(key)
if isinstance(value, str):
if key in ('alfred_debug', 'alfred_version_build',
'alfred_theme_subtext'):
value = int(value)
else:
value = self.decode(value)
data[key[7:]] = value
self._alfred_env = data
return self._alfred_env
@property
def info(self):
""":class:`dict` of ``info.plist`` contents."""
if not self._info_loaded:
self._load_info_plist()
return self._info
@property
def bundleid(self):
"""Workflow bundle ID from environmental vars or ``info.plist``.
:returns: bundle ID
:rtype: ``unicode``
"""
if not self._bundleid:
if self.alfred_env.get('workflow_bundleid'):
self._bundleid = self.alfred_env.get('workflow_bundleid')
else:
self._bundleid = unicode(self.info['bundleid'], 'utf-8')
return self._bundleid
@property
def debugging(self):
"""Whether Alfred's debugger is open.
:returns: ``True`` if Alfred's debugger is open.
:rtype: ``bool``
"""
if self._debugging is None:
if self.alfred_env.get('debug') == 1:
self._debugging = True
else:
self._debugging = False
return self._debugging
@property
def name(self):
"""Workflow name from Alfred's environmental vars or ``info.plist``.
:returns: workflow name
:rtype: ``unicode``
"""
if not self._name:
if self.alfred_env.get('workflow_name'):
self._name = self.decode(self.alfred_env.get('workflow_name'))
else:
self._name = self.decode(self.info['name'])
return self._name
@property
def version(self):
"""Return the version of the workflow.
.. versionadded:: 1.9.10
Get the workflow version from environment variable,
the ``update_settings`` dict passed on
instantiation, the ``version`` file located in the workflow's
root directory or ``info.plist``. Return ``None`` if none
exists or :class:`ValueError` if the version number is invalid
(i.e. not semantic).
:returns: Version of the workflow (not Alfred-Workflow)
:rtype: :class:`~workflow.update.Version` object
"""
if self._version is UNSET:
version = None
# environment variable has priority
if self.alfred_env.get('workflow_version'):
version = self.alfred_env['workflow_version']
# Try `update_settings`
elif self._update_settings:
version = self._update_settings.get('version')
# `version` file
if not version:
filepath = self.workflowfile('version')
if os.path.exists(filepath):
with open(filepath, 'rb') as fileobj:
version = fileobj.read()
# info.plist
if not version:
version = self.info.get('version')
if version:
from update import Version
version = Version(version)
self._version = version
return self._version
# Workflow utility methods -----------------------------------------
@property
def args(self):
"""Return command line args as normalised unicode.
Args are decoded and normalised via :meth:`~Workflow.decode`.
The encoding and normalisation are the ``input_encoding`` and
``normalization`` arguments passed to :class:`Workflow` (``UTF-8``
and ``NFC`` are the defaults).
If :class:`Workflow` is called with ``capture_args=True``
(the default), :class:`Workflow` will look for certain
``workflow:*`` args and, if found, perform the corresponding
actions and exit the workflow.
See :ref:`Magic arguments <magic-arguments>` for details.
"""
msg = None
args = [self.decode(arg) for arg in sys.argv[1:]]
# Handle magic args
if len(args) and self._capture_args:
for name in self.magic_arguments:
key = '{0}{1}'.format(self.magic_prefix, name)
if key in args:
msg = self.magic_arguments[name]()
if msg:
self.logger.debug(msg)
if not sys.stdout.isatty(): # Show message in Alfred
self.add_item(msg, valid=False, icon=ICON_INFO)
self.send_feedback()
sys.exit(0)
return args
@property
def cachedir(self):
"""Path to workflow's cache directory.
The cache directory is a subdirectory of Alfred's own cache directory
in ``~/Library/Caches``. The full path is:
``~/Library/Caches/com.runningwithcrayons.Alfred-X/Workflow Data/<bundle id>``
``Alfred-X`` may be ``Alfred-2`` or ``Alfred-3``.
:returns: full path to workflow's cache directory
:rtype: ``unicode``
"""
if self.alfred_env.get('workflow_cache'):
dirpath = self.alfred_env.get('workflow_cache')
else:
dirpath = self._default_cachedir
return self._create(dirpath)
@property
def _default_cachedir(self):
"""Alfred 2's default cache directory."""
return os.path.join(
os.path.expanduser(
'~/Library/Caches/com.runningwithcrayons.Alfred-2/'
'Workflow Data/'),
self.bundleid)
@property
def datadir(self):
"""Path to workflow's data directory.
The data directory is a subdirectory of Alfred's own data directory in
``~/Library/Application Support``. The full path is:
``~/Library/Application Support/Alfred 2/Workflow Data/<bundle id>``
:returns: full path to workflow data directory
:rtype: ``unicode``
"""
if self.alfred_env.get('workflow_data'):
dirpath = self.alfred_env.get('workflow_data')
else:
dirpath = self._default_datadir
return self._create(dirpath)
@property
def _default_datadir(self):
"""Alfred 2's default data directory."""
return os.path.join(os.path.expanduser(
'~/Library/Application Support/Alfred 2/Workflow Data/'),
self.bundleid)
@property
def workflowdir(self):
"""Path to workflow's root directory (where ``info.plist`` is).
:returns: full path to workflow root directory
:rtype: ``unicode``
"""
if not self._workflowdir:
# Try the working directory first, then the directory
# the library is in. CWD will be the workflow root if
# a workflow is being run in Alfred
candidates = [
os.path.abspath(os.getcwdu()),
os.path.dirname(os.path.abspath(os.path.dirname(__file__)))]
# climb the directory tree until we find `info.plist`
for dirpath in candidates:
# Ensure directory path is Unicode
dirpath = self.decode(dirpath)
while True:
if os.path.exists(os.path.join(dirpath, 'info.plist')):
self._workflowdir = dirpath
break
elif dirpath == '/':
# no `info.plist` found
break
# Check the parent directory
dirpath = os.path.dirname(dirpath)
# No need to check other candidates
if self._workflowdir:
break
if not self._workflowdir:
raise IOError("'info.plist' not found in directory tree")
return self._workflowdir
def cachefile(self, filename):
"""Path to ``filename`` in workflow's cache directory.
Return absolute path to ``filename`` within your workflow's
:attr:`cache directory <Workflow.cachedir>`.
:param filename: basename of file
:type filename: ``unicode``
:returns: full path to file within cache directory
:rtype: ``unicode``
"""
return os.path.join(self.cachedir, filename)
def datafile(self, filename):
"""Path to ``filename`` in workflow's data directory.
Return absolute path to ``filename`` within your workflow's
:attr:`data directory <Workflow.datadir>`.
:param filename: basename of file
:type filename: ``unicode``
:returns: full path to file within data directory
:rtype: ``unicode``
"""
return os.path.join(self.datadir, filename)
def workflowfile(self, filename):
"""Return full path to ``filename`` in workflow's root directory.
:param filename: basename of file
:type filename: ``unicode``
:returns: full path to file within data directory
:rtype: ``unicode``
"""
return os.path.join(self.workflowdir, filename)
@property
def logfile(self):
"""Path to logfile.
:returns: path to logfile within workflow's cache directory
:rtype: ``unicode``
"""
return self.cachefile('%s.log' % self.bundleid)
@property
def logger(self):
"""Logger that logs to both console and a log file.
If Alfred's debugger is open, log level will be ``DEBUG``,
else it will be ``INFO``.
Use :meth:`open_log` to open the log file in Console.
:returns: an initialised :class:`~logging.Logger`
"""
if self._logger:
return self._logger
# Initialise new logger and optionally handlers
logger = logging.getLogger('workflow')
if not len(logger.handlers): # Only add one set of handlers
fmt = logging.Formatter(
'%(asctime)s %(filename)s:%(lineno)s'
' %(levelname)-8s %(message)s',
datefmt='%H:%M:%S')
logfile = logging.handlers.RotatingFileHandler(
self.logfile,
maxBytes=1024 * 1024,
backupCount=1)
logfile.setFormatter(fmt)
logger.addHandler(logfile)
console = logging.StreamHandler()
console.setFormatter(fmt)
logger.addHandler(console)
if self.debugging:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
self._logger = logger
return self._logger
@logger.setter
def logger(self, logger):
"""Set a custom logger.
:param logger: The logger to use
:type logger: `~logging.Logger` instance
"""
self._logger = logger
@property
def settings_path(self):
"""Path to settings file within workflow's data directory.
:returns: path to ``settings.json`` file
:rtype: ``unicode``
"""
if not self._settings_path:
self._settings_path = self.datafile('settings.json')
return self._settings_path
@property
def settings(self):
"""Return a dictionary subclass that saves itself when changed.
See :ref:`guide-settings` in the :ref:`user-manual` for more
information on how to use :attr:`settings` and **important
limitations** on what it can do.
:returns: :class:`~workflow.workflow.Settings` instance
initialised from the data in JSON file at
:attr:`settings_path` or if that doesn't exist, with the
``default_settings`` :class:`dict` passed to
:class:`Workflow` on instantiation.
:rtype: :class:`~workflow.workflow.Settings` instance
"""
if not self._settings:
self.logger.debug('reading settings from %s', self.settings_path)
self._settings = Settings(self.settings_path,
self._default_settings)
return self._settings
@property
def cache_serializer(self):
"""Name of default cache serializer.
.. versionadded:: 1.8
This serializer is used by :meth:`cache_data()` and
:meth:`cached_data()`
See :class:`SerializerManager` for details.
:returns: serializer name
:rtype: ``unicode``
"""
return self._cache_serializer
@cache_serializer.setter
def cache_serializer(self, serializer_name):
"""Set the default cache serialization format.
.. versionadded:: 1.8
This serializer is used by :meth:`cache_data()` and
:meth:`cached_data()`
        The specified serializer must already be registered with the
:class:`SerializerManager` at `~workflow.workflow.manager`,
otherwise a :class:`ValueError` will be raised.
:param serializer_name: Name of default serializer to use.
:type serializer_name:
"""
if manager.serializer(serializer_name) is None:
raise ValueError(
'Unknown serializer : `{0}`. Register your serializer '
'with `manager` first.'.format(serializer_name))
self.logger.debug('default cache serializer: %s', serializer_name)
self._cache_serializer = serializer_name
@property
def data_serializer(self):
"""Name of default data serializer.
.. versionadded:: 1.8
This serializer is used by :meth:`store_data()` and
:meth:`stored_data()`
See :class:`SerializerManager` for details.
:returns: serializer name
:rtype: ``unicode``
"""
return self._data_serializer
@data_serializer.setter
def data_serializer(self, serializer_name):
"""Set the default cache serialization format.
.. versionadded:: 1.8
This serializer is used by :meth:`store_data()` and
:meth:`stored_data()`
        The specified serializer must already be registered with the
:class:`SerializerManager` at `~workflow.workflow.manager`,
otherwise a :class:`ValueError` will be raised.
:param serializer_name: Name of serializer to use by default.
"""
if manager.serializer(serializer_name) is None:
raise ValueError(
'Unknown serializer : `{0}`. Register your serializer '
'with `manager` first.'.format(serializer_name))
self.logger.debug('default data serializer: %s', serializer_name)
self._data_serializer = serializer_name
def stored_data(self, name):
"""Retrieve data from data directory.
Returns ``None`` if there are no data stored under ``name``.
.. versionadded:: 1.8
:param name: name of datastore
"""
metadata_path = self.datafile('.{0}.alfred-workflow'.format(name))
if not os.path.exists(metadata_path):
self.logger.debug('no data stored for `%s`', name)
return None
with open(metadata_path, 'rb') as file_obj:
serializer_name = file_obj.read().strip()
serializer = manager.serializer(serializer_name)
if serializer is None:
raise ValueError(
'Unknown serializer `{0}`. Register a corresponding '
'serializer with `manager.register()` '
'to load this data.'.format(serializer_name))
self.logger.debug('data `%s` stored as `%s`', name, serializer_name)
filename = '{0}.{1}'.format(name, serializer_name)
data_path = self.datafile(filename)
if not os.path.exists(data_path):
self.logger.debug('no data stored: %s', name)
if os.path.exists(metadata_path):
os.unlink(metadata_path)
return None
with open(data_path, 'rb') as file_obj:
data = serializer.load(file_obj)
self.logger.debug('stored data loaded: %s', data_path)
return data
def store_data(self, name, data, serializer=None):
"""Save data to data directory.
.. versionadded:: 1.8
If ``data`` is ``None``, the datastore will be deleted.
        Note that the datastore does NOT support multiple threads.
:param name: name of datastore
:param data: object(s) to store. **Note:** some serializers
            can only handle certain types of data.
:param serializer: name of serializer to use. If no serializer
is specified, the default will be used. See
:class:`SerializerManager` for more information.
:returns: data in datastore or ``None``
"""
# Ensure deletion is not interrupted by SIGTERM
@uninterruptible
def delete_paths(paths):
"""Clear one or more data stores"""
for path in paths:
if os.path.exists(path):
os.unlink(path)
self.logger.debug('deleted data file: %s', path)
serializer_name = serializer or self.data_serializer
# In order for `stored_data()` to be able to load data stored with
# an arbitrary serializer, yet still have meaningful file extensions,
# the format (i.e. extension) is saved to an accompanying file
metadata_path = self.datafile('.{0}.alfred-workflow'.format(name))
filename = '{0}.{1}'.format(name, serializer_name)
data_path = self.datafile(filename)
if data_path == self.settings_path:
raise ValueError(
                'Cannot save data to ' +
'`{0}` with format `{1}`. '.format(name, serializer_name) +
"This would overwrite Alfred-Workflow's settings file.")
serializer = manager.serializer(serializer_name)
if serializer is None:
raise ValueError(
'Invalid serializer `{0}`. Register your serializer with '
'`manager.register()` first.'.format(serializer_name))
if data is None: # Delete cached data
delete_paths((metadata_path, data_path))
return
# Ensure write is not interrupted by SIGTERM
@uninterruptible
def _store():
# Save file extension
with atomic_writer(metadata_path, 'wb') as file_obj:
file_obj.write(serializer_name)
with atomic_writer(data_path, 'wb') as file_obj:
serializer.dump(data, file_obj)
_store()
self.logger.debug('saved data: %s', data_path)
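    # A minimal usage sketch (names and data are illustrative, and ``wf``
    # is assumed to be a Workflow instance):
    #   wf.store_data('accounts', {'user': 'demo'}, serializer='json')
    #   data = wf.stored_data('accounts')
    #   wf.store_data('accounts', None)   # passing None deletes the store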
def cached_data(self, name, data_func=None, max_age=60):
"""Return cached data if younger than ``max_age`` seconds.
Retrieve data from cache or re-generate and re-cache data if
        stale/non-existent. If ``max_age`` is 0, return cached data no
matter how old.
:param name: name of datastore
:param data_func: function to (re-)generate data.
:type data_func: ``callable``
:param max_age: maximum age of cached data in seconds
:type max_age: ``int``
:returns: cached data, return value of ``data_func`` or ``None``
if ``data_func`` is not set
"""
serializer = manager.serializer(self.cache_serializer)
cache_path = self.cachefile('%s.%s' % (name, self.cache_serializer))
age = self.cached_data_age(name)
if (age < max_age or max_age == 0) and os.path.exists(cache_path):
with open(cache_path, 'rb') as file_obj:
self.logger.debug('loading cached data: %s', cache_path)
return serializer.load(file_obj)
if not data_func:
return None
data = data_func()
self.cache_data(name, data)
return data
def cache_data(self, name, data):
"""Save ``data`` to cache under ``name``.
If ``data`` is ``None``, the corresponding cache file will be
deleted.
:param name: name of datastore
:param data: data to store. This may be any object supported by
the cache serializer
"""
serializer = manager.serializer(self.cache_serializer)
cache_path = self.cachefile('%s.%s' % (name, self.cache_serializer))
if data is None:
if os.path.exists(cache_path):
os.unlink(cache_path)
self.logger.debug('deleted cache file: %s', cache_path)
return
with atomic_writer(cache_path, 'wb') as file_obj:
serializer.dump(data, file_obj)
self.logger.debug('cached data: %s', cache_path)
def cached_data_fresh(self, name, max_age):
"""Whether cache `name` is less than `max_age` seconds old.
:param name: name of datastore
:param max_age: maximum age of data in seconds
:type max_age: ``int``
:returns: ``True`` if data is less than ``max_age`` old, else
``False``
"""
age = self.cached_data_age(name)
if not age:
return False
return age < max_age
def cached_data_age(self, name):
"""Return age in seconds of cache `name` or 0 if cache doesn't exist.
:param name: name of datastore
:type name: ``unicode``
:returns: age of datastore in seconds
:rtype: ``int``
"""
cache_path = self.cachefile('%s.%s' % (name, self.cache_serializer))
if not os.path.exists(cache_path):
return 0
return time.time() - os.stat(cache_path).st_mtime
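    # A minimal usage sketch (names are illustrative, and ``wf`` is assumed
    # to be a Workflow instance): serve the cached copy if it is younger
    # than 10 minutes, otherwise call fetch_posts() and cache its result.
    #   posts = wf.cached_data('posts', fetch_posts, max_age=600)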
def filter(self, query, items, key=lambda x: x, ascending=False,
include_score=False, min_score=0, max_results=0,
match_on=MATCH_ALL, fold_diacritics=True):
"""Fuzzy search filter. Returns list of ``items`` that match ``query``.
``query`` is case-insensitive. Any item that does not contain the
entirety of ``query`` is rejected.
If ``query`` is an empty string or contains only whitespace,
all items will match.
:param query: query to test items against
:type query: ``unicode``
:param items: iterable of items to test
:type items: ``list`` or ``tuple``
:param key: function to get comparison key from ``items``.
Must return a ``unicode`` string. The default simply returns
the item.
:type key: ``callable``
:param ascending: set to ``True`` to get worst matches first
:type ascending: ``Boolean``
:param include_score: Useful for debugging the scoring algorithm.
If ``True``, results will be a list of tuples
``(item, score, rule)``.
:type include_score: ``Boolean``
:param min_score: If non-zero, ignore results with a score lower
than this.
:type min_score: ``int``
:param max_results: If non-zero, prune results list to this length.
:type max_results: ``int``
:param match_on: Filter option flags. Bitwise-combined list of
``MATCH_*`` constants (see below).
:type match_on: ``int``
:param fold_diacritics: Convert search keys to ASCII-only
characters if ``query`` only contains ASCII characters.
:type fold_diacritics: ``Boolean``
:returns: list of ``items`` matching ``query`` or list of
``(item, score, rule)`` `tuples` if ``include_score`` is ``True``.
``rule`` is the ``MATCH_*`` rule that matched the item.
:rtype: ``list``
**Matching rules**
By default, :meth:`filter` uses all of the following flags (i.e.
:const:`MATCH_ALL`). The tests are always run in the given order:
1. :const:`MATCH_STARTSWITH`
Item search key starts with ``query`` (case-insensitive).
2. :const:`MATCH_CAPITALS`
The list of capital letters in item search key starts with
``query`` (``query`` may be lower-case). E.g., ``of``
would match ``OmniFocus``, ``gc`` would match ``Google Chrome``.
3. :const:`MATCH_ATOM`
Search key is split into "atoms" on non-word characters
(.,-,' etc.). Matches if ``query`` is one of these atoms
(case-insensitive).
4. :const:`MATCH_INITIALS_STARTSWITH`
Initials are the first characters of the above-described
"atoms" (case-insensitive).
5. :const:`MATCH_INITIALS_CONTAIN`
``query`` is a substring of the above-described initials.
6. :const:`MATCH_INITIALS`
Combination of (4) and (5).
7. :const:`MATCH_SUBSTRING`
``query`` is a substring of item search key (case-insensitive).
8. :const:`MATCH_ALLCHARS`
All characters in ``query`` appear in item search key in
the same order (case-insensitive).
9. :const:`MATCH_ALL`
Combination of all the above.
:const:`MATCH_ALLCHARS` is considerably slower than the other
tests and provides much less accurate results.
**Examples:**
To ignore :const:`MATCH_ALLCHARS` (tends to provide the worst
matches and is expensive to run), use
``match_on=MATCH_ALL ^ MATCH_ALLCHARS``.
To match only on capitals, use ``match_on=MATCH_CAPITALS``.
To match only on startswith and substring, use
``match_on=MATCH_STARTSWITH | MATCH_SUBSTRING``.
**Diacritic folding**
.. versionadded:: 1.3
If ``fold_diacritics`` is ``True`` (the default), and ``query``
contains only ASCII characters, non-ASCII characters in search keys
will be converted to ASCII equivalents (e.g. **ü** -> **u**,
**ß** -> **ss**, **é** -> **e**).
See :const:`ASCII_REPLACEMENTS` for all replacements.
If ``query`` contains non-ASCII characters, search keys will not be
altered.
"""
if not query:
return items
# Remove preceding/trailing spaces
query = query.strip()
if not query:
return items
# Use user override if there is one
fold_diacritics = self.settings.get('__workflow_diacritic_folding',
fold_diacritics)
results = []
for item in items:
skip = False
score = 0
words = [s.strip() for s in query.split(' ')]
value = key(item).strip()
if value == '':
continue
for word in words:
if word == '':
continue
s, rule = self._filter_item(value, word, match_on,
fold_diacritics)
if not s: # Skip items that don't match part of the query
skip = True
score += s
if skip:
continue
if score:
# use "reversed" `score` (i.e. highest becomes lowest) and
# `value` as sort key. This means items with the same score
# will be sorted in alphabetical not reverse alphabetical order
results.append(((100.0 / score, value.lower(), score),
(item, score, rule)))
# sort on keys, then discard the keys
results.sort(reverse=ascending)
results = [t[1] for t in results]
if min_score:
results = [r for r in results if r[1] > min_score]
if max_results and len(results) > max_results:
results = results[:max_results]
# return list of ``(item, score, rule)``
if include_score:
return results
# just return list of items
return [t[0] for t in results]
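    # A minimal usage sketch (data are illustrative, and ``wf`` is assumed
    # to be a Workflow instance): 'doh' matches "The Dukes of Hazzard" via
    # the initials rules described above.
    #   shows = [{'name': 'The Dukes of Hazzard'}, {'name': 'OmniFocus'}]
    #   hits = wf.filter('doh', shows, key=lambda s: s['name'],
    #                    max_results=5)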
def _filter_item(self, value, query, match_on, fold_diacritics):
"""Filter ``value`` against ``query`` using rules ``match_on``.
:returns: ``(score, rule)``
"""
query = query.lower()
if not isascii(query):
fold_diacritics = False
if fold_diacritics:
value = self.fold_to_ascii(value)
# pre-filter any items that do not contain all characters
# of ``query`` to save on running several more expensive tests
if not set(query) <= set(value.lower()):
return (0, None)
# item starts with query
if match_on & MATCH_STARTSWITH and value.lower().startswith(query):
score = 100.0 - (len(value) / len(query))
return (score, MATCH_STARTSWITH)
# query matches capitalised letters in item,
# e.g. of = OmniFocus
if match_on & MATCH_CAPITALS:
initials = ''.join([c for c in value if c in INITIALS])
if initials.lower().startswith(query):
score = 100.0 - (len(initials) / len(query))
return (score, MATCH_CAPITALS)
# split the item into "atoms", i.e. words separated by
# spaces or other non-word characters
if (match_on & MATCH_ATOM or
match_on & MATCH_INITIALS_CONTAIN or
match_on & MATCH_INITIALS_STARTSWITH):
atoms = [s.lower() for s in split_on_delimiters(value)]
# print('atoms : %s --> %s' % (value, atoms))
# initials of the atoms
initials = ''.join([s[0] for s in atoms if s])
if match_on & MATCH_ATOM:
# is `query` one of the atoms in item?
# similar to substring, but scores more highly, as it's
# a word within the item
if query in atoms:
score = 100.0 - (len(value) / len(query))
return (score, MATCH_ATOM)
# `query` matches start (or all) of the initials of the
# atoms, e.g. ``himym`` matches "How I Met Your Mother"
# *and* "how i met your mother" (the ``capitals`` rule only
# matches the former)
if (match_on & MATCH_INITIALS_STARTSWITH and
initials.startswith(query)):
score = 100.0 - (len(initials) / len(query))
return (score, MATCH_INITIALS_STARTSWITH)
# `query` is a substring of initials, e.g. ``doh`` matches
# "The Dukes of Hazzard"
elif (match_on & MATCH_INITIALS_CONTAIN and
query in initials):
score = 95.0 - (len(initials) / len(query))
return (score, MATCH_INITIALS_CONTAIN)
# `query` is a substring of item
if match_on & MATCH_SUBSTRING and query in value.lower():
score = 90.0 - (len(value) / len(query))
return (score, MATCH_SUBSTRING)
# finally, assign a score based on how close together the
# characters in `query` are in item.
if match_on & MATCH_ALLCHARS:
search = self._search_for_query(query)
match = search(value)
if match:
score = 100.0 / ((1 + match.start()) *
(match.end() - match.start() + 1))
return (score, MATCH_ALLCHARS)
# Nothing matched
return (0, None)
def _search_for_query(self, query):
if query in self._search_pattern_cache:
return self._search_pattern_cache[query]
# Build pattern: include all characters
pattern = []
for c in query:
# pattern.append('[^{0}]*{0}'.format(re.escape(c)))
pattern.append('.*?{0}'.format(re.escape(c)))
pattern = ''.join(pattern)
search = re.compile(pattern, re.IGNORECASE).search
self._search_pattern_cache[query] = search
return search
def run(self, func, text_errors=False):
"""Call ``func`` to run your workflow.
:param func: Callable to call with ``self`` (i.e. the :class:`Workflow`
instance) as first argument.
:param text_errors: Emit error messages in plain text, not in
Alfred's XML/JSON feedback format. Use this when you're not
running Alfred-Workflow in a Script Filter and would like
to pass the error message to, say, a notification.
:type text_errors: ``Boolean``
``func`` will be called with :class:`Workflow` instance as first
argument.
``func`` should be the main entry point to your workflow.
Any exceptions raised will be logged and an error message will be
output to Alfred.
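        Example (hypothetical Script Filter entry point, shown here only as a
        usage sketch)::
            def main(wf):
                wf.add_item(u'Hello', u'World')
                wf.send_feedback()
            if __name__ == '__main__':
                wf = Workflow()
                sys.exit(wf.run(main))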
"""
start = time.time()
# Call workflow's entry function/method within a try-except block
# to catch any errors and display an error message in Alfred
try:
if self.version:
self.logger.debug('workflow version: %s', self.version)
# Run update check if configured for self-updates.
# This call has to go in the `run` try-except block, as it will
# initialise `self.settings`, which will raise an exception
# if `settings.json` isn't valid.
if self._update_settings:
self.check_update()
# Run workflow's entry function/method
func(self)
# Set last version run to current version after a successful
# run
self.set_last_version()
except Exception as err:
self.logger.exception(err)
if self.help_url:
self.logger.info('for assistance, see: %s', self.help_url)
if not sys.stdout.isatty(): # Show error in Alfred
if text_errors:
print(unicode(err).encode('utf-8'), end='')
else:
self._items = []
if self._name:
name = self._name
elif self._bundleid:
name = self._bundleid
else: # pragma: no cover
name = os.path.dirname(__file__)
self.add_item("Error in workflow '%s'" % name,
unicode(err),
icon=ICON_ERROR)
self.send_feedback()
return 1
finally:
self.logger.debug('workflow finished in %0.3f seconds',
time.time() - start)
return 0
# Alfred feedback methods ------------------------------------------
def add_item(self, title, subtitle='', modifier_subtitles=None, arg=None,
autocomplete=None, valid=False, uid=None, icon=None,
icontype=None, type=None, largetext=None, copytext=None,
quicklookurl=None):
"""Add an item to be output to Alfred.
:param title: Title shown in Alfred
:type title: ``unicode``
:param subtitle: Subtitle shown in Alfred
:type subtitle: ``unicode``
:param modifier_subtitles: Subtitles shown when modifier
(CMD, OPT etc.) is pressed. Use a ``dict`` with the lowercase
keys ``cmd``, ``ctrl``, ``shift``, ``alt`` and ``fn``
:type modifier_subtitles: ``dict``
:param arg: Argument passed by Alfred as ``{query}`` when item is
actioned
:type arg: ``unicode``
:param autocomplete: Text expanded in Alfred when item is TABbed
:type autocomplete: ``unicode``
:param valid: Whether or not item can be actioned
:type valid: ``Boolean``
:param uid: Used by Alfred to remember/sort items
:type uid: ``unicode``
:param icon: Filename of icon to use
:type icon: ``unicode``
:param icontype: Type of icon. Must be one of ``None`` , ``'filetype'``
or ``'fileicon'``. Use ``'filetype'`` when ``icon`` is a filetype
such as ``'public.folder'``. Use ``'fileicon'`` when you wish to
use the icon of the file specified as ``icon``, e.g.
``icon='/Applications/Safari.app', icontype='fileicon'``.
Leave as `None` if ``icon`` points to an actual
icon file.
:type icontype: ``unicode``
:param type: Result type. Currently only ``'file'`` is supported
(by Alfred). This will tell Alfred to enable file actions for
this item.
:type type: ``unicode``
:param largetext: Text to be displayed in Alfred's large text box
if user presses CMD+L on item.
:type largetext: ``unicode``
:param copytext: Text to be copied to pasteboard if user presses
CMD+C on item.
:type copytext: ``unicode``
:param quicklookurl: URL to be displayed using Alfred's Quick Look
feature (tapping ``SHIFT`` or ``⌘+Y`` on a result).
:type quicklookurl: ``unicode``
:returns: :class:`Item` instance
See :ref:`icons` for a list of the supported system icons.
.. note::
Although this method returns an :class:`Item` instance, you don't
need to hold onto it or worry about it. All generated :class:`Item`
instances are also collected internally and sent to Alfred when
:meth:`send_feedback` is called.
The generated :class:`Item` is only returned in case you want to
edit it or do something with it other than send it to Alfred.
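        Example (illustrative call; all values are made up)::
            wf.add_item(u'Open Safari',
                        subtitle=u'Launch the browser',
                        arg=u'/Applications/Safari.app',
                        valid=True,
                        icon=u'/Applications/Safari.app',
                        icontype=u'fileicon')
            wf.send_feedback()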
"""
item = self.item_class(title, subtitle, modifier_subtitles, arg,
autocomplete, valid, uid, icon, icontype, type,
largetext, copytext, quicklookurl)
self._items.append(item)
return item
def send_feedback(self):
"""Print stored items to console/Alfred as XML."""
root = ET.Element('items')
for item in self._items:
root.append(item.elem)
sys.stdout.write('<?xml version="1.0" encoding="utf-8"?>\n')
sys.stdout.write(ET.tostring(root).encode('utf-8'))
sys.stdout.flush()
####################################################################
# Updating methods
####################################################################
@property
def first_run(self):
"""Return ``True`` if it's the first time this version has run.
.. versionadded:: 1.9.10
Raises a :class:`ValueError` if :attr:`version` isn't set.
"""
if not self.version:
raise ValueError('No workflow version set')
if not self.last_version_run:
return True
return self.version != self.last_version_run
@property
def last_version_run(self):
"""Return version of last version to run (or ``None``).
.. versionadded:: 1.9.10
:returns: :class:`~workflow.update.Version` instance
or ``None``
"""
if self._last_version_run is UNSET:
version = self.settings.get('__workflow_last_version')
if version:
from update import Version
version = Version(version)
self._last_version_run = version
self.logger.debug('last run version: %s', self._last_version_run)
return self._last_version_run
def set_last_version(self, version=None):
"""Set :attr:`last_version_run` to current version.
.. versionadded:: 1.9.10
:param version: version to store (default is current version)
:type version: :class:`~workflow.update.Version` instance
or ``unicode``
:returns: ``True`` if version is saved, else ``False``
"""
if not version:
if not self.version:
self.logger.warning(
"Can't save last version: workflow has no version")
return False
version = self.version
if isinstance(version, basestring):
from update import Version
version = Version(version)
self.settings['__workflow_last_version'] = str(version)
self.logger.debug('set last run version: %s', version)
return True
@property
def update_available(self):
"""Whether an update is available.
.. versionadded:: 1.9
See :ref:`guide-updates` in the :ref:`user-manual` for detailed
information on how to enable your workflow to update itself.
:returns: ``True`` if an update is available, else ``False``
"""
# Create a new workflow object to ensure standard serialiser
# is used (update.py is called without the user's settings)
update_data = Workflow().cached_data('__workflow_update_status',
max_age=0)
self.logger.debug('update_data: %r', update_data)
if not update_data or not update_data.get('available'):
return False
return update_data['available']
@property
def prereleases(self):
"""Whether workflow should update to pre-release versions.
.. versionadded:: 1.16
:returns: ``True`` if pre-releases are enabled with the :ref:`magic
argument <magic-arguments>` or the ``update_settings`` dict, else
``False``.
"""
if self._update_settings.get('prereleases'):
return True
return self.settings.get('__workflow_prereleases') or False
def check_update(self, force=False):
"""Call update script if it's time to check for a new release.
.. versionadded:: 1.9
The update script will be run in the background, so it won't
        interfere with the execution of your workflow.
See :ref:`guide-updates` in the :ref:`user-manual` for detailed
information on how to enable your workflow to update itself.
:param force: Force update check
:type force: ``Boolean``
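        The check reads ``github_slug`` and ``frequency`` from the update
        settings dict, which this sketch assumes is passed as
        ``update_settings`` to :class:`Workflow` (the repository slug is a
        made-up value)::
            wf = Workflow(update_settings={
                'github_slug': 'someuser/some-workflow',
                'frequency': 7,  # check at most once a week
            })
            wf.check_update()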
"""
frequency = self._update_settings.get('frequency',
DEFAULT_UPDATE_FREQUENCY)
if not force and not self.settings.get('__workflow_autoupdate', True):
self.logger.debug('Auto update turned off by user')
return
# Check for new version if it's time
if (force or not self.cached_data_fresh(
'__workflow_update_status', frequency * 86400)):
github_slug = self._update_settings['github_slug']
# version = self._update_settings['version']
version = str(self.version)
from background import run_in_background
# update.py is adjacent to this file
update_script = os.path.join(os.path.dirname(__file__),
b'update.py')
cmd = ['/usr/bin/python', update_script, 'check', github_slug,
version]
if self.prereleases:
cmd.append('--prereleases')
self.logger.info('Checking for update ...')
run_in_background('__workflow_update_check', cmd)
else:
self.logger.debug('Update check not due')
def start_update(self):
"""Check for update and download and install new workflow file.
.. versionadded:: 1.9
See :ref:`guide-updates` in the :ref:`user-manual` for detailed
information on how to enable your workflow to update itself.
:returns: ``True`` if an update is available and will be
installed, else ``False``
"""
import update
github_slug = self._update_settings['github_slug']
# version = self._update_settings['version']
version = str(self.version)
if not update.check_update(github_slug, version, self.prereleases):
return False
from background import run_in_background
# update.py is adjacent to this file
update_script = os.path.join(os.path.dirname(__file__),
b'update.py')
cmd = ['/usr/bin/python', update_script, 'install', github_slug,
version]
if self.prereleases:
cmd.append('--prereleases')
self.logger.debug('Downloading update ...')
run_in_background('__workflow_update_install', cmd)
return True
####################################################################
# Keychain password storage methods
####################################################################
def save_password(self, account, password, service=None):
"""Save account credentials.
If the account exists, the old password will first be deleted
(Keychain throws an error otherwise).
If something goes wrong, a :class:`KeychainError` exception will
be raised.
:param account: name of the account the password is for, e.g.
"Pinboard"
:type account: ``unicode``
:param password: the password to secure
:type password: ``unicode``
:param service: Name of the service. By default, this is the
workflow's bundle ID
:type service: ``unicode``
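        Example (illustrative sketch; the account name and password are
        made-up values)::
            wf.save_password('Pinboard', 'hunter2')
            assert wf.get_password('Pinboard') == 'hunter2'
            wf.delete_password('Pinboard')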
"""
if not service:
service = self.bundleid
try:
self._call_security('add-generic-password', service, account,
'-w', password)
self.logger.debug('Saved password : %s:%s', service, account)
except PasswordExists:
self.logger.debug('Password exists : %s:%s', service, account)
current_password = self.get_password(account, service)
if current_password == password:
self.logger.debug('Password unchanged')
else:
self.delete_password(account, service)
self._call_security('add-generic-password', service,
account, '-w', password)
self.logger.debug('save_password : %s:%s', service, account)
def get_password(self, account, service=None):
"""Retrieve the password saved at ``service/account``.
Raise :class:`PasswordNotFound` exception if password doesn't exist.
:param account: name of the account the password is for, e.g.
"Pinboard"
:type account: ``unicode``
:param service: Name of the service. By default, this is the workflow's
bundle ID
:type service: ``unicode``
:returns: account password
:rtype: ``unicode``
"""
if not service:
service = self.bundleid
output = self._call_security('find-generic-password', service,
account, '-g')
# Parsing of `security` output is adapted from python-keyring
# by Jason R. Coombs
# https://pypi.python.org/pypi/keyring
m = re.search(
r'password:\s*(?:0x(?P<hex>[0-9A-F]+)\s*)?(?:"(?P<pw>.*)")?',
output)
if m:
groups = m.groupdict()
h = groups.get('hex')
password = groups.get('pw')
if h:
password = unicode(binascii.unhexlify(h), 'utf-8')
self.logger.debug('Got password : %s:%s', service, account)
return password
def delete_password(self, account, service=None):
"""Delete the password stored at ``service/account``.
Raise :class:`PasswordNotFound` if account is unknown.
:param account: name of the account the password is for, e.g.
"Pinboard"
:type account: ``unicode``
:param service: Name of the service. By default, this is the workflow's
bundle ID
:type service: ``unicode``
"""
if not service:
service = self.bundleid
self._call_security('delete-generic-password', service, account)
self.logger.debug('Deleted password : %s:%s', service, account)
####################################################################
# Methods for workflow:* magic args
####################################################################
def _register_default_magic(self):
"""Register the built-in magic arguments."""
# TODO: refactor & simplify
# Wrap callback and message with callable
def callback(func, msg):
def wrapper():
func()
return msg
return wrapper
self.magic_arguments['delcache'] = callback(self.clear_cache,
'Deleted workflow cache')
self.magic_arguments['deldata'] = callback(self.clear_data,
'Deleted workflow data')
self.magic_arguments['delsettings'] = callback(
self.clear_settings, 'Deleted workflow settings')
self.magic_arguments['reset'] = callback(self.reset,
'Reset workflow')
self.magic_arguments['openlog'] = callback(self.open_log,
'Opening workflow log file')
self.magic_arguments['opencache'] = callback(
self.open_cachedir, 'Opening workflow cache directory')
self.magic_arguments['opendata'] = callback(
self.open_datadir, 'Opening workflow data directory')
self.magic_arguments['openworkflow'] = callback(
self.open_workflowdir, 'Opening workflow directory')
self.magic_arguments['openterm'] = callback(
self.open_terminal, 'Opening workflow root directory in Terminal')
# Diacritic folding
def fold_on():
self.settings['__workflow_diacritic_folding'] = True
return 'Diacritics will always be folded'
def fold_off():
self.settings['__workflow_diacritic_folding'] = False
return 'Diacritics will never be folded'
def fold_default():
if '__workflow_diacritic_folding' in self.settings:
del self.settings['__workflow_diacritic_folding']
return 'Diacritics folding reset'
self.magic_arguments['foldingon'] = fold_on
self.magic_arguments['foldingoff'] = fold_off
self.magic_arguments['foldingdefault'] = fold_default
# Updates
def update_on():
self.settings['__workflow_autoupdate'] = True
return 'Auto update turned on'
def update_off():
self.settings['__workflow_autoupdate'] = False
return 'Auto update turned off'
def prereleases_on():
self.settings['__workflow_prereleases'] = True
return 'Prerelease updates turned on'
def prereleases_off():
self.settings['__workflow_prereleases'] = False
return 'Prerelease updates turned off'
def do_update():
if self.start_update():
return 'Downloading and installing update ...'
else:
return 'No update available'
self.magic_arguments['autoupdate'] = update_on
self.magic_arguments['noautoupdate'] = update_off
self.magic_arguments['prereleases'] = prereleases_on
self.magic_arguments['noprereleases'] = prereleases_off
self.magic_arguments['update'] = do_update
# Help
def do_help():
if self.help_url:
self.open_help()
return 'Opening workflow help URL in browser'
else:
return 'Workflow has no help URL'
def show_version():
if self.version:
return 'Version: {0}'.format(self.version)
else:
return 'This workflow has no version number'
def list_magic():
"""Display all available magic args in Alfred."""
isatty = sys.stderr.isatty()
for name in sorted(self.magic_arguments.keys()):
if name == 'magic':
continue
arg = self.magic_prefix + name
self.logger.debug(arg)
if not isatty:
self.add_item(arg, icon=ICON_INFO)
if not isatty:
self.send_feedback()
self.magic_arguments['help'] = do_help
self.magic_arguments['magic'] = list_magic
self.magic_arguments['version'] = show_version
def clear_cache(self, filter_func=lambda f: True):
"""Delete all files in workflow's :attr:`cachedir`.
:param filter_func: Callable to determine whether a file should be
deleted or not. ``filter_func`` is called with the filename
            of each file in the cache directory. If it returns ``True``,
the file will be deleted.
By default, *all* files will be deleted.
:type filter_func: ``callable``
"""
self._delete_directory_contents(self.cachedir, filter_func)
def clear_data(self, filter_func=lambda f: True):
"""Delete all files in workflow's :attr:`datadir`.
:param filter_func: Callable to determine whether a file should be
deleted or not. ``filter_func`` is called with the filename
of each file in the data directory. If it returns ``True``,
the file will be deleted.
By default, *all* files will be deleted.
:type filter_func: ``callable``
"""
self._delete_directory_contents(self.datadir, filter_func)
def clear_settings(self):
"""Delete workflow's :attr:`settings_path`."""
if os.path.exists(self.settings_path):
os.unlink(self.settings_path)
self.logger.debug('Deleted : %r', self.settings_path)
def reset(self):
"""Delete workflow settings, cache and data.
File :attr:`settings <settings_path>` and directories
:attr:`cache <cachedir>` and :attr:`data <datadir>` are deleted.
"""
self.clear_cache()
self.clear_data()
self.clear_settings()
def open_log(self):
"""Open :attr:`logfile` in default app (usually Console.app)."""
subprocess.call(['open', self.logfile])
def open_cachedir(self):
"""Open the workflow's :attr:`cachedir` in Finder."""
subprocess.call(['open', self.cachedir])
def open_datadir(self):
"""Open the workflow's :attr:`datadir` in Finder."""
subprocess.call(['open', self.datadir])
def open_workflowdir(self):
"""Open the workflow's :attr:`workflowdir` in Finder."""
subprocess.call(['open', self.workflowdir])
def open_terminal(self):
"""Open a Terminal window at workflow's :attr:`workflowdir`."""
subprocess.call(['open', '-a', 'Terminal',
self.workflowdir])
def open_help(self):
"""Open :attr:`help_url` in default browser."""
subprocess.call(['open', self.help_url])
return 'Opening workflow help URL in browser'
####################################################################
# Helper methods
####################################################################
def decode(self, text, encoding=None, normalization=None):
"""Return ``text`` as normalised unicode.
If ``encoding`` and/or ``normalization`` is ``None``, the
        ``input_encoding`` and ``normalization`` parameters passed to
:class:`Workflow` are used.
:param text: string
:type text: encoded or Unicode string. If ``text`` is already a
Unicode string, it will only be normalised.
:param encoding: The text encoding to use to decode ``text`` to
Unicode.
:type encoding: ``unicode`` or ``None``
        :param normalization: The normalisation form to apply to ``text``.
:type normalization: ``unicode`` or ``None``
:returns: decoded and normalised ``unicode``
:class:`Workflow` uses "NFC" normalisation by default. This is the
standard for Python and will work well with data from the web (via
:mod:`~workflow.web` or :mod:`json`).
macOS, on the other hand, uses "NFD" normalisation (nearly), so data
coming from the system (e.g. via :mod:`subprocess` or
:func:`os.listdir`/:mod:`os.path`) may not match. You should either
normalise this data, too, or change the default normalisation used by
:class:`Workflow`.
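        Example (illustrative sketch; assumes filenames read from the
        filesystem, which macOS returns in NFD form)::
            for name in os.listdir(wf.datadir):
                name = wf.decode(name)  # normalise to the configured form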
"""
encoding = encoding or self._input_encoding
normalization = normalization or self._normalizsation
if not isinstance(text, unicode):
text = unicode(text, encoding)
return unicodedata.normalize(normalization, text)
def fold_to_ascii(self, text):
"""Convert non-ASCII characters to closest ASCII equivalent.
.. versionadded:: 1.3
.. note:: This only works for a subset of European languages.
:param text: text to convert
:type text: ``unicode``
:returns: text containing only ASCII characters
:rtype: ``unicode``
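        Example (illustrative; the exact output depends on
        :const:`ASCII_REPLACEMENTS`)::
            >>> wf.fold_to_ascii(u'Jürgen Straße')
            u'Jurgen Strasse'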
"""
if isascii(text):
return text
text = ''.join([ASCII_REPLACEMENTS.get(c, c) for c in text])
return unicode(unicodedata.normalize('NFKD',
text).encode('ascii', 'ignore'))
def dumbify_punctuation(self, text):
"""Convert non-ASCII punctuation to closest ASCII equivalent.
This method replaces "smart" quotes and n- or m-dashes with their
workaday ASCII equivalents. This method is currently not used
internally, but exists as a helper method for workflow authors.
        .. versionadded:: 1.9.7
:param text: text to convert
:type text: ``unicode``
:returns: text with only ASCII punctuation
:rtype: ``unicode``
"""
if isascii(text):
return text
text = ''.join([DUMB_PUNCTUATION.get(c, c) for c in text])
return text
def _delete_directory_contents(self, dirpath, filter_func):
"""Delete all files in a directory.
:param dirpath: path to directory to clear
:type dirpath: ``unicode`` or ``str``
        :param filter_func: function to determine whether a file shall be
deleted or not.
        :type filter_func: ``callable``
"""
if os.path.exists(dirpath):
for filename in os.listdir(dirpath):
if not filter_func(filename):
continue
path = os.path.join(dirpath, filename)
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.unlink(path)
self.logger.debug('Deleted : %r', path)
def _load_info_plist(self):
"""Load workflow info from ``info.plist``."""
# info.plist should be in the directory above this one
self._info = plistlib.readPlist(self.workflowfile('info.plist'))
self._info_loaded = True
def _create(self, dirpath):
"""Create directory `dirpath` if it doesn't exist.
:param dirpath: path to directory
:type dirpath: ``unicode``
:returns: ``dirpath`` argument
:rtype: ``unicode``
"""
if not os.path.exists(dirpath):
os.makedirs(dirpath)
return dirpath
def _call_security(self, action, service, account, *args):
"""Call ``security`` CLI program that provides access to keychains.
May raise `PasswordNotFound`, `PasswordExists` or `KeychainError`
exceptions (the first two are subclasses of `KeychainError`).
:param action: The ``security`` action to call, e.g.
``add-generic-password``
:type action: ``unicode``
:param service: Name of the service.
:type service: ``unicode``
:param account: name of the account the password is for, e.g.
"Pinboard"
:type account: ``unicode``
:param password: the password to secure
:type password: ``unicode``
:param *args: list of command line arguments to be passed to
``security``
:type *args: `list` or `tuple`
        :returns: the output of the ``security`` command
        :rtype: ``unicode``
"""
cmd = ['security', action, '-s', service, '-a', account] + list(args)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, _ = p.communicate()
if p.returncode == 44: # password does not exist
raise PasswordNotFound()
elif p.returncode == 45: # password already exists
raise PasswordExists()
elif p.returncode > 0:
err = KeychainError('Unknown Keychain error : %s' % stdout)
err.retcode = p.returncode
raise err
return stdout.strip().decode('utf-8')
|
mit
| -8,473,999,698,847,352,000 | 31.602193 | 94 | 0.547717 | false |
InUrSys/PescArt2.0
|
src/srcPlus/GenericPesquisas.py
|
1
|
10030
|
'''
Created on 13/12/2017
@author: chernomirdinmacuvele
'''
from PyQt5.Qt import QDialog, QModelIndex, QStandardItemModel, QStandardItem,\
QGroupBox
import mixedModel
import QT_tblViewUtility
import rscForm
import frmPesquisa_Sort
class GenericPesquisas(QDialog):
def configCombox(self):
'''
        Method to configure the model and the placeholder text hint on each combo box.
'''
lstWdgt = self.dictCB['widget']
lstQuer = self.dictCB['quer']
for idx, val in enumerate (lstWdgt):
model = mixedModel.setQueryModel(lstQuer[idx])
val.setModel(model)
val.setModelColumn(1)
self.CBTextHint(Combox=val)
def CBTextHint(self, Combox=None):
mdel = QStandardItemModel(Combox.model())
firstIndex = mdel.index(0, Combox.modelColumn(), Combox.rootModelIndex())
firstItem = QStandardItem(mdel.itemFromIndex(firstIndex))
firstItem.setSelectable(False)
def configComboxLocal(self):
'''
        Configure the location combo boxes and connect their change events.
'''
lstWdgt = [self.CBProvincia, self.CBDistrito, self.CBPosto, self.CBCentroPesca]
for wdg in lstWdgt:
wdgName = wdg.objectName()
query= self.dictLocal[wdgName]['query']
model= mixedModel.setQueryModel(query= query)
wdg.setModel(model)
wdg.setModelColumn(1)
self.CBTextHint(Combox=wdg)
wdg.currentTextChanged.connect(self.updateNextCombo)
def updateNextCombo(self):
'''
        Update the model of the next (dependent) combo box in the hierarchy.
'''
wdgt= self.sender().objectName()
setNext= self.dictLocal[wdgt]['nextLVL']
if setNext is not None:
query= self.dictLocal[setNext]['query'].format(val = mixedModel.getDataCombox(widg= self.sender()))
nextWdg = self.dictLocal[setNext]['wdgt']
nextWdgModel= nextWdg.model()
nextWdgModel.setQuery(query)
nextWdg.setCurrentIndex(0)
self.CBTextHint(Combox=self.dictLocal[setNext]['wdgt'])
def configRegistador(self):
if self.CBProvincia.currentIndex() == 0:
quer= "select null as id, '-Registador-' as nome union all select id, nome from ref_registador"
else:
id_prov = mixedModel.getDataCombox(widg= self.CBProvincia)
quer = "select null as id, '-Registador-' as nome union all select id, nome from ref_registador where id_centro = '{prov}'".format(prov = id_prov)
model = mixedModel.setQueryModel(query =quer)
self.CBRegistador.setModel(model)
self.CBRegistador.setModelColumn(1)
def buldingTheQuery(self):
startQuery = """SELECT tbl1.id, date(data_amostragem) as "Data da Amostra", tbl2.nome as "Centro", tbl3.nome as "Registador",
hora_inicioamo, hor_fimamo, tbl4.nome as "Dia da Semana", tbl5.nome as "Forca do Vento", tbl6.nome as "Nivel da Mare",
tbl7.nome as "Direcao do Vento", hora_vento, tbl8.nome as "Tipo de Mare", altura_preamar, hora_preamar, altura_baimar,
hora_baixamar, tbl9.nome as "Fase da Lua", tbl10.nome as "Nebulosidade", hora_nebulosidade, actividade_pesq,
total_artes_amos, total_artes_act, total_artes_n_activas, total_artes_prov_outo_cent, observacoes
FROM public.t_saidas as tbl1
left join ref_geometric as tbl2
on tbl1.id_centro = tbl2.id and tbl2.id_tiplocal = 'CTP'
left join ref_registador as tbl3
on tbl1.id_registrador = tbl3.id
left join ref_diasemana as tbl4
on tbl1.id_diasemana = tbl4.id
left join ref_table as tbl5
on tbl1.id_forcavento = tbl5.id and tbl5.id_grupo = 'FCV'
left join ref_table as tbl6
on tbl1.id_estadomare = tbl6.id and tbl6.id_grupo = 'NVM'
left join ref_table as tbl7
on tbl1.id_direccao = tbl7.id and tbl7.id_grupo = 'DDV'
left join ref_table as tbl8
on tbl1.id_tipomare = tbl8.id and tbl8.id_grupo = 'TPM'
left join ref_table as tbl9
on tbl1.id_faselua = tbl9.id and tbl9.id_grupo = 'FLD'
left join ref_table as tbl10
                        on tbl1.id_nebulosidade = tbl10.id and tbl10.id_grupo = 'NBL' """  # base query; filter clauses are appended below
#
#
if self.CBProvincia.currentIndex() != 0:
startQuery += " where "
if self.CBDistrito.currentIndex() != 0:
if self.CBPosto.currentIndex() != 0:
if self.CBCentroPesca.currentIndex() != 0:
ctp = mixedModel.getDataCombox(widg= self.CBCentroPesca)
startQuery += "tbl1.id_centro in (select tbl1.id from ref_geometric as tbl1 where tbl1.id = '{ctp}')".format(ctp = ctp)
else:
psd = mixedModel.getDataCombox(widg= self.CBPosto)
startQuery += """ tbl1.id_centro in (select tbl1.id from ref_geometric as tbl1
inner join ref_geometric as tbl2
on tbl1.id_parent = tbl2.id
where tbl2.id like '{psd}') """.format(psd = psd)
else:
dst = mixedModel.getDataCombox(widg= self.CBDistrito)
startQuery += """ tbl1.id_centro in (select tbl1.id from ref_geometric as tbl1
inner join ref_geometric as tbl2
on tbl1.id_parent = tbl2.id
inner join ref_geometric as tbl3
on tbl2.id_parent = tbl3.id
where tbl3.id like '{dst}') """.format(dst = dst)
else:
prv = mixedModel.getDataCombox(widg= self.CBProvincia)
startQuery += """ tbl1.id_centro in (select tbl1.id from ref_geometric as tbl1
inner join ref_geometric as tbl2
on tbl1.id_parent = tbl2.id
inner join ref_geometric as tbl3
on tbl2.id_parent = tbl3.id
inner join ref_geometric as tbl4
on tbl3.id_parent = tbl4.id
where tbl4.id like '{prv}') """.format(prv = prv)
#
#
if self.GBData.isChecked():
if self.CBProvincia.currentIndex(): #!= 0 or self.CBRegistador.currentIndex() != 0 or self.CBDiaSemana.currentIndex() != 0 or self.CBActividadePesqueria.currentIndex() != 0:
startQuery += ' and '
else:
startQuery += " where "
inicio = rscForm.getText(widg = self.DEInicio)
fim = rscForm.getText(widg = self.DEFim)
startQuery += "data_amostragem between '{inicio}' and '{fim}' ".format(inicio=inicio, fim=fim)
#
#
if self.CBRegistador.currentIndex() != 0:
if self.CBProvincia.currentIndex() != 0 or self.GBData.isChecked():#or self.CBDiaSemana.currentIndex() != 0 or self.CBActividadePesqueria.currentIndex() != 0:
startQuery += ' and '
else:
startQuery += " where "
rgt = mixedModel.getDataCombox(widg= self.CBRegistador)
startQuery += "tbl3.id = '{rgt}' ".format(rgt = rgt)
#
#
if self.CBDiaSemana.currentIndex() != 0:
if self.CBProvincia.currentIndex() != 0 or self.GBData.isChecked() or self.CBRegistador.currentIndex() != 0:# or self.CBActividadePesqueria.currentIndex() != 0:
startQuery += ' and '
else:
startQuery += " where "
dsm = mixedModel.getDataCombox(widg= self.CBDiaSemana)
startQuery += "tbl4.id = '{dsm}' ".format(dsm = dsm)
#
#
if self.CBActividadePesqueria.currentIndex() != 0:
if self.CBProvincia.currentIndex() != 0 or self.GBData.isChecked() or self.CBRegistador.currentIndex() != 0 or self.CBDiaSemana.currentIndex() != 0:
startQuery += ' and '
else:
startQuery += " where "
quer = mixedModel.getDataCombox(widg = self.CBActividadePesqueria)
startQuery += quer
#
#
try:
if self.endQuery != " ":
startQuery += "order by "
startQuery += self.endQuery
except AttributeError:
startQuery += " order by data_amostragem "
lstName = self.dictSaidas['newNames']
model = mixedModel.setQueryModel(query= startQuery, lstNewNames= lstName)
toHide = self.dictSaidas['toHide']
lstSizeCol = self.dictSaidas['sizeCol']
QT_tblViewUtility.setModelInView(tblView= self.TVSaidas, ViewModel= model, toHide = toHide)
QT_tblViewUtility.setViewCustom(tblView=self.TVSaidas, lstSizeCol=lstSizeCol)
def selectedRow(self, mIdx):
lstOut=[]
lenDict = len(self.dictSaidas['fldName'])
model = mIdx.model()
clickedRow = mIdx.row()
for idx in range(lenDict):
val = model.record(clickedRow).value(idx)
lstOut.append(val)
self.lstVal= lstOut
self.bOK = True
def toOpenSort(self):
dlg = frmPesquisa_Sort.frmSortting()
dlg.exec_()
self.endQuery = dlg.fQuery
|
gpl-3.0
| -1,872,132,791,458,829,000 | 48.905473 | 185 | 0.54008 | false |
clemenshage/grslra
|
smmprod/__init__.py
|
1
|
1426
|
# # -*- coding: utf-8 -*-
import numpy as np
from . import _smmprod
def smmprod_c(A, B, Omega):
    # out is preallocated here so it can be reused across loop iterations
out = np.zeros(Omega[0].shape[0])
_smmprod.smmprod(A, B, Omega, out)
return out
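# Illustrative usage sketch (shapes and index arrays are made-up values):
#   A = np.random.randn(5, 3)
#   B = np.random.randn(3, 4)
#   Omega = (np.array([0, 2, 4]), np.array([1, 3, 0]))  # observed (row, col) pairs
#   vals = smmprod_c(A, B, Omega)  # equals np.dot(A, B)[Omega]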
# def smmprod(A, B, Omega):
# A_rows = A[Omega[0]]
# B_cols = B.T[Omega[1]]
# return np.sum(A_rows * B_cols, axis=1)
#
#
# def smmprod2(A, B, Omega):
# A_rows = A[Omega[0]]
# B_cols = B.T[Omega[1]]
#     # in-place multiplication into A_rows, which avoids an extra allocation
# np.multiply(A_rows, B_cols, A_rows)
# return np.sum(A_rows, axis=1)
#
#
# def smmprod3(A, B, Omega):
#     # out is preallocated here so it can be reused across loop iterations
# out = np.zeros(Omega.shape[1])
# _smmprod.smmprod(A, B, Omega, out)
# return out
#
#
# def smmprod_loop(A, B, Omega):
# card_Omega = np.size(Omega[0])
# result = np.zeros(card_Omega)
# for k in range(card_Omega):
# result[k] = np.dot(A[Omega[0][k]], B.T[Omega[1][k]])
# return result
#
#
# def smmprod_loop2(A, B, Omega):
# card_Omega = np.size(Omega[0])
# result = np.zeros(card_Omega)
#     # transpose B only once
# B = B.T
#     # iterate over Omega.T for cheaper index extraction
# for index, idx in enumerate(Omega.T):
# result[index] = np.dot(A[idx[0]], B[idx[1]])
# return result
|
mit
| -1,816,194,977,066,251,800 | 28.040816 | 81 | 0.602249 | false |
thejens/luigi
|
luigi/worker.py
|
1
|
33476
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The worker communicates with the scheduler and does two things:
1. Sends all tasks that have to be run
2. Gets tasks from the scheduler that should be run
When running in local mode, the worker talks directly to a :py:class:`~luigi.scheduler.CentralPlannerScheduler` instance.
When you run a central server, the worker will talk to the scheduler using a :py:class:`~luigi.rpc.RemoteScheduler` instance.
Everything in this module is private to luigi and may change in incompatible
ways between versions. The exception is the exception types and the
:py:class:`worker` config class.
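Illustrative local-mode sketch (for orientation only; ``MyTask`` is an assumed
:py:class:`~luigi.task.Task` subclass)::
    sch = CentralPlannerScheduler()
    with Worker(scheduler=sch) as w:
        w.add(MyTask())
        w.run()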
"""
import collections
import getpass
import logging
import multiprocessing # Note: this seems to have some stability issues: https://github.com/spotify/luigi/pull/438
import os
import signal
try:
import Queue
except ImportError:
import queue as Queue
import random
import socket
import threading
import time
import traceback
import types
from luigi import six
from luigi import notifications
from luigi.event import Event
from luigi.task_register import load_task
from luigi.scheduler import DISABLED, DONE, FAILED, PENDING, CentralPlannerScheduler
from luigi.target import Target
from luigi.task import Task, flatten, getpaths, Config
from luigi.task_register import TaskClassException
from luigi.task_status import RUNNING
from luigi.parameter import FloatParameter, IntParameter, BoolParameter
try:
import simplejson as json
except ImportError:
import json
logger = logging.getLogger('luigi-interface')
# Prevent fork() from being called during a C-level getaddrinfo() which uses a process-global mutex,
# that may not be unlocked in child process, resulting in the process being locked indefinitely.
fork_lock = threading.Lock()
# Why we assert on _WAIT_INTERVAL_EPS:
# multiprocessing.Queue.get() is undefined for timeout=0 it seems:
# https://docs.python.org/3.4/library/multiprocessing.html#multiprocessing.Queue.get.
# I also tried with really low epsilon, but then ran into the same issue where
# the test case "test_external_dependency_worker_is_patient" got stuck. So I
# unscientifically just set the final value to a floating point number that
# "worked for me".
_WAIT_INTERVAL_EPS = 0.00001
class TaskException(Exception):
pass
class TaskProcess(multiprocessing.Process):
""" Wrap all task execution in this class.
Mainly for convenience since this is run in a separate process. """
def __init__(self, task, worker_id, result_queue, random_seed=False, worker_timeout=0,
tracking_url_callback=None):
super(TaskProcess, self).__init__()
self.task = task
self.worker_id = worker_id
self.result_queue = result_queue
self.random_seed = random_seed
self.tracking_url_callback = tracking_url_callback
if task.worker_timeout is not None:
worker_timeout = task.worker_timeout
self.timeout_time = time.time() + worker_timeout if worker_timeout else None
def _run_get_new_deps(self):
run_again = False
try:
task_gen = self.task.run(tracking_url_callback=self.tracking_url_callback)
except TypeError as ex:
if 'unexpected keyword argument' not in getattr(ex, 'message', ex.args[0]):
raise
run_again = True
if run_again:
task_gen = self.task.run()
if not isinstance(task_gen, types.GeneratorType):
return None
next_send = None
while True:
try:
if next_send is None:
requires = six.next(task_gen)
else:
requires = task_gen.send(next_send)
except StopIteration:
return None
new_req = flatten(requires)
new_deps = [(t.task_module, t.task_family, t.to_str_params())
for t in new_req]
if all(t.complete() for t in new_req):
next_send = getpaths(requires)
else:
return new_deps
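    # Illustrative sketch of the dynamic-dependency pattern this method drives
    # (``OtherTask`` and ``process`` are assumptions):
    #   class MyTask(luigi.Task):
    #       def run(self):
    #           other = OtherTask()
    #           yield other  # suspends until OtherTask is complete
    #           with other.output().open('r') as f:
    #               process(f)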
def run(self):
logger.info('[pid %s] Worker %s running %s', os.getpid(), self.worker_id, self.task)
if self.random_seed:
# Need to have different random seeds if running in separate processes
random.seed((os.getpid(), time.time()))
status = FAILED
expl = ''
missing = []
new_deps = []
try:
# Verify that all the tasks are fulfilled!
missing = [dep.task_id for dep in self.task.deps() if not dep.complete()]
if missing:
deps = 'dependency' if len(missing) == 1 else 'dependencies'
raise RuntimeError('Unfulfilled %s at run time: %s' % (deps, ', '.join(missing)))
self.task.trigger_event(Event.START, self.task)
t0 = time.time()
status = None
if self.task.run == NotImplemented:
# External task
# TODO(erikbern): We should check for task completeness after non-external tasks too!
# This will resolve #814 and make things a lot more consistent
status = DONE if self.task.complete() else FAILED
else:
new_deps = self._run_get_new_deps()
status = DONE if not new_deps else PENDING
if new_deps:
logger.info(
'[pid %s] Worker %s new requirements %s',
os.getpid(), self.worker_id, self.task)
elif status == DONE:
self.task.trigger_event(
Event.PROCESSING_TIME, self.task, time.time() - t0)
expl = self.task.on_success()
logger.info('[pid %s] Worker %s done %s', os.getpid(),
self.worker_id, self.task)
self.task.trigger_event(Event.SUCCESS, self.task)
except KeyboardInterrupt:
raise
except BaseException as ex:
status = FAILED
logger.exception("[pid %s] Worker %s failed %s", os.getpid(), self.worker_id, self.task)
self.task.trigger_event(Event.FAILURE, self.task, ex)
raw_error_message = self.task.on_failure(ex)
expl = raw_error_message
finally:
self.result_queue.put(
(self.task.task_id, status, expl, missing, new_deps))
def _recursive_terminate(self):
import psutil
try:
parent = psutil.Process(self.pid)
children = parent.children(recursive=True)
# terminate parent. Give it a chance to clean up
super(TaskProcess, self).terminate()
parent.wait()
# terminate children
for child in children:
try:
child.terminate()
except psutil.NoSuchProcess:
continue
except psutil.NoSuchProcess:
return
def terminate(self):
"""Terminate this process and its subprocesses."""
# default terminate() doesn't cleanup child processes, it orphans them.
try:
return self._recursive_terminate()
except ImportError:
return super(TaskProcess, self).terminate()
class SingleProcessPool(object):
"""
Dummy process pool for using a single processor.
Imitates the api of multiprocessing.Pool using single-processor equivalents.
"""
def apply_async(self, function, args):
return function(*args)
def close(self):
pass
def join(self):
pass
class DequeQueue(collections.deque):
"""
deque wrapper implementing the Queue interface.
"""
def put(self, obj, block=None, timeout=None):
return self.append(obj)
def get(self, block=None, timeout=None):
return self.pop()
class AsyncCompletionException(Exception):
"""
Exception indicating that something went wrong with checking complete.
"""
def __init__(self, trace):
self.trace = trace
class TracebackWrapper(object):
"""
Class to wrap tracebacks so we can know they're not just strings.
"""
def __init__(self, trace):
self.trace = trace
def check_complete(task, out_queue):
"""
Checks if task is complete, puts the result to out_queue.
"""
logger.debug("Checking if %s is complete", task)
try:
is_complete = task.complete()
except Exception:
is_complete = TracebackWrapper(traceback.format_exc())
out_queue.put((task, is_complete))
class worker(Config):
ping_interval = FloatParameter(default=1.0,
config_path=dict(section='core', name='worker-ping-interval'))
keep_alive = BoolParameter(default=False,
config_path=dict(section='core', name='worker-keep-alive'))
count_uniques = BoolParameter(default=False,
config_path=dict(section='core', name='worker-count-uniques'),
description='worker-count-uniques means that we will keep a '
'worker alive only if it has a unique pending task, as '
'well as having keep-alive true')
wait_interval = FloatParameter(default=1.0,
config_path=dict(section='core', name='worker-wait-interval'))
wait_jitter = FloatParameter(default=5.0)
max_reschedules = IntParameter(default=1,
config_path=dict(section='core', name='worker-max-reschedules'))
timeout = IntParameter(default=0,
config_path=dict(section='core', name='worker-timeout'))
task_limit = IntParameter(default=None,
config_path=dict(section='core', name='worker-task-limit'))
retry_external_tasks = BoolParameter(default=False,
config_path=dict(section='core', name='retry-external-tasks'),
description='If true, incomplete external tasks will be '
'retested for completion while Luigi is running.')
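# Illustrative luigi.cfg sketch (values are made up) showing how the
# ``config_path`` entries above map onto the [core] section:
#   [core]
#   worker-keep-alive = true
#   worker-ping-interval = 2.0
#   worker-count-uniques = false
#   worker-task-limit = 1000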
class KeepAliveThread(threading.Thread):
"""
Periodically tell the scheduler that the worker still lives.
"""
def __init__(self, scheduler, worker_id, ping_interval):
super(KeepAliveThread, self).__init__()
self._should_stop = threading.Event()
self._scheduler = scheduler
self._worker_id = worker_id
self._ping_interval = ping_interval
def stop(self):
self._should_stop.set()
def run(self):
while True:
self._should_stop.wait(self._ping_interval)
if self._should_stop.is_set():
logger.info("Worker %s was stopped. Shutting down Keep-Alive thread" % self._worker_id)
break
with fork_lock:
try:
self._scheduler.ping(worker=self._worker_id)
except: # httplib.BadStatusLine:
logger.warning('Failed pinging scheduler')
class Worker(object):
"""
Worker object communicates with a scheduler.
Simple class that talks to a scheduler and:
* tells the scheduler what it has to do + its dependencies
* asks for stuff to do (pulls it in a loop and runs it)
"""
def __init__(self, scheduler=None, worker_id=None, worker_processes=1, assistant=False, **kwargs):
if scheduler is None:
scheduler = CentralPlannerScheduler()
self.worker_processes = int(worker_processes)
self._worker_info = self._generate_worker_info()
if not worker_id:
worker_id = 'Worker(%s)' % ', '.join(['%s=%s' % (k, v) for k, v in self._worker_info])
self._config = worker(**kwargs)
assert self._config.wait_interval >= _WAIT_INTERVAL_EPS, "[worker] wait_interval must be positive"
assert self._config.wait_jitter >= 0.0, "[worker] wait_jitter must be equal or greater than zero"
self._id = worker_id
self._scheduler = scheduler
self._assistant = assistant
self._stop_requesting_work = False
self.host = socket.gethostname()
self._scheduled_tasks = {}
self._suspended_tasks = {}
self._first_task = None
self.add_succeeded = True
self.run_succeeded = True
self.unfulfilled_counts = collections.defaultdict(int)
try:
signal.signal(signal.SIGUSR1, self.handle_interrupt)
except AttributeError:
pass
# Keep info about what tasks are running (could be in other processes)
if worker_processes == 1:
self._task_result_queue = DequeQueue()
else:
self._task_result_queue = multiprocessing.Queue()
self._running_tasks = {}
# Stuff for execution_summary
self._add_task_history = []
self._get_work_response_history = []
def _add_task(self, *args, **kwargs):
"""
Call ``self._scheduler.add_task``, but store the values too so we can
implement :py:func:`luigi.execution_summary.summary`.
"""
task_id = kwargs['task_id']
status = kwargs['status']
runnable = kwargs['runnable']
task = self._scheduled_tasks.get(task_id)
if task:
msg = (task, status, runnable)
self._add_task_history.append(msg)
self._scheduler.add_task(*args, **kwargs)
logger.info('Informed scheduler that task %s has status %s', task_id, status)
def __enter__(self):
"""
Start the KeepAliveThread.
"""
self._keep_alive_thread = KeepAliveThread(self._scheduler, self._id, self._config.ping_interval)
self._keep_alive_thread.daemon = True
self._keep_alive_thread.start()
return self
def __exit__(self, type, value, traceback):
"""
Stop the KeepAliveThread and kill still running tasks.
"""
self._keep_alive_thread.stop()
self._keep_alive_thread.join()
for task in self._running_tasks.values():
if task.is_alive():
task.terminate()
return False # Don't suppress exception
def _generate_worker_info(self):
# Generate as much info as possible about the worker
# Some of these calls might not be available on all OS's
args = [('salt', '%09d' % random.randrange(0, 999999999)),
('workers', self.worker_processes)]
try:
args += [('host', socket.gethostname())]
except BaseException:
pass
try:
args += [('username', getpass.getuser())]
except BaseException:
pass
try:
args += [('pid', os.getpid())]
except BaseException:
pass
try:
sudo_user = os.getenv("SUDO_USER")
if sudo_user:
args.append(('sudo_user', sudo_user))
except BaseException:
pass
return args
def _validate_task(self, task):
if not isinstance(task, Task):
raise TaskException('Can not schedule non-task %s' % task)
if not task.initialized():
# we can't get the repr of it since it's not initialized...
raise TaskException('Task of class %s not initialized. Did you override __init__ and forget to call super(...).__init__?' % task.__class__.__name__)
def _log_complete_error(self, task, tb):
log_msg = "Will not schedule {task} or any dependencies due to error in complete() method:\n{tb}".format(task=task, tb=tb)
logger.warning(log_msg)
def _log_dependency_error(self, task, tb):
log_msg = "Will not schedule {task} or any dependencies due to error in deps() method:\n{tb}".format(task=task, tb=tb)
logger.warning(log_msg)
def _log_unexpected_error(self, task):
logger.exception("Luigi unexpected framework error while scheduling %s", task) # needs to be called from within except clause
def _email_complete_error(self, task, formatted_traceback):
self._email_error(task, formatted_traceback,
subject="Luigi: {task} failed scheduling. Host: {host}",
headline="Will not schedule task or any dependencies due to error in complete() method",
)
def _email_dependency_error(self, task, formatted_traceback):
self._email_error(task, formatted_traceback,
subject="Luigi: {task} failed scheduling. Host: {host}",
headline="Will not schedule task or any dependencies due to error in deps() method",
)
def _email_unexpected_error(self, task, formatted_traceback):
self._email_error(task, formatted_traceback,
subject="Luigi: Framework error while scheduling {task}. Host: {host}",
headline="Luigi framework error",
)
def _email_task_failure(self, task, formatted_traceback):
self._email_error(task, formatted_traceback,
subject="Luigi: {task} FAILED. Host: {host}",
headline="A task failed when running. Most likely run() raised an exception.",
)
def _email_error(self, task, formatted_traceback, subject, headline):
formatted_subject = subject.format(task=task, host=self.host)
message = notifications.format_task_error(headline, task, formatted_traceback)
notifications.send_error_email(formatted_subject, message, task.owner_email)
def add(self, task, multiprocess=False):
"""
Add a Task for the worker to check and possibly schedule and run.
Returns True if task and its dependencies were successfully scheduled or completed before.
"""
if self._first_task is None and hasattr(task, 'task_id'):
self._first_task = task.task_id
self.add_succeeded = True
if multiprocess:
queue = multiprocessing.Manager().Queue()
pool = multiprocessing.Pool()
else:
queue = DequeQueue()
pool = SingleProcessPool()
self._validate_task(task)
pool.apply_async(check_complete, [task, queue])
# we track queue size ourselves because len(queue) won't work for multiprocessing
queue_size = 1
try:
seen = set([task.task_id])
while queue_size:
current = queue.get()
queue_size -= 1
item, is_complete = current
for next in self._add(item, is_complete):
if next.task_id not in seen:
self._validate_task(next)
seen.add(next.task_id)
pool.apply_async(check_complete, [next, queue])
queue_size += 1
except (KeyboardInterrupt, TaskException):
raise
except Exception as ex:
self.add_succeeded = False
formatted_traceback = traceback.format_exc()
self._log_unexpected_error(task)
task.trigger_event(Event.BROKEN_TASK, task, ex)
self._email_unexpected_error(task, formatted_traceback)
finally:
pool.close()
pool.join()
return self.add_succeeded
def _add(self, task, is_complete):
if self._config.task_limit is not None and len(self._scheduled_tasks) >= self._config.task_limit:
logger.warning('Will not schedule %s or any dependencies due to exceeded task-limit of %d', task, self._config.task_limit)
return
formatted_traceback = None
try:
self._check_complete_value(is_complete)
except KeyboardInterrupt:
raise
except AsyncCompletionException as ex:
formatted_traceback = ex.trace
except BaseException:
formatted_traceback = traceback.format_exc()
if formatted_traceback is not None:
self.add_succeeded = False
self._log_complete_error(task, formatted_traceback)
task.trigger_event(Event.DEPENDENCY_MISSING, task)
self._email_complete_error(task, formatted_traceback)
# abort, i.e. don't schedule any subtasks of a task with
# failing complete()-method since we don't know if the task
# is complete and subtasks might not be desirable to run if
# they have already ran before
return
if is_complete:
deps = None
status = DONE
runnable = False
task.trigger_event(Event.DEPENDENCY_PRESENT, task)
elif task.run == NotImplemented:
deps = None
status = PENDING
runnable = worker().retry_external_tasks
task.trigger_event(Event.DEPENDENCY_MISSING, task)
logger.warning('Data for %s does not exist (yet?). The task is an '
                           'external data dependency, so it can not be run from'
' this luigi process.', task)
else:
try:
deps = task.deps()
except Exception as ex:
formatted_traceback = traceback.format_exc()
self.add_succeeded = False
self._log_dependency_error(task, formatted_traceback)
task.trigger_event(Event.BROKEN_TASK, task, ex)
self._email_dependency_error(task, formatted_traceback)
return
status = PENDING
runnable = True
if task.disabled:
status = DISABLED
if deps:
for d in deps:
self._validate_dependency(d)
task.trigger_event(Event.DEPENDENCY_DISCOVERED, task, d)
yield d # return additional tasks to add
deps = [d.task_id for d in deps]
self._scheduled_tasks[task.task_id] = task
self._add_task(worker=self._id, task_id=task.task_id, status=status,
deps=deps, runnable=runnable, priority=task.priority,
resources=task.process_resources(),
params=task.to_str_params(),
family=task.task_family,
module=task.task_module)
def _validate_dependency(self, dependency):
if isinstance(dependency, Target):
raise Exception('requires() can not return Target objects. Wrap it in an ExternalTask class')
elif not isinstance(dependency, Task):
raise Exception('requires() must return Task objects')
def _check_complete_value(self, is_complete):
if is_complete not in (True, False):
if isinstance(is_complete, TracebackWrapper):
raise AsyncCompletionException(is_complete.trace)
raise Exception("Return value of Task.complete() must be boolean (was %r)" % is_complete)
def _add_worker(self):
self._worker_info.append(('first_task', self._first_task))
self._scheduler.add_worker(self._id, self._worker_info)
def _log_remote_tasks(self, running_tasks, n_pending_tasks, n_unique_pending):
logger.debug("Done")
logger.debug("There are no more tasks to run at this time")
if running_tasks:
for r in running_tasks:
logger.debug('%s is currently run by worker %s', r['task_id'], r['worker'])
elif n_pending_tasks:
logger.debug("There are %s pending tasks possibly being run by other workers", n_pending_tasks)
if n_unique_pending:
logger.debug("There are %i pending tasks unique to this worker", n_unique_pending)
def _get_work(self):
if self._stop_requesting_work:
return None, 0, 0, 0
logger.debug("Asking scheduler for work...")
r = self._scheduler.get_work(
worker=self._id,
host=self.host,
assistant=self._assistant,
current_tasks=list(self._running_tasks.keys()),
)
n_pending_tasks = r['n_pending_tasks']
task_id = r['task_id']
running_tasks = r['running_tasks']
n_unique_pending = r['n_unique_pending']
self._get_work_response_history.append(dict(
task_id=task_id,
running_tasks=running_tasks,
))
if task_id is not None and task_id not in self._scheduled_tasks:
logger.info('Did not schedule %s, will load it dynamically', task_id)
try:
# TODO: we should obtain the module name from the server!
self._scheduled_tasks[task_id] = \
load_task(module=r.get('task_module'),
task_name=r['task_family'],
params_str=r['task_params'])
except TaskClassException as ex:
msg = 'Cannot find task for %s' % task_id
logger.exception(msg)
subject = 'Luigi: %s' % msg
error_message = notifications.wrap_traceback(ex)
notifications.send_error_email(subject, error_message)
self._add_task(worker=self._id, task_id=task_id, status=FAILED, runnable=False,
assistant=self._assistant)
task_id = None
self.run_succeeded = False
return task_id, running_tasks, n_pending_tasks, n_unique_pending
def _run_task(self, task_id):
task = self._scheduled_tasks[task_id]
p = self._create_task_process(task)
self._running_tasks[task_id] = p
if self.worker_processes > 1:
with fork_lock:
p.start()
else:
# Run in the same process
p.run()
def _create_task_process(self, task):
def update_tracking_url(tracking_url):
self._scheduler.add_task(
task_id=task.task_id,
worker=self._id,
status=RUNNING,
tracking_url=tracking_url,
)
return TaskProcess(
task, self._id, self._task_result_queue,
random_seed=bool(self.worker_processes > 1),
worker_timeout=self._config.timeout,
tracking_url_callback=update_tracking_url,
)
def _purge_children(self):
"""
Find dead children and put a response on the result queue.
:return:
"""
for task_id, p in six.iteritems(self._running_tasks):
if not p.is_alive() and p.exitcode:
error_msg = 'Task %s died unexpectedly with exit code %s' % (task_id, p.exitcode)
elif p.timeout_time is not None and time.time() > float(p.timeout_time) and p.is_alive():
p.terminate()
error_msg = 'Task %s timed out and was terminated.' % task_id
else:
continue
logger.info(error_msg)
self._task_result_queue.put((task_id, FAILED, error_msg, [], []))
def _handle_next_task(self):
"""
We have to catch three ways a task can be "done":
1. normal execution: the task runs/fails and puts a result back on the queue,
2. new dependencies: the task yielded new deps that were not complete and
will be rescheduled and dependencies added,
3. child process dies: we need to catch this separately.
"""
while True:
self._purge_children() # Deal with subprocess failures
try:
task_id, status, expl, missing, new_requirements = (
self._task_result_queue.get(
timeout=self._config.wait_interval))
except Queue.Empty:
return
task = self._scheduled_tasks[task_id]
if not task or task_id not in self._running_tasks:
continue
# Not a running task. Probably already removed.
# Maybe it yielded something?
if status == FAILED and expl:
# If no expl, it is because of a retry-external-task failure.
self._email_task_failure(task, expl)
new_deps = []
if new_requirements:
new_req = [load_task(module, name, params)
for module, name, params in new_requirements]
for t in new_req:
self.add(t)
new_deps = [t.task_id for t in new_req]
self._add_task(worker=self._id,
task_id=task_id,
status=status,
expl=json.dumps(expl),
resources=task.process_resources(),
runnable=None,
params=task.to_str_params(),
family=task.task_family,
module=task.task_module,
new_deps=new_deps,
assistant=self._assistant)
self._running_tasks.pop(task_id)
# re-add task to reschedule missing dependencies
if missing:
reschedule = True
# keep out of infinite loops by not rescheduling too many times
for task_id in missing:
self.unfulfilled_counts[task_id] += 1
if (self.unfulfilled_counts[task_id] >
self._config.max_reschedules):
reschedule = False
if reschedule:
self.add(task)
self.run_succeeded &= (status == DONE) or (len(new_deps) > 0)
return
def _sleeper(self):
# TODO is exponential backoff necessary?
while True:
jitter = self._config.wait_jitter
wait_interval = self._config.wait_interval + random.uniform(0, jitter)
logger.debug('Sleeping for %f seconds', wait_interval)
time.sleep(wait_interval)
yield
def _keep_alive(self, n_pending_tasks, n_unique_pending):
"""
        Returns true if a worker should stay alive given the pending task counts.
If worker-keep-alive is not set, this will always return false.
For an assistant, it will always return the value of worker-keep-alive.
Otherwise, it will return true for nonzero n_pending_tasks.
If worker-count-uniques is true, it will also
require that one of the tasks is unique to this worker.
"""
if not self._config.keep_alive:
return False
elif self._assistant:
return True
else:
return n_pending_tasks and (n_unique_pending or not self._config.count_uniques)
def handle_interrupt(self, signum, _):
"""
Stops the assistant from asking for more work on SIGUSR1
"""
if signum == signal.SIGUSR1:
self._config.keep_alive = False
self._stop_requesting_work = True
def run(self):
"""
Returns True if all scheduled tasks were executed successfully.
"""
logger.info('Running Worker with %d processes', self.worker_processes)
sleeper = self._sleeper()
self.run_succeeded = True
self._add_worker()
while True:
while len(self._running_tasks) >= self.worker_processes:
logger.debug('%d running tasks, waiting for next task to finish', len(self._running_tasks))
self._handle_next_task()
task_id, running_tasks, n_pending_tasks, n_unique_pending = self._get_work()
if task_id is None:
if not self._stop_requesting_work:
self._log_remote_tasks(running_tasks, n_pending_tasks, n_unique_pending)
if len(self._running_tasks) == 0:
if self._keep_alive(n_pending_tasks, n_unique_pending):
six.next(sleeper)
continue
else:
break
else:
self._handle_next_task()
continue
# task_id is not None:
logger.debug("Pending tasks: %s", n_pending_tasks)
self._run_task(task_id)
while len(self._running_tasks):
logger.debug('Shut down Worker, %d more tasks to go', len(self._running_tasks))
self._handle_next_task()
return self.run_succeeded
|
apache-2.0
| 7,350,108,259,306,369,000 | 37.214612 | 160 | 0.577668 | false |
krount/sslyze
|
sslyze/plugins/heartbleed_plugin.py
|
1
|
6967
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
import socket
import types
from xml.etree.ElementTree import Element
from nassl._nassl import WantReadError
from sslyze.plugins import plugin_base
from sslyze.plugins.plugin_base import PluginScanResult, PluginScanCommand
from sslyze.server_connectivity import ServerConnectivityInfo
from tls_parser.alert_protocol import TlsAlertRecord
from tls_parser.exceptions import NotEnoughData
from tls_parser.handshake_protocol import TlsServerHelloDoneRecord, TlsHandshakeRecord
from tls_parser.heartbeat_protocol import TlsHeartbeatRequestRecord
from tls_parser.parser import TlsRecordParser
from tls_parser.record_protocol import TlsVersionEnum
class HeartbleedScanCommand(PluginScanCommand):
"""Test the server(s) for the OpenSSL Heartbleed vulnerability.
"""
@classmethod
def get_cli_argument(cls):
return 'heartbleed'
class HeartbleedPlugin(plugin_base.Plugin):
"""Test the server(s) for the OpenSSL Heartbleed vulnerability (CVE-2014-0160).
"""
@classmethod
def get_available_commands(cls):
return [HeartbleedScanCommand]
def process_task(self, server_info, scan_command):
# type: (ServerConnectivityInfo, HeartbleedScanCommand) -> HeartbleedScanResult
ssl_connection = server_info.get_preconfigured_ssl_connection()
# Replace nassl.sslClient.do_handshake() with a heartbleed checking SSL handshake so that all the SSLyze options
# (startTLS, proxy, etc.) still work
ssl_connection.do_handshake = types.MethodType(do_handshake_with_heartbleed, ssl_connection)
is_vulnerable_to_heartbleed = False
try:
# Start the SSL handshake
ssl_connection.connect()
except VulnerableToHeartbleed:
# The test was completed and the server is vulnerable
is_vulnerable_to_heartbleed = True
except NotVulnerableToHeartbleed:
# The test was completed and the server is NOT vulnerable
pass
finally:
ssl_connection.close()
return HeartbleedScanResult(server_info, scan_command, is_vulnerable_to_heartbleed)
class HeartbleedScanResult(PluginScanResult):
"""The result of running a HeartbleedScanCommand on a specific server.
Attributes:
is_vulnerable_to_heartbleed (bool): True if the server is vulnerable to the Heartbleed attack.
"""
COMMAND_TITLE = 'OpenSSL Heartbleed'
def __init__(self, server_info, scan_command, is_vulnerable_to_heartbleed):
# type: (ServerConnectivityInfo, HeartbleedScanCommand, bool) -> None
super(HeartbleedScanResult, self).__init__(server_info, scan_command)
self.is_vulnerable_to_heartbleed = is_vulnerable_to_heartbleed
def as_text(self):
heartbleed_txt = 'VULNERABLE - Server is vulnerable to Heartbleed' \
if self.is_vulnerable_to_heartbleed \
else 'OK - Not vulnerable to Heartbleed'
return [self._format_title(self.COMMAND_TITLE), self._format_field('', heartbleed_txt)]
def as_xml(self):
xml_output = Element(self.scan_command.get_cli_argument(), title=self.COMMAND_TITLE)
xml_output.append(Element('openSslHeartbleed', isVulnerable=str(self.is_vulnerable_to_heartbleed)))
return xml_output
class VulnerableToHeartbleed(Exception):
"""Exception to raise during the handshake to hijack the flow and test for Heartbleed.
"""
class NotVulnerableToHeartbleed(Exception):
"""Exception to raise during the handshake to hijack the flow and test for Heartbleed.
"""
def do_handshake_with_heartbleed(self):
"""Modified do_handshake() to send a heartbleed payload and return the result.
"""
try:
# Start the handshake using nassl - will throw WantReadError right away
self._ssl.do_handshake()
except WantReadError:
# Send the Client Hello
len_to_read = self._network_bio.pending()
while len_to_read:
# Get the data from the SSL engine
handshake_data_out = self._network_bio.read(len_to_read)
# Send it to the peer
self._sock.send(handshake_data_out)
len_to_read = self._network_bio.pending()
# Build the heartbleed payload - based on
# https://blog.mozilla.org/security/2014/04/12/testing-for-heartbleed-vulnerability-without-exploiting-the-server/
payload = TlsHeartbeatRequestRecord.from_parameters(
tls_version=TlsVersionEnum[self._ssl_version.name],
heartbeat_data=b'\x01' * 16381
).to_bytes()
payload += TlsHeartbeatRequestRecord.from_parameters(
TlsVersionEnum[self._ssl_version.name],
heartbeat_data=b'\x01\x00\x00'
).to_bytes()
# Send the payload
self._sock.send(payload)
# Retrieve the server's response - directly read the underlying network socket
# Retrieve data until we get to the ServerHelloDone
# The server may send back a ServerHello, an Alert or a CertificateRequest first
did_receive_hello_done = False
remaining_bytes = b''
while not did_receive_hello_done:
try:
tls_record, len_consumed = TlsRecordParser.parse_bytes(remaining_bytes)
remaining_bytes = remaining_bytes[len_consumed::]
except NotEnoughData:
# Try to get more data
raw_ssl_bytes = self._sock.recv(16381)
if not raw_ssl_bytes:
# No data?
break
remaining_bytes = remaining_bytes + raw_ssl_bytes
continue
if isinstance(tls_record, TlsServerHelloDoneRecord):
did_receive_hello_done = True
elif isinstance(tls_record, TlsHandshakeRecord):
# Could be a ServerHello, a Certificate or a CertificateRequest if the server requires client auth
pass
elif isinstance(tls_record, TlsAlertRecord):
# Server returned a TLS alert
break
else:
raise ValueError('Unknown record? Type {}'.format(tls_record.header.type))
is_vulnerable_to_heartbleed = False
if did_receive_hello_done:
expected_heartbleed_payload = b'\x01' * 10
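        # The server is flagged vulnerable only if the 0x01 padding from our heartbeat
        # request shows up in the data it sends back during the handshake.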
if expected_heartbleed_payload in remaining_bytes:
            # Server replied with our heartbeat payload
is_vulnerable_to_heartbleed = True
else:
try:
raw_ssl_bytes = self._sock.recv(16381)
except socket.error:
# Server closed the connection after receiving the heartbleed payload
raise NotVulnerableToHeartbleed()
if expected_heartbleed_payload in raw_ssl_bytes:
                # Server replied with our heartbeat payload
is_vulnerable_to_heartbleed = True
if is_vulnerable_to_heartbleed:
raise VulnerableToHeartbleed()
else:
raise NotVulnerableToHeartbleed()
|
gpl-2.0
| 521,774,391,793,940,600 | 37.705556 | 120 | 0.679776 | false |
keseldude/brobot
|
brobot/plugins/users.py
|
1
|
1217
|
#===============================================================================
# brobot
# Copyright (C) 2010 Michael Keselman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#===============================================================================
from core import bot
class UsersPlugin(bot.CommandPlugin):
name = 'users'
def process(self, connection, source, target, args):
channel = self.ircbot.find_channel(connection.server, target)
if channel is not None:
num_users = len(channel.users)
return self.privmsg(target, u'%d Users in the channel.' % num_users)
|
gpl-3.0
| -2,537,083,521,998,450,000 | 40.965517 | 80 | 0.624486 | false |
amstart/demo
|
vote/polls/forms.py
|
1
|
1604
|
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Submit, HTML, Button, Row, Field
from crispy_forms.bootstrap import AppendedText, PrependedText, FormActions
class PremiseForm(forms.Form):
    # Fields referenced by the crispy layout below (assumed to be simple text inputs)
    text_input = forms.CharField()
    textarea = forms.CharField(widget=forms.Textarea())
    radio_buttons = forms.ChoiceField(
choices = (
('option_one', "Option one is this and that be sure to include why it's great"),
            ('option_two', "Option two can be something else and selecting it will deselect option one")
),
widget = forms.RadioSelect,
initial = 'option_two',
)
appended_text = forms.CharField(
help_text = "Here's more help text"
)
prepended_text = forms.CharField()
prepended_text_two = forms.CharField()
multicolon_select = forms.MultipleChoiceField(
choices = (('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5')),
)
# Uni-form
helper = FormHelper()
helper.form_class = 'form-horizontal'
helper.layout = Layout(
Field('text_input', css_class='input-xlarge'),
Field('textarea', rows="3", css_class='input-xlarge'),
'radio_buttons',
AppendedText('appended_text', '.00'),
PrependedText('prepended_text', '<input type="checkbox" checked="checked" value="" id="" name="">', active=True),
PrependedText('prepended_text_two', '@'),
'multicolon_select',
FormActions(
Submit('save_changes', 'Save changes', css_class="btn-primary"),
Submit('cancel', 'Cancel'),
)
)
|
mit
| -2,778,027,478,078,338,600 | 33.12766 | 121 | 0.613466 | false |
nicfitzgerald/pyrogue-game
|
map_utils.py
|
1
|
4764
|
from tdl.map import Map
from random import randint
from components.ai import BasicMonster
from components.fighter import Fighter
from entity import Entity
class GameMap(Map):
def __init__(self, width, height):
super().__init__(width, height)
self.explored = [[False for y in range(height)] for x in range(width)]
class Rect:
def __init__(self, x, y, w, h):
self.x1 = x
self.y1 = y
self.x2 = x + w
self.y2 = y + h
def center(self):
center_x = int((self.x1 + self.x2) / 2)
center_y = int((self.y1 + self.y2) / 2)
return (center_x, center_y)
def intersect(self, other):
# returns true if this rectangle intersects with another one
return (self.x1 <= other.x2 and self.x2 >= other.x1 and
self.y1 <= other.y2 and self.y2 >= other.y1)
def create_room(game_map, room):
# Go through the tiles and make them passable
for x in range(room.x1 + 1, room.x2):
for y in range(room.y1 + 1, room.y2):
game_map.walkable[x, y] = True
game_map.transparent[x, y] = True
def create_h_tunnel(game_map, x1, x2, y):
for x in range(min(x1, x2), max(x1, x2) + 1):
game_map.walkable[x, y] = True
game_map.transparent[x, y] = True
def create_v_tunnel(game_map, y1, y2, x):
for y in range(min(y1, y2), max(y1, y2) + 1):
game_map.walkable[x, y] = True
game_map.transparent[x, y] = True
def place_entities(room, entities, max_monsters_per_room, colors):
# Get a random number of monsters
number_of_monsters = randint(0, max_monsters_per_room)
for i in range(number_of_monsters):
# Choose a random location in the room
x = randint(room.x1 + 1, room.x2 - 1)
y = randint(room.y1 + 1, room.y2 - 1)
if not any([entity for entity in entities if entity.x == x and entity.y == y]):
if randint(0, 100) < 80:
fighter_component = Fighter(hp=10, defense=0, power=3)
ai_component = BasicMonster()
monster = Entity(x, y, 'o', colors.get('desaturated_green'), 'Orc', blocks=True,
fighter=fighter_component, ai=ai_component)
else:
fighter_component = Fighter(hp=16, defense=1, power=4)
ai_component = BasicMonster()
monster = Entity(x, y, 'T', colors.get('darker_green'), 'Troll', blocks=True,
fighter=fighter_component, ai=ai_component)
entities.append(monster)
def make_map(game_map, max_rooms, room_min_size, room_max_size,
map_width, map_height, player, entities, max_monsters_per_room, colors):
rooms = []
num_rooms = 0
for r in range(max_rooms):
# random width and height
w = randint(room_min_size, room_max_size)
h = randint(room_min_size, room_max_size)
# random position without going out of the boundaries of the map
x = randint(0, map_width - w - 1)
y = randint(0, map_height - h - 1)
# "Rect" class makes rectangles easier to work with
new_room = Rect(x, y, w, h)
# run through the other rooms and see if they intersect with this one
for other_room in rooms:
if new_room.intersect(other_room):
break
else:
# this means there are no intersections, so this room is valid
# "paint" it to the map's tiles
create_room(game_map, new_room)
# center coordinates of new room, will be useful later
(new_x, new_y) = new_room.center()
if num_rooms == 0:
# this is the first room, where the player starts at
player.x = new_x
player.y = new_y
else:
# all rooms after the first:
# connect it to the previous room with a tunnel
# center coordinates of previous room
(prev_x, prev_y) = rooms[num_rooms - 1].center()
# flip a coin (random number that is either 0 or 1)
if randint(0, 1) == 1:
# first move horizontally, then vertically
create_h_tunnel(game_map, prev_x, new_x, prev_y)
create_v_tunnel(game_map, prev_y, new_y, new_x)
else:
# first move vertically, then horizontally
create_v_tunnel(game_map, prev_y, new_y, prev_x)
create_h_tunnel(game_map, prev_x, new_x, new_y)
place_entities(new_room, entities, max_monsters_per_room, colors)
# finally, append the new room to the list
rooms.append(new_room)
num_rooms += 1
|
gpl-3.0
| 7,027,452,467,717,760,000 | 37.12 | 96 | 0.561293 | false |
open-synergy/opnsynid-l10n-indonesia
|
l10n_id_taxform_faktur_pajak_common/models/faktur_pajak_common.py
|
1
|
24141
|
# -*- coding: utf-8 -*-
# Copyright 2017 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp import models, fields, api
import openerp.addons.decimal_precision as dp
from datetime import datetime
import re
class FakturPajakCommon(models.AbstractModel):
_name = "l10n_id.faktur_pajak_common"
_description = "Faktur Pajak"
_inherit = ["mail.thread"]
@api.depends(
"transaction_type_id",
)
@api.multi
def _compute_jenis_transaksi(self):
for fp in self:
fp.enofa_jenis_transaksi = fp.transaction_type_id.code
@api.depends(
"transaction_type_id",
)
@api.multi
def _compute_fg_pengganti(self):
for fp in self:
fp.enofa_fg_pengganti = fp.fp_state
@api.depends(
"name",
)
@api.multi
def _compute_nomor_dokumen(self):
for fp in self:
fp.enofa_nomor_dokumen = fp.name
@api.depends(
"creditable",
)
@api.multi
def _compute_is_creditable(self):
for fp in self:
fp.enofa_is_creditable = fp.creditable
@api.depends(
"seller_branch_id",
"buyer_branch_id",
"fp_direction",
)
@api.multi
def _compute_nama(self):
for fp in self:
fp.enofa_nama = "-"
if fp.fp_direction == "keluaran":
fp.enofa_nama = fp.buyer_branch_id.name
elif fp.fp_direction == "masukan":
fp.enofa_nama = fp.seller_branch_id.name
@api.depends(
"seller_branch_id",
"buyer_branch_id",
"fp_direction",
)
@api.multi
def _compute_alamat_lengkap(self):
for fp in self:
fp.enofa_alamat_lengkap = "-"
if fp.fp_direction == "keluaran":
fp.enofa_alamat_lengkap = fp.buyer_branch_id.enofa_address
elif fp.fp_direction == "masukan":
fp.enofa_alamat_lengkap = fp.seller_branch_id.enofa_address
@api.depends(
"seller_branch_id",
"buyer_branch_id",
"fp_direction",
)
@api.multi
def _compute_npwp(self):
for fp in self:
fp.enofa_npwp = "000000000000000"
if fp.fp_direction == "keluaran":
if fp.seller_branch_id.vat:
npwp = fp.seller_branch_id.vat
                    fp.enofa_npwp = ""
                    for s in re.findall(r"\d+", npwp):
                        fp.enofa_npwp += s
elif fp.fp_direction == "masukan":
if fp.buyer_branch_id.vat:
npwp = fp.buyer_branch_id.vat
                    fp.enofa_npwp = ""
                    for s in re.findall(r"\d+", npwp):
                        fp.enofa_npwp += s
@api.depends(
"date",
)
@api.multi
def _compute_tanggal_dokumen(self):
for fp in self:
fp.enofa_tanggal_dokumen = "-"
if fp.date:
fp.enofa_tanggal_dokumen = datetime.strptime(
fp.date, "%Y-%m-%d").strftime(
"%d/%m/%Y")
@api.depends(
"taxform_period_id",
)
@api.multi
def _compute_masa_pajak(self):
for fp in self:
fp.enofa_masa_pajak = fp.taxform_period_id.code
@api.depends(
"taxform_year_id",
)
@api.multi
def _compute_tahun_pajak(self):
for fp in self:
fp.enofa_tahun_pajak = fp.taxform_year_id.code
@api.depends(
"base",
)
@api.multi
def _compute_jumlah_dpp(self):
for fp in self:
fp.enofa_jumlah_dpp = int(fp.base_company_currency)
@api.depends(
"ppn_amount",
)
@api.multi
def _compute_jumlah_ppn(self):
for fp in self:
fp.enofa_jumlah_ppn = int(fp.ppn_amount)
@api.depends(
"ppnbm_amount",
)
@api.multi
def _compute_jumlah_ppnbm(self):
for fp in self:
fp.enofa_jumlah_ppnbm = int(fp.ppnbm_amount)
@api.depends(
"date",
)
@api.multi
def _compute_taxform_period(self):
for fp in self:
fp.taxform_period_id = False
if fp.date:
fp.taxform_period_id = self.env["l10n_id.tax_period"].\
_find_period(fp.date).id
@api.depends(
"taxform_period_id",
)
@api.multi
def _compute_taxform_year(self):
for fp in self:
fp.taxform_year_id = False
if fp.taxform_period_id:
fp.taxform_year_id = fp.taxform_period_id.year_id.id
@api.depends(
"type_id",
)
def _compute_transaction_type(self):
for fp in self:
fp.allowed_transaction_type_ids = fp.type_id.\
allowed_transaction_type_ids.ids
@api.depends(
"type_id",
"transaction_type_id",
)
def _compute_tax_code(self):
obj_dpp_code = self.env["l10n_id.faktur_pajak_allowed_dpp_tax_code"]
obj_ppn_code = self.env["l10n_id.faktur_pajak_allowed_ppn_tax_code"]
obj_ppnbm_code = self.env[
"l10n_id.faktur_pajak_allowed_ppnbm_tax_code"]
for fp in self:
criteria = [
("type_id", "=", fp.type_id.id),
("transaction_type_id", "=", fp.transaction_type_id.id),
]
dpp_codes = obj_dpp_code.search(criteria)
for dpp_code in dpp_codes:
fp.allowed_dpp_tax_code_ids += dpp_code.tax_code_ids
ppn_codes = obj_ppn_code.search(criteria)
for ppn_code in ppn_codes:
fp.allowed_ppn_tax_code_ids += ppn_code.tax_code_ids
ppnbm_codes = obj_ppnbm_code.search(criteria)
for ppnbm_code in ppnbm_codes:
fp.allowed_ppnbm_tax_code_ids += ppnbm_code.tax_code_ids
@api.depends(
"type_id",
)
def _compute_additional_flag(self):
for fp in self:
fp.allowed_additional_flag_ids = fp.type_id.\
allowed_additional_flag_ids.ids
@api.depends(
"type_id",
)
@api.multi
def _compute_allow_reverse(self):
for fp in self:
fp.allow_reverse = fp.type_id.allow_reverse
@api.depends(
"type_id",
)
@api.multi
def _compute_allow_multiple_reference(self):
for fp in self:
fp.allow_multiple_reference = fp.type_id.allow_multiple_reference
@api.depends(
"reverse_id",
)
@api.multi
def _compute_nomor_dokumen_balik(self):
for fp in self:
fp.enofa_nomor_dokumen_balik = "-"
if fp.reverse_id:
fp.enofa_nomor_dokumen_balik = fp.reverse_id.name
@api.depends(
"reverse_id",
)
@api.multi
def _compute_tanggal_dokumen_balik(self):
for fp in self:
fp.enofa_tanggal_dokumen_balik = "-"
if fp.reverse_id:
fp.enofa_tanggal_dokumen_balik = datetime.strptime(
fp.reverse_id.date, "%Y-%m-%d").strftime(
"%d/%m/%Y")
@api.depends(
"reference_id",
"reference_ids",
"allow_multiple_reference",
)
@api.multi
def _compute_all_reference(self):
for fp in self:
if fp.type_id.allow_multiple_reference:
fp.all_reference_ids = fp.reference_ids.ids
else:
fp.all_reference_ids = fp.reference_id and \
[fp.reference_id.id] or False
@api.depends(
"type_id",
)
@api.multi
def _compute_allow_creditable(self):
for fp in self:
fp.allow_creditable = fp.type_id.allow_creditable
@api.depends(
"type_id",
)
@api.multi
def _compute_allow_substitute(self):
for fp in self:
fp.allow_substitute = fp.type_id.allow_substitute
name = fields.Char(
string="# Faktur Pajak",
required=True,
readonly=True,
default="/",
states={
"draft": [("readonly", False)],
},
)
@api.model
def _default_company_id(self):
return self.env.user.company_id.id
company_id = fields.Many2one(
string="Company",
comodel_name="res.company",
required=True,
readonly=True,
default=lambda self: self._default_company_id(),
states={
"draft": [("readonly", False)],
},
)
currency_id = fields.Many2one(
string="Currency",
comodel_name="res.currency",
required=True,
readonly=True,
states={
"draft": [("readonly", False)]},
)
@api.model
def _default_company_currency(self):
return self.env.user.company_id.currency_id.id
company_currency_id = fields.Many2one(
string="Company Currency",
comodel_name="res.currency",
required=True,
readonly=True,
default=lambda self: self._default_company_currency(),
states={
"draft": [("readonly", False)]},
)
@api.model
def _default_fp_direction(self):
fp_type = self._get_faktur_pajak_type()
if fp_type:
return fp_type.fp_direction
else:
return "keluaran"
fp_direction = fields.Selection(
string="Jenis Faktur Pajak",
selection=[
("masukan", "Masukan"),
("keluaran", "Keluaran"),
],
required=True,
readonly=True,
states={
"draft": [("readonly", False)],
},
default=lambda self: self._default_fp_direction(),
)
transaction_type_id = fields.Many2one(
string="Transaction Type",
comodel_name="l10n_id.faktur_pajak_transaction_type",
required=True,
readonly=True,
states={
"draft": [("readonly", False)]},
)
@api.model
def _get_faktur_pajak_type(self):
return False
@api.model
def _default_faktur_pajak_type(self):
fp_type = self._get_faktur_pajak_type()
if fp_type:
return fp_type.id
else:
return False
type_id = fields.Many2one(
string="Type",
comodel_name="l10n_id.faktur_pajak_type",
required=True,
default=lambda self: self._default_faktur_pajak_type(),
)
@api.model
def _default_fp_state(self):
return "0"
fp_state = fields.Selection(
string="Normal/Penggantian?",
selection=[
("0", "Normal"),
("1", "Penggantian"),
],
required=True,
readonly=True,
states={
"draft": [("readonly", False)],
},
default=lambda self: self._default_fp_state(),
)
@api.model
def _default_seller_partner(self):
if self._default_fp_direction() == "keluaran":
return self.env.user.company_id.partner_id.id
seller_partner_id = fields.Many2one(
comodel_name="res.partner",
string="Seller",
required=True,
default=lambda self: self._default_seller_partner(),
readonly=True,
states={
"draft": [("readonly", False)],
},
)
seller_branch_id = fields.Many2one(
comodel_name="res.partner",
string="Seller Branch",
required=True,
readonly=True,
states={
"draft": [("readonly", False)],
},
)
@api.model
def _default_buyer_partner(self):
if self._default_fp_direction() == "masukan":
return self.env.user.company_id.partner_id.id
buyer_partner_id = fields.Many2one(
comodel_name="res.partner",
string="Buyer",
required=True,
default=lambda self: self._default_buyer_partner(),
readonly=True,
states={
"draft": [("readonly", False)],
},
)
buyer_branch_id = fields.Many2one(
comodel_name="res.partner",
string="Buyer Branch",
required=True,
readonly=True,
states={
"draft": [("readonly", False)],
},
)
base = fields.Float(
string="Base",
digits_compute=dp.get_precision("Account"),
required=True,
readonly=True,
states={
"draft": [("readonly", False)]},
)
base_company_currency = fields.Float(
string="Base in Company Currency",
digits_compute=dp.get_precision("Account"),
required=True,
readonly=True,
states={
"draft": [("readonly", False)]},
)
ppn_amount = fields.Float(
string="PPn Amount",
digits_compute=dp.get_precision("Account"),
required=True,
readonly=True,
states={
"draft": [("readonly", False)]},
)
ppnbm_amount = fields.Float(
string="PPnBm Amount",
digits_compute=dp.get_precision("Account"),
required=True,
readonly=True,
states={
"draft": [("readonly", False)]},
)
date = fields.Date(
string="Document Date",
required=True,
readonly=True,
states={
"draft": [("readonly", False)],
},
)
taxform_period_id = fields.Many2one(
string="Masa Pajak",
comodel_name="l10n_id.tax_period",
compute="_compute_taxform_period",
store=True,
)
taxform_year_id = fields.Many2one(
string="Tahun Pajak",
comodel_name="l10n_id.tax_year",
compute="_compute_taxform_year",
store=True,
)
note = fields.Text(
string="Note",
)
allow_multiple_reference = fields.Boolean(
string="Allow Multiple Doc. References",
compute="_compute_allow_multiple_reference",
store=False,
)
reference_id = fields.Many2one(
string="Doc. Reference",
comodel_name="account.move.line",
readonly=True,
states={
"draft": [("readonly", False)]},
)
reference_ids = fields.Many2many(
string="Doc. References",
comodel_name="account.move.line",
relation="rel_fp_dummy",
readonly=True,
states={
"draft": [("readonly", False)]},
)
all_reference_ids = fields.Many2many(
string="Doc. References",
comodel_name="account.move",
relation="rel_fp_all_dummy",
compute="_compute_all_reference",
store=True,
)
allowed_transaction_type_ids = fields.Many2many(
string="Allowed Transaction Type",
comodel_name="l10n_id.faktur_pajak_transaction_type",
compute="_compute_transaction_type",
store=False,
)
allowed_dpp_tax_code_ids = fields.Many2many(
string="Allowed DPP Tax Codes",
comodel_name="account.tax.code",
compute="_compute_tax_code",
store=False,
)
allowed_ppn_tax_code_ids = fields.Many2many(
string="Allowed PPn Tax Codes",
comodel_name="account.tax.code",
compute="_compute_tax_code",
store=False,
)
allowed_ppnbm_tax_code_ids = fields.Many2many(
string="Allowed PPnBm Tax Codes",
comodel_name="account.tax.code",
compute="_compute_tax_code",
store=False,
)
allowed_additional_flag_ids = fields.Many2many(
string="Allowed Additional Flags",
comodel_name="l10n_id.faktur_pajak_additional_flag",
compute="_compute_additional_flag",
store=False,
)
additional_flag_id = fields.Many2one(
string="Additional Flag",
comodel_name="l10n_id.faktur_pajak_additional_flag",
readonly=True,
states={
"draft": [("readonly", False)]},
)
reverse_id = fields.Many2one(
string="Reverse From",
comodel_name="l10n_id.faktur_pajak_common",
readonly=True,
states={
"draft": [("readonly", False)]},
)
allow_reverse = fields.Boolean(
string="Allow to Reverse Document",
compute="_compute_allow_reverse",
store=False,
)
substitute_id = fields.Many2one(
string="Substitute For",
comodel_name="l10n_id.faktur_pajak_common",
readonly=True,
states={
"draft": [("readonly", False)]},
)
allow_substitute = fields.Boolean(
string="Allow to Substitute Document",
compute="_compute_allow_substitute",
store=False,
)
allow_creditable = fields.Boolean(
string="Allow to Creditable",
compute="_compute_allow_creditable",
store=False,
)
@api.model
def _default_creditable(self):
return "0"
creditable = fields.Selection(
string="Bisa Dikreditkan?",
selection=[
("0", "Tidak Dikreditkan"),
("1", "Dikreditkan"),
],
required=True,
readonly=True,
states={
"draft": [("readonly", False)],
},
default=lambda self: self._default_creditable(),
)
state = fields.Selection(
string="State",
required=True,
readonly=True,
default="draft",
track_visibility="onchange",
selection=[
("draft", "Draft"),
("confirmed", "Waiting for Approval"),
("done", "Done"),
("cancelled", "Cancelled"),
],
)
# E-NOFA FIELDS
enofa_jenis_transaksi = fields.Char(
string="KD_JENIS_TRANSAKSI",
compute="_compute_jenis_transaksi",
store=False,
)
enofa_fg_pengganti = fields.Char(
string="FG_PENGGANTI",
compute="_compute_fg_pengganti",
store=False,
)
enofa_nomor_dokumen = fields.Char(
string="NOMOR_DOKUMEN",
compute="_compute_nomor_dokumen",
store=False,
)
enofa_masa_pajak = fields.Char(
string="MASA_PAJAK",
compute="_compute_masa_pajak",
store=False,
)
enofa_tahun_pajak = fields.Char(
string="TAHUN_PAJAK",
compute="_compute_tahun_pajak",
store=False,
)
enofa_tanggal_dokumen = fields.Char(
string="TANGGAL_DOKUMEN",
compute="_compute_tanggal_dokumen",
store=False,
)
enofa_npwp = fields.Char(
string="NPWP",
compute="_compute_npwp",
store=False,
)
enofa_nama = fields.Char(
string="NAMA",
compute="_compute_nama",
store=False,
)
enofa_alamat_lengkap = fields.Char(
string="ALAMAT_LENGKAP",
compute="_compute_alamat_lengkap",
store=False,
)
enofa_jumlah_dpp = fields.Integer(
string="JUMLAH_DPP",
compute="_compute_jumlah_dpp",
store=False,
)
enofa_jumlah_ppn = fields.Integer(
string="JUMLAH_PPN",
compute="_compute_jumlah_ppn",
store=False,
)
enofa_jumlah_ppnbm = fields.Integer(
string="JUMLAH_DPP",
compute="_compute_jumlah_ppnbm",
store=False,
)
enofa_is_creditable = fields.Char(
string="IS_CREDITABLE",
compute="_compute_is_creditable",
store=False,
)
enofa_nomor_dokumen_balik = fields.Char(
string="-",
compute="_compute_nomor_dokumen_balik",
store=False,
)
enofa_tanggal_dokumen_balik = fields.Char(
string="-",
compute="_compute_tanggal_dokumen_balik",
store=False,
)
@api.multi
def workflow_action_confirm(self):
for fp in self:
fp.write(
fp._prepare_confirm_data())
@api.multi
def _prepare_confirm_data(self):
self.ensure_one()
return {
"state": "confirmed",
}
@api.multi
def workflow_action_done(self):
for fp in self:
fp.write(
fp._prepare_done_data())
@api.multi
def _prepare_done_data(self):
self.ensure_one()
return {
"state": "done",
}
@api.multi
def workflow_action_cancel(self):
for fp in self:
fp.write(
fp._prepare_cancel_data())
@api.multi
def _prepare_cancel_data(self):
self.ensure_one()
return {
"state": "cancelled",
}
@api.multi
def workflow_action_reset(self):
for fp in self:
fp.write(
fp._prepare_reset_data())
@api.multi
def _prepare_reset_data(self):
self.ensure_one()
return {
"state": "draft",
}
@api.onchange("seller_partner_id")
def onchange_seller(self):
if self.seller_partner_id:
partner = self.seller_partner_id.commercial_partner_id
if self.seller_branch_id:
branch = self.seller_branch_id.commercial_partner_id
if partner != branch:
self.seller_branch_id = False
else:
self.seller_branch_id = self.seller_partner_id
else:
self.seller_branch_id = False
@api.onchange(
"reference_ids",
"reference_id",
)
def onchange_all_reference(self):
obj_line = self.env["account.move.line"]
if self.fp_direction == "masukan":
partner_id = self.seller_partner_id and \
self.seller_partner_id.id or 0
else:
partner_id = self.buyer_partner_id and \
self.buyer_partner_id.id or 0
criteria = [
("move_id", "in", self.all_reference_ids.ids),
("tax_code_id", "in", self.allowed_dpp_tax_code_ids.ids),
("partner_id", "=", partner_id),
]
for line in obj_line.search(criteria):
if line.currency_id:
self.base += abs(line.amount_currency)
else:
self.base += abs(line.tax_amount)
self.base_company_currency += abs(line.tax_amount)
criteria = [
("move_id", "in", self.all_reference_ids.ids),
("tax_code_id", "in", self.allowed_ppn_tax_code_ids.ids),
("partner_id", "=", partner_id),
]
for line in obj_line.search(criteria):
self.ppn_amount += abs(line.tax_amount)
criteria = [
("move_id", "in", self.all_reference_ids.ids),
("tax_code_id", "in", self.allowed_ppnbm_tax_code_ids.ids),
("partner_id", "=", partner_id),
]
for line in obj_line.search(criteria):
self.ppnbm_amount += abs(line.tax_amount)
@api.onchange("buyer_partner_id")
def onchange_buyer(self):
if self.buyer_partner_id:
partner = self.buyer_partner_id.commercial_partner_id
if self.buyer_branch_id:
branch = self.buyer_branch_id.commercial_partner_id
if partner != branch:
self.buyer_branch_id = False
else:
self.buyer_branch_id = self.buyer_partner_id
else:
self.buyer_branch_id = False
@api.onchange("company_id")
def onchange_company_id(self):
self.currency_id = False
if self.company_id:
self.currency_id = self.company_id.currency_id.id
|
agpl-3.0
| -7,488,912,429,143,546,000 | 27.440244 | 77 | 0.518413 | false |
kije/PySpyX
|
pyspy/migrations/0004_auto_20150515_1104.py
|
1
|
1390
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('pyspy', '0003_surveillancevideos'),
]
operations = [
migrations.CreateModel(
name='SurveillanceVideo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, help_text='',
verbose_name='ID')),
('date', models.DateTimeField(help_text='', verbose_name='Capture Date', auto_now_add=True)),
('path', models.CharField(help_text='', max_length=512, verbose_name='Path')),
('last_modified',
models.DateTimeField(auto_now=True, help_text='', null=True, verbose_name='Last modified')),
],
),
migrations.DeleteModel(
name='SurveillanceVideos',
),
migrations.AddField(
model_name='camera',
name='last_modified',
field=models.DateTimeField(auto_now=True, help_text='', null=True, verbose_name='Last modified'),
),
migrations.AlterField(
model_name='camera',
name='name',
field=models.CharField(help_text='', max_length=150, null=True, verbose_name='Name', blank=True),
),
]
|
gpl-3.0
| 2,700,681,298,944,059,000 | 36.567568 | 109 | 0.553237 | false |
ShaolongHu/Nitrate
|
tcms/xmlrpc/utils.py
|
1
|
7751
|
# -*- coding: utf-8 -*-
import re
from django.db.models import Count, FieldDoesNotExist
from tcms.management.models import Product
COUNT_DISTINCT = 0
QUERY_DISTINCT = 1
ACCEPTABLE_BOOL_VALUES = ('0', '1', 0, 1, True, False)
def parse_bool_value(value):
if value in ACCEPTABLE_BOOL_VALUES:
        if value == '0':
return False
        elif value == '1':
return True
else:
return value
else:
raise ValueError('Unacceptable bool value.')
def pre_check_product(values):
if isinstance(values, dict):
if not values.get('product'):
return
product_str = values['product']
else:
product_str = values
if not (isinstance(product_str, str) or isinstance(product_str, int)):
raise ValueError('The type of product is not recognizable.')
try:
product_id = int(product_str)
return Product.objects.get(id=product_id)
except ValueError:
return Product.objects.get(name=product_str)
def pre_process_ids(value):
if isinstance(value, list):
return [isinstance(c, int) and c or int(c.strip()) for c in value if c]
if isinstance(value, str):
return [int(c.strip()) for c in value.split(',') if c]
if isinstance(value, int):
return [value]
raise TypeError('Unrecognizable type of ids')
def compare_list(src_list, dest_list):
return list(set(src_list) - set(dest_list))
def _lookup_fields_in_model(cls, fields):
"""Lookup ManyToMany fields in current table and related tables. For
distinct duplicate rows when using inner join
@param cls: table model class
@type cls: subclass of django.db.models.Model
@param fields: fields in where condition.
@type fields: list
@return: whether use distinct or not
@rtype: bool
Example:
cls is TestRun (<class 'tcms.testruns.models.TestRun'>)
fields is 'plan__case__is_automated'
| | |----- Normal Field in TestCase
| |--------------- ManyToManyKey in TestPlan
|--------------------- ForeignKey in TestRun
1. plan is a ForeignKey field of TestRun and it will trigger getting the
related model TestPlan by django orm framework.
2. case is a ManyToManyKey field of TestPlan and it will trigger using
INNER JOIN to join TestCase, here will be many duplicated rows.
3. is_automated is a local field of TestCase only filter the rows (where
condition).
    So this method will find out that case is an m2m field and notify the
    outer method to use distinct to avoid duplicated rows.
"""
for field in fields:
try:
field_info = cls._meta.get_field_by_name(field)
if field_info[-1]:
yield True
else:
if getattr(field_info[0], 'related', None):
cls = field_info[0].related.parent_model
except FieldDoesNotExist:
pass
def _need_distinct_m2m_rows(cls, fields):
"""Check whether the query string has ManyToMany field or not, return
False if the query string is empty.
@param cls: table model class
@type cls: subclass of django.db.models.Model
@param fields: fields in where condition.
@type fields: list
@return: whether use distinct or not
@rtype: bool
"""
return next(_lookup_fields_in_model(cls, fields), False) \
if fields else False
def distinct_m2m_rows(cls, values, op_type):
"""By django model field looking up syntax, loop values and check the
condition if there is a multi-tables query.
@param cls: table model class
@type cls: subclass of django.db.models.Model
@param values: fields in where condition.
@type values: dict
@return: QuerySet
@rtype: django.db.models.query.QuerySet
"""
flag = False
for field in values.iterkeys():
if '__' in field:
if _need_distinct_m2m_rows(cls, field.split('__')):
flag = True
break
qs = cls.objects.filter(**values)
if op_type == COUNT_DISTINCT:
return qs.aggregate(Count('pk', distinct=True))['pk__count'] if flag \
else qs.count()
elif op_type == QUERY_DISTINCT:
return qs.distinct() if flag else qs
else:
raise TypeError('Not implement op type %s' % op_type)
def distinct_count(cls, values):
return distinct_m2m_rows(cls, values, op_type=COUNT_DISTINCT)
def distinct_filter(cls, values):
return distinct_m2m_rows(cls, values, op_type=QUERY_DISTINCT)
class Comment(object):
def __init__(self, request, content_type, object_pks, comment=None):
self.request = request
self.content_type = content_type
self.object_pks = object_pks
self.comment = comment
def add(self):
import time
from django.contrib import comments
from django.contrib.comments import signals
from django.db import models
comment_form = comments.get_form()
try:
model = models.get_model(*self.content_type.split('.', 1))
targets = model._default_manager.filter(pk__in=self.object_pks)
except:
raise
for target in targets.iterator():
d_form = comment_form(target)
timestamp = str(time.time()).split('.')[0]
object_pk = str(target.pk)
data = {
'content_type': self.content_type,
'object_pk': object_pk,
'timestamp': timestamp,
'comment': self.comment
}
security_hash_dict = {
'content_type': self.content_type,
'object_pk': object_pk,
'timestamp': timestamp
}
data['security_hash'] = d_form.generate_security_hash(
**security_hash_dict)
form = comment_form(target, data=data)
# Response the errors if got
if not form.is_valid():
return form.errors
# Otherwise create the comment
comment = form.get_comment_object()
comment.ip_address = self.request.META.get("REMOTE_ADDR", None)
if self.request.user.is_authenticated():
comment.user = self.request.user
# Signal that the comment is about to be saved
signals.comment_will_be_posted.send(
sender=comment.__class__,
comment=comment,
request=self.request
)
# Save the comment and signal that it was saved
comment.save()
signals.comment_was_posted.send(
sender=comment.__class__,
comment=comment,
request=self.request
)
return
estimated_time_re = re.compile(r'^(\d+[d])?(\d+[h])?(\d+[m])?(\d+[s])?$')
def pre_process_estimated_time(value):
    '''pre process estimated_time.
support value - HH:MM:SS & xdxhxmxs
return xdxhxmxs
'''
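    # For example, '02:15:30' is rewritten to '02h15m30s', while '2h15m30s' is returned as-is.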
if isinstance(value, str):
match = estimated_time_re.match(value.replace(' ', ''))
if match:
return value
else:
# FIXME: missed validation to invalid value in HH:MM:SS format.
# for example: sfsdfs:sfwerwe:rerwerwe
raw_estimated_time = value.split(':')
if len(raw_estimated_time) == 3:
hours, minutes, seconds = raw_estimated_time
return '{0}h{1}m{2}s'.format(hours, minutes, seconds)
else:
                raise ValueError('Invalid estimated_time format.')
else:
        raise ValueError('Invalid estimated_time format.')
|
gpl-2.0
| 613,022,896,010,496,300 | 30.636735 | 79 | 0.589859 | false |
misabelcarde/kivy-mobile-computing
|
src/BoardPlay.py
|
1
|
3832
|
from kivy.uix.gridlayout import GridLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from Singleton import *
from Instructions import Instructions
from OwnBoard import BaseOwnBoard
from BoardFunctions import *
from SendPackage import *
from kivy.lang import Builder
Builder.load_string('''
<BoardPlay>:
id: boardPlayID
cols: 11
rows: 11
''')
class BoardPlay(GridLayout):
'''Generation of the board and functions for complete it'''
def __init__(self, **kwargs):
super(BoardPlay, self).__init__(**kwargs)
self.diccp = {}
self.generateBoard()
self.base = Singleton()
def generateBoard(self):
'''Generation of buttons and labels. Buttons are added to a dictionary where key:id and value:button'''
letters = ('A','B','C','D','E','F','G','H','I','J')
self.add_widget(Label(text=''))
if(Singleton().mode == 'TwoPlayers'):
for i in range(1,11):
self.add_widget(Label(text=str(i)))
for j in range(0,len(letters)):
self.add_widget(Label(text=letters[j]))
for k in range(0,10):
button=Button(id=(str(j)+'_'+str(k)),background_color=(0,2,255,1))
button.bind(on_press=self.putBoat)
self.add_widget(button)
self.diccp[str(j)+'_'+str(k)]=button
if Singleton().matrix[j][k] == 2:
button.background_color = [255,0,0,1]
button.text = "BOM"
button.disabled = True
elif Singleton().matrix[j][k] == -1:
button.background_color = [0,2,255,1]
button.text ='·'
button.font_size = 50
button.disabled = True
if Singleton().turno == 1 :
button.disabled = True
else:
#ListenSocket()
for i in range(1,11):
self.add_widget(Label(text=str(i)))
for j in range(0,len(letters)):
self.add_widget(Label(text=letters[j]))
for k in range(0,10):
button=Button(id=(str(j)+'_'+str(k)),background_color=(0,2,255,1))
button.bind(on_press=self.putBoat)
self.add_widget(button)
self.diccp[str(j)+'_'+str(k)]=button
if Singleton().matrix2[j][k] == 2:
button.background_color = [255,0,0,1]
button.text = "BOM"
button.disabled = True
elif Singleton().matrix2[j][k] == -1:
button.background_color = [0,2,255,1]
button.text ='·'
button.font_size = 50
button.disabled = True
def putBoat(self, button):
'''Behaviour of board's cells (buttons)'''
limits = getLimitingButtons(button)
boatsIds = getBoatsIds()
pos = getButtonPosition(button)
if(Singleton().mode == 'TwoPlayers'):
if self.base.matrix[pos[0]][pos[1]] == 1:
self.base.matrix[pos[0]][pos[1]] = 2
button.background_color = [255,0,0,1]
button.text = "BOM"
button.disabled = True
self.base.aux+=1
if self.base.aux == 20:
self.base.winner=1
elif self.base.matrix[pos[0]][pos[1]] == 0:
self.base.matrix[pos[0]][pos[1]] = -1
button.background_color = [0,2,255,1]
button.text ='·'
button.font_size = 50
button.disabled = True
self.base.turno = 1
else:
if self.base.matrix2[pos[0]][pos[1]] == 1:
self.base.matrix2[pos[0]][pos[1]] = 2
button.background_color = [255,0,0,1]
button.text = "BOM"
button.disabled = True
self.base.aux+=1
if self.base.aux == 20:
self.base.winner=1
elif self.base.matrix2[pos[0]][pos[1]] == 0:
self.base.matrix2[pos[0]][pos[1]] = -1
button.background_color = [0,2,255,1]
button.text ='·'
button.font_size = 50
button.disabled = True
                # only for two players: self.base.turno = 1
Singleton().gameboard.clear_widgets(children=None)
Singleton().gameboard2.clear_widgets(children=None)
if self.base.mode == 'TwoPlayers':
Singleton().gameboard2.on_pre_enter()
else:
Singleton().gameboard.on_pre_enter()
send(Singleton().opponentIP,str(self.base.matrix2))
|
mit
| 9,012,469,336,740,509,000 | 29.388889 | 105 | 0.627743 | false |
vladikoff/loop-server
|
loadtests/loadtest.py
|
1
|
5543
|
from gevent import monkey
monkey.patch_all()
import json
import gevent
from requests_hawk import HawkAuth
from loads.case import TestCase
class TestLoop(TestCase):
def setUp(self):
self.wss = []
def tearDown(self):
for ws in self.wss:
ws.close()
# XXX this is missing in ws4py
ws._th.join()
if ws.sock:
ws.sock.close()
def _send_ws_message(self, ws, **msg):
return ws.send(json.dumps(msg))
def create_ws(self, *args, **kw):
ws = TestCase.create_ws(self, *args, **kw)
self.wss.append(ws)
return ws
def test_all(self):
self.register()
token = self.generate_token()
call_data = self.initiate_call(token)
calls = self.list_pending_calls()
self._test_websockets(token, call_data, calls)
def _test_websockets(self, token, call_data, calls):
progress_url = call_data['progressURL']
websocket_token = call_data['websocketToken']
call_id = call_data['callId']
caller_alerts = []
callee_alerts = []
self.connected = False
def _handle_callee(message_data):
message = json.loads(message_data.data)
callee_alerts.append(message)
state = message.get('state')
messageType = message.get('messageType')
if messageType == "progress" and state == "connecting":
self._send_ws_message(
callee_ws,
messageType="action",
event="media-up")
caller_ws.receive()
elif messageType == "progress" and state == "connected":
self.connected = True
def _handle_caller(message_data):
message = json.loads(message_data.data)
caller_alerts.append(message)
state = message.get('state')
messageType = message.get('messageType')
if messageType == "hello" and state == "init":
# This is the first message, Ask the second party to connect.
self._send_ws_message(
callee_ws,
messageType='hello',
auth=calls[0]['websocketToken'],
callId=call_id)
callee_ws.receive()
elif messageType == "progress" and state == "alerting":
self._send_ws_message(
caller_ws,
messageType="action",
event="accept")
callee_ws.receive()
elif messageType == "progress" and state == "connecting":
self._send_ws_message(
caller_ws,
messageType="action",
event="media-up")
callee_ws.receive()
elif messageType == "progress" and state == "half-connected":
caller_ws.receive()
elif messageType == "progress" and state == "connected":
self.connected = True
# let's connect to the web socket until it gets closed
callee_ws = self.create_ws(progress_url, callback=_handle_callee)
caller_ws = self.create_ws(progress_url, callback=_handle_caller)
self._send_ws_message(
caller_ws,
messageType='hello',
auth=websocket_token,
callId=call_id)
while not self.connected:
gevent.sleep(.5)
def _get_json(self, resp):
try:
return resp.json()
except Exception:
print resp.text
raise
def register(self):
resp = self.session.post(
self.server_url + '/registration',
data={'simple_push_url': 'http://httpbin.org/deny'})
self.assertEquals(200, resp.status_code,
"Registration failed: %s" % resp.content)
try:
self.hawk_auth = HawkAuth(
hawk_session=resp.headers['hawk-session-token'],
server_url=self.server_url)
except KeyError:
print resp
raise
def generate_token(self):
resp = self.session.post(
self.server_url + '/call-url',
data=json.dumps({'callerId': 'alexis@mozilla.com'}),
headers={'Content-Type': 'application/json'},
auth=self.hawk_auth
)
self.assertEquals(resp.status_code, 200,
"Call-Url creation failed: %s" % resp.content)
data = self._get_json(resp)
call_url = data.get('callUrl', data.get('call_url'))
return call_url.split('/').pop()
def initiate_call(self, token):
# This happens when not authenticated.
resp = self.session.post(
self.server_url + '/calls/%s' % token,
data=json.dumps({"callType": "audio-video"}),
headers={'Content-Type': 'application/json'}
)
self.assertEquals(resp.status_code, 200,
"Call Initialization failed: %s" % resp.content)
return self._get_json(resp)
def list_pending_calls(self):
resp = self.session.get(
self.server_url + '/calls?version=200',
auth=self.hawk_auth)
data = self._get_json(resp)
return data['calls']
def revoke_token(self, token):
# You don't need to be authenticated to revoke a token.
self.session.delete(self.server_url + '/call-url/%s' % token)
|
mpl-2.0
| -7,240,788,177,399,997,000 | 31.605882 | 77 | 0.532383 | false |
ravenshooter/BA_Analysis
|
Preprocess.py
|
1
|
5604
|
import numpy
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import scipy
import mdp
import csv
from thread import start_new_thread
import DataSet
from DataAnalysis import plot
from Main import getProjectPath
def readFileToNumpy(fileName):
reader=csv.reader(open(fileName,"rb"),delimiter=',')
x=list(reader)
return numpy.array(x[1:]).astype('float')
def separateInputData(fileData,removeErrors=True):
if removeErrors:
error_inds = fileData[:,-1]==False
fileData = fileData[error_inds]
fused = numpy.atleast_2d(fileData[:,1:4])
gyro = numpy.atleast_2d(fileData[:,4:7])
acc = numpy.atleast_2d(fileData[:,7:10])
targets = numpy.atleast_2d(fileData[:,10:])
return fused, gyro, acc, targets
def transformToDelta(vals):
newVals = numpy.zeros((len(vals),len(vals[0])))
for i in range(1,len(vals)):
newVals[i-1] = vals[i]-vals[i-1]
return newVals
def removeLOverflow(fused):
for j in range(0,3):
for i in range(1,len(fused)):
if numpy.abs(fused[i-1,j] - fused[i,j]) > numpy.pi:
fused[i:,j] = fused[i:,j] * -1
return fused
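# Sliding-window smoother: sums the absolute value of every channel per sample, then
# averages that activation level over a window of +/- `width` samples around each point.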
def applyActivationFilter(inputData, width):
actLevel = numpy.sum(numpy.abs(inputData),1)
target = numpy.zeros((len(inputData),1))
    for i in range(width,len(inputData)-width):
target[i] = numpy.mean(actLevel[i-width:i+width])
return target
def centerAndNormalize(inputData):
means = numpy.mean(inputData, 0)
centered = inputData - means
vars = numpy.std(centered, 0)
normalized = centered/vars
return normalized, means, vars
def getTrainingBeginAndEndIndex(targetSig):
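    # Returns the index just before the first 1 and just after the last 1 in targetSig,
    # e.g. [0, 0, 1, 1, 0] -> (1, 4).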
beginInd = 0
endInd = len(targetSig)
for i in range(0,len(targetSig)):
if targetSig[i] == 1:
beginInd= i-1;
break
for i in range(0,len(targetSig)):
if targetSig[len(targetSig)-1-i] == 1:
endInd= len(targetSig)-i;
break
return beginInd,endInd
def formatDataSet(data):
print data.shape
newStart = input("Start:")
newEnd = input("End:")
newData = data[newStart:newEnd,:]
return newData
def formatTargetFilter(data):
treshold = input('Treshold:')
targetFunction = applyFormatTargetFilter(data, treshold)
plt.figure()
plt.plot(data[:,9])
plt.plot(data[:,10])
plt.plot(targetFunction)
return targetFunction
def applyFormatTargetFilter(data, treshold):
targetFunction = (data[:,10] > treshold).astype(float)
return numpy.atleast_2d(targetFunction).T
def removeArea(data):
cutOutStart = input("Start:")
cutOutEnd = input("End:")
newDataStart = data[:cutOutStart,:]
newDataEnd = data[cutOutEnd:,:]
return numpy.concatenate((newDataStart,newDataEnd))
def plotData(data):
plt.figure()
plt.clf()
plt.subplot(411)
plt.title('Fused')
plt.plot(data[:,0:3])
plt.plot(data[:,9])
plt.plot(data[:,10])
plt.subplot(412)
plt.title('Gyro')
plt.plot(data[:,3:6])
plt.plot(data[:,9])
plt.plot(data[:,10])
plt.subplot(413)
plt.title('Acc')
plt.plot(data[:,6:9])
plt.plot(data[:,9])
plt.plot(data[:,10])
plt.subplot(414)
plt.title('Targets')
plt.plot(data[:,9])
plt.plot(data[:,10])
plt.show()
def writeToCSV(data,fileName):
numpy.savetxt(getProjectPath()+"\\dataSets\\"+fileName+".csv", data, delimiter=";")
def safeToDataSet(fileName, data, means, stds, gestures, targetTreshold):
ds = DataSet.DataSet(data[:,0:3],data[:,3:6],data[:,6:9],numpy.append(data[:,9:], applyFormatTargetFilter(data, targetTreshold), 1), \
means, stds, gestures)
ds.writeToFile(fileName)
def load(nr):
global i
plt.close('all')
i = readFile("nadja\\nadja_"+str(nr)+".csv")
plotData(i)
def safe(inputData,aaa,nr):
writeToCSV(numpy.concatenate((inputData,numpy.atleast_2d(aaa).T),1),"nadja_fitted_"+str(nr))
def readFile(fileName):
return readFileToNumpy(getProjectPath()+'dataSets\\'+fileName)
if __name__ == '__main__':
#def main():
inputFileName = ["2016-03-14-10-30-47-nike_fullSet_0.csv"]
fileData = numpy.zeros((1,31))
for fileName in inputFileName:
newData = readFileToNumpy(getProjectPath()+'dataSets\\'+fileName)
print newData.shape
fileData = numpy.append(fileData,newData,0)
fused, gyro, acc, targets = separateInputData(fileData)
#fused = removeLOverflow(fused)
#fused = transformToDelta(fused)
_, f_means, f_stds = centerAndNormalize(fused)
_, g_means, g_stds = centerAndNormalize(gyro)
_, a_means, a_stds = centerAndNormalize(acc)
means = numpy.concatenate((f_means,g_means,a_means),0)
stds = numpy.concatenate((f_stds,g_stds,a_stds),0)
gestures = numpy.max(targets,0)
dataSets = []
gestureSets = []
for i in range(0,len(targets[0])):
start, end = getTrainingBeginAndEndIndex(targets[:,i])
t_fused = fused[start:end,:]
t_gyro = gyro[start:end,:]
t_acc = acc[start:end,:]
t_target =numpy.atleast_2d(targets[start:end,i]).T
t_accFilter = applyActivationFilter(numpy.concatenate((t_fused,t_gyro,t_acc),1),6)
a = numpy.concatenate((t_fused,t_gyro,t_acc,t_target,t_accFilter),1)
dataSets.append(a)
gestureSets.append(numpy.max(targets[start:end,:],0))
|
mit
| 5,430,040,770,081,094,000 | 27.74359 | 138 | 0.618487 | false |
igel-kun/pyload
|
module/plugins/hoster/JWPlayerBased.py
|
1
|
3736
|
# -*- coding: utf-8 -*-
import re
from module.network.CookieJar import CookieJar
from module.network.PhantomRequest import PhantomRequest
from module.plugins.internal.misc import eval_js_script, get_domain, make_oneline
from module.plugins.internal.XFSHoster import XFSHoster
class JWPlayerBased(XFSHoster):
__name__ = "JWPlayerBased"
__type__ = "hoster"
__version__ = "0.01"
__pattern__ = r"undefined"
__config__ = [("activated" , "bool" , "Activated" , True )]
__description__ = """JWPlayerBased plugin"""
__author_name__ = ("igel")
__author_mail__ = ("")
INFO_PATTERN = None
NAME_PATTERN = r'<[tT]itle>(?:[wW]atch )?(?P<N>.*?)</[Tt]itle>'
SIZE_PATTERN = None
LINK_PATTERN = None
# how to find the jwplayer code in the HTML
JW_PATTERN = r"<script .*?javascript[\"'][^>]*>\s*(eval.*?)(?:</script>|$)"
# how to extract the link from the decoded javascript call to jwplayer
JW_LINK_PATTERN = r"play.*?{file:[\"']([^\"']*)[\"']"
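    # For example, a decoded call along the lines of
    # jwplayer(...).setup({playlist: [{file:"http://host/video.mp4"}]}) would be matched,
    # capturing the media URL (hostname and structure here are only illustrative).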
def setup(self):
self.multiDL = True
self.chunkLimit = 1
self.resumeDownload = True
# use phantomJS to download websites; this will circumvent JS obfuscation but makes everything a bit slower
try:
self.req.http.close()
finally:
self.req.http = PhantomRequest(
cookies = CookieJar(None),
options = self.pyload.requestFactory.getOptions())
def init(self):
self.__pattern__ = self.pyload.pluginManager.hosterPlugins[self.classname]['pattern']
if not self.PLUGIN_DOMAIN:
m = re.match(self.__pattern__, self.pyfile.url)
try:
self.PLUGIN_DOMAIN = m.group("DOMAIN").lower()
except:
self.PLUGIN_DOMAIN = get_domain(m.group(0))
self.PLUGIN_NAME = "".join(part.capitalize() for part in re.split(r'\.|\d+|-', self.PLUGIN_DOMAIN) if part != '.')
if not self.LINK_PATTERN:
link_patterns = filter(None, [self.JW_PATTERN, self.JW_LINK_PATTERN])
if link_patterns:
self.LINK_PATTERN = "(?:%s)" % ('|'.join(link_patterns))
self.log_debug('our link pattern is: %s' % self.LINK_PATTERN)
if not self.ERROR_PATTERN:
error_patterns = filter(None, [self.OFFLINE_PATTERN, self.TEMP_OFFLINE_PATTERN])
if error_patterns:
self.ERROR_PATTERN = "(?:%s)" % ('|'.join(error_patterns))
self.log_debug('our error pattern is: %s' % self.ERROR_PATTERN)
def handle_free(self, pyfile):
self.log_debug('calling XFSs handle_free to click buttons...')
super(JWPlayerBased, self).handle_free(pyfile)
self.log_debug('XFSs handle_free found: %s' % make_oneline(self.link))
# step 2: extract file URL
m = re.search(self.JW_LINK_PATTERN, self.link, re.MULTILINE | re.DOTALL)
if m is not None:
for link_match in m.groups():
if link_match:
self.link = link_match
if 'eval' in self.link:
self.log_debug(_("evaluating script to get call to jwplayer"))
js_code = re.sub('eval', '', self.link)
data = eval_js_script(js_code)
# now, data should be a call to jwplayer in plaintext
# step 2: extract file URL
m = re.search(self.JW_LINK_PATTERN, data, re.MULTILINE | re.DOTALL)
if m is not None:
for link_match in m.groups():
if link_match:
self.link = link_match
else:
self.error("could not parse call to JWplayer")
|
gpl-3.0
| 2,496,716,507,070,426,000 | 35.990099 | 124 | 0.56531 | false |
kaste/mockito-python
|
tests/modulefunctions_test.py
|
1
|
3675
|
# Copyright (c) 2008-2016 Szczepan Faber, Serhiy Oplakanets, Herr Kaste
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
from .test_base import TestBase
from mockito import when, unstub, verify, any
from mockito.invocation import InvocationError
from mockito.verification import VerificationError
class ModuleFunctionsTest(TestBase):
def tearDown(self):
unstub()
def testUnstubs(self):
when(os.path).exists("test").thenReturn(True)
unstub()
self.assertEqual(False, os.path.exists("test"))
def testStubs(self):
when(os.path).exists("test").thenReturn(True)
self.assertEqual(True, os.path.exists("test"))
def testStubsConsecutiveCalls(self):
when(os.path).exists("test").thenReturn(False).thenReturn(True)
self.assertEqual(False, os.path.exists("test"))
self.assertEqual(True, os.path.exists("test"))
def testStubsMultipleClasses(self):
when(os.path).exists("test").thenReturn(True)
when(os.path).dirname(any(str)).thenReturn("mocked")
self.assertEqual(True, os.path.exists("test"))
self.assertEqual("mocked", os.path.dirname("whoah!"))
def testVerifiesSuccesfully(self):
when(os.path).exists("test").thenReturn(True)
os.path.exists("test")
verify(os.path).exists("test")
def testFailsVerification(self):
when(os.path).exists("test").thenReturn(True)
self.assertRaises(VerificationError, verify(os.path).exists, "test")
def testFailsOnNumberOfCalls(self):
when(os.path).exists("test").thenReturn(True)
os.path.exists("test")
self.assertRaises(VerificationError, verify(os.path, times=2).exists,
"test")
def testStubsTwiceAndUnstubs(self):
when(os.path).exists("test").thenReturn(False)
when(os.path).exists("test").thenReturn(True)
self.assertEqual(True, os.path.exists("test"))
unstub()
self.assertEqual(False, os.path.exists("test"))
def testStubsTwiceWithDifferentArguments(self):
when(os.path).exists("Foo").thenReturn(False)
when(os.path).exists("Bar").thenReturn(True)
self.assertEqual(False, os.path.exists("Foo"))
self.assertEqual(True, os.path.exists("Bar"))
def testShouldThrowIfWeStubAFunctionNotDefinedInTheModule(self):
self.assertRaises(InvocationError,
lambda: when(os).walk_the_line().thenReturn(None))
def testEnsureWeCanMockTheClassOnAModule(self):
from . import module
when(module).Foo().thenReturn('mocked')
assert module.Foo() == 'mocked'
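# The tests above all exercise the same stub -> call -> verify -> unstub cycle;
# a condensed sketch of that cycle, using only calls already shown in this file:
#
#     when(os.path).exists("test").thenReturn(True)   # stub the module function
#     os.path.exists("test")                          # exercise the stub
#     verify(os.path).exists("test")                  # assert the interaction
#     unstub()                                        # restore os.path.exists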
|
mit
| 1,511,132,505,961,215,000 | 35.75 | 79 | 0.69551 | false |
kkmonlee/Project-Euler-Solutions
|
Python/p412v1.py
|
1
|
1413
|
from kkmonleeUtils import EulerTools
import math
upperlim = 75000000
primes = EulerTools.primes_sieve(upperlim)
factor_dict = {}
# Compute how many of each prime factor there are in 75000000!
for p in primes:
num_factors = 0
q = p
while upperlim // q > 0:
num_factors += upperlim // q
q *= p
factor_dict[p] = num_factors
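# The loop above applies Legendre's formula: the exponent of a prime p in n! is
# sum_{k>=1} floor(n / p**k). For example, with n = 10 and p = 2:
# 10//2 + 10//4 + 10//8 = 5 + 2 + 1 = 8, so 2**8 is the exact power of 2 in 10!.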
young_dict = {}
# Now count how many of each prime factor are in the product
# of the Young tableau
index = 0
while primes[index] < 20000:
p = primes[index]
degree = 1
young_dict[p] = 0
while p ** degree < 20000:
mult = 1
while p ** degree * mult < 20000:
if p ** degree * mult <= 5000:
young_dict[p] += 2 * p ** degree * mult
elif p ** degree * mult < 10000:
young_dict[p] += 2 * (10000 - p ** degree * mult)
elif 10000 < p ** degree * mult <= 15000:
young_dict[p] += p ** degree * mult - 10000
elif p ** degree * mult > 15000:
young_dict[p] += 20000 - p ** degree * mult
mult += 1
degree += 1
index += 1
answer = 1
for k in factor_dict.keys():
if k in young_dict:
mult = EulerTools.fast_exp(k, factor_dict[k] - young_dict[k], 76543217)
else:
mult = EulerTools.fast_exp(k, factor_dict[k], 76543217)
answer *= mult
answer = answer % 76543217
print(answer)
|
gpl-3.0
| -6,085,984,801,262,973,000 | 27.26 | 79 | 0.556263 | false |
tcmitchell/geni-ch
|
plugins/chapiv1rpc/chapi/DelegateBase.py
|
2
|
4206
|
#----------------------------------------------------------------------
# Copyright (c) 2011-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
# Base class for delegate bases that want to authenticate, authorize,
# Return GENI-style returns
import tools.pluginmanager as pm
# from amsoil.config import expand_amsoil_path
from exceptions import *
from Exceptions import *
import traceback
import gcf.geni.util.cred_util
from gcf.sfa.trust import gid  # needed below for gid.GID(); assumed available in gcf
class DelegateBase(object):
def __init__(self, logger):
self.logger = logger
def auth(self, client_cert, credentials, slice_urn=None, privileges=()):
# check variables
if not isinstance(privileges, tuple):
raise TypeError("Privileges need to be a tuple.")
# collect credentials (only GENI certs, version ignored)
geni_credentials = []
for c in credentials:
if c['geni_type'] == 'geni_sfa':
geni_credentials.append(c['geni_value'])
# get the cert_root
config = pm.getService("config")
# cert_root = expand_amsoil_path(config.get("chapiv1rpc.ch_cert_root"))
cert_root = config.get("chapiv1rpc.ch_cert_root")
if client_cert == None:
            # work around if the certificate could not be acquired due to the shortcomings of the werkzeug library
if config.get("flask.debug"):
import gcf.sfa.trust.credential as cred
client_cert = cred.Credential(string=geni_credentials[0]).gidCaller.save_to_string(save_parents=True)
else:
raise CHAPIv1ForbiddenError("Could not determine the client SSL certificate")
# test the credential
try:
            cred_verifier = gcf.geni.util.cred_util.CredentialVerifier(cert_root)
cred_verifier.verify_from_strings(client_cert, geni_credentials, slice_urn, privileges)
except Exception as e:
raise CHAPIv1ForbiddenError(str(e))
user_gid = gid.GID(string=client_cert)
user_urn = user_gid.get_urn()
user_uuid = user_gid.get_uuid()
user_email = user_gid.get_email()
return user_urn, user_uuid, user_email # TODO document return
def _errorReturn(self, e):
"""Assembles a GENI compliant return result for faulty methods."""
if not isinstance(e, CHAPIv1BaseError): # convert common errors into CHAPIv1GeneralError
e = CHAPIv1ServerError(str(e))
# do some logging
self.logger.error(e)
self.logger.error(traceback.format_exc())
return {'code' : e.code , 'value' : None, 'output' : str(e) }
def _successReturn(self, result):
"""Assembles a GENI compliant return result for successful methods."""
return { 'code' : 0 , 'value' : result, 'output' : '' }
def subcall_options(self, options):
"""Generate options dictionary for subordinate calls to other
clearinghouse services.
"""
sopt = dict()
sfkeys = ['ENVIRON', 'speaking_for']
for sfkey in sfkeys:
if sfkey in options:
sopt[sfkey] = options[sfkey]
return sopt
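# Delegate methods are expected to hand back the GENI-style triple assembled by
# the two helpers above; a sketch of both shapes (the payload dict is hypothetical):
#
#     self._successReturn({'urn': user_urn})
#         -> {'code': 0, 'value': {'urn': user_urn}, 'output': ''}
#     self._errorReturn(CHAPIv1ForbiddenError('not allowed'))
#         -> {'code': <error code>, 'value': None, 'output': 'not allowed'}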
|
mit
| 7,589,951,973,677,695,000 | 41.484848 | 117 | 0.644318 | false |
aschn/goodtechgigs
|
docs/conf.py
|
1
|
7831
|
# -*- coding: utf-8 -*-
#
# goodtechgigs documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'goodtechgigs'
copyright = u"2015, Anna Schneider"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'goodtechgigsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'goodtechgigs.tex',
u'goodtechgigs Documentation',
u"Anna Schneider", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'goodtechgigs', u'goodtechgigs Documentation',
[u"Anna Schneider"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'goodtechgigs', u'goodtechgigs Documentation',
u"Anna Schneider", 'goodtechgigs',
'Tech gigs for good.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
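# With this conf.py in place, the HTML docs would typically be built with
# something like the following (paths are illustrative, not taken from the repo):
#
#     sphinx-build -b html docs/ docs/_build/html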
|
apache-2.0
| 2,750,946,561,795,250,000 | 30.963265 | 80 | 0.697867 | false |
RealImpactAnalytics/airflow
|
airflow/contrib/operators/segment_track_event_operator.py
|
1
|
2675
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.contrib.hooks.segment_hook import SegmentHook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class SegmentTrackEventOperator(BaseOperator):
"""
Send Track Event to Segment for a specified user_id and event
:param user_id: The ID for this user in your database
:type user_id: string
:param event: The name of the event you're tracking
:type event: string
:param properties: A dictionary of properties for the event.
:type properties: dict
:param segment_conn_id: The connection ID to use when connecting to Segment.
:type segment_conn_id: string
:param segment_debug_mode: Determines whether Segment should run in debug mode.
Defaults to False
:type segment_debug_mode: boolean
"""
template_fields = ('user_id', 'event', 'properties')
ui_color = '#ffd700'
@apply_defaults
def __init__(self,
user_id,
event,
properties=None,
segment_conn_id='segment_default',
segment_debug_mode=False,
*args,
**kwargs):
super(SegmentTrackEventOperator, self).__init__(*args, **kwargs)
self.user_id = user_id
self.event = event
properties = properties or {}
self.properties = properties
self.segment_debug_mode = segment_debug_mode
self.segment_conn_id = segment_conn_id
def execute(self, context):
hook = SegmentHook(segment_conn_id=self.segment_conn_id,
segment_debug_mode=self.segment_debug_mode)
self.log.info(
'Sending track event ({0}) for user id: {1} with properties: {2}'.
format(self.event, self.user_id, self.properties))
hook.track(self.user_id, self.event, self.properties)
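# A minimal sketch of wiring this operator into a DAG; the task_id, user_id,
# event and properties values below are hypothetical, only the keyword names
# come from __init__ above (dag/task_id are standard BaseOperator arguments):
#
#     track_event = SegmentTrackEventOperator(
#         task_id='track_signup',
#         user_id='user-42',
#         event='signup',
#         properties={'plan': 'free'},
#         segment_conn_id='segment_default',
#         dag=dag,
#     )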
|
apache-2.0
| -8,805,401,860,075,005,000 | 37.768116 | 83 | 0.669907 | false |
imminent-tuba/thesis
|
server/chatterbot/chatterbot/adapters/storage/jsondatabase.py
|
1
|
4775
|
from chatterbot.adapters.storage import StorageAdapter
from chatterbot.adapters.exceptions import EmptyDatabaseException
from chatterbot.conversation import Statement, Response
from jsondb import Database
class JsonDatabaseAdapter(StorageAdapter):
"""
The JsonDatabaseAdapter is an interface that allows ChatterBot
to store the conversation as a Json-encoded file.
"""
def __init__(self, **kwargs):
super(JsonDatabaseAdapter, self).__init__(**kwargs)
database_path = self.kwargs.get("database", "database.db")
self.database = Database(database_path)
def _keys(self):
# The value has to be cast as a list for Python 3 compatibility
return list(self.database[0].keys())
def count(self):
return len(self._keys())
def find(self, statement_text):
values = self.database.data(key=statement_text)
if not values:
return None
# Build the objects for the response list
response_list = self.deserialize_responses(values["in_response_to"])
values["in_response_to"] = response_list
return Statement(statement_text, **values)
def remove(self, statement_text):
"""
Removes the statement that matches the input text.
Removes any responses from statements if the response text matches the
input text.
"""
for statement in self.filter(in_response_to__contains=statement_text):
statement.remove_response(statement_text)
self.update(statement)
self.database.delete(statement_text)
def deserialize_responses(self, response_list):
"""
Takes the list of response items and returns the
list converted to object versions of the responses.
"""
in_response_to = []
for response in response_list:
text = response["text"]
del(response["text"])
in_response_to.append(
Response(text, **response)
)
return in_response_to
def _all_kwargs_match_values(self, kwarguments, values):
for kwarg in kwarguments:
if "__" in kwarg:
kwarg_parts = kwarg.split("__")
key = kwarg_parts[0]
identifier = kwarg_parts[1]
if identifier == "contains":
text_values = []
for val in values[key]:
text_values.append(val["text"])
if (kwarguments[kwarg] not in text_values) and (
kwarguments[kwarg] not in values[key]):
return False
if kwarg in values:
if values[kwarg] != kwarguments[kwarg]:
return False
return True
def filter(self, **kwargs):
"""
Returns a list of statements in the database
that match the parameters specified.
"""
results = []
for key in self._keys():
values = self.database.data(key=key)
# Add the text attribute to the values
values["text"] = key
if self._all_kwargs_match_values(kwargs, values):
# Build the objects for the response list
in_response_to = values["in_response_to"]
response_list = self.deserialize_responses(in_response_to)
values["in_response_to"] = response_list
# Remove the text attribute from the values
text = values.pop("text")
results.append(
Statement(text, **values)
)
return results
def update(self, statement):
# Do not alter the database unless writing is enabled
if not self.read_only:
data = statement.serialize()
# Remove the text key from the data
del(data['text'])
self.database.data(key=statement.text, value=data)
# Make sure that an entry for each response exists
for response_statement in statement.in_response_to:
response = self.find(response_statement.text)
if not response:
response = Statement(response_statement.text)
self.update(response)
return statement
def get_random(self):
from random import choice
if self.count() < 1:
raise EmptyDatabaseException()
statement = choice(self._keys())
return self.find(statement)
def drop(self):
"""
Remove the json file database completely.
"""
import os
if os.path.exists(self.database.path):
os.remove(self.database.path)
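# A rough usage sketch, assuming the jsondb dependency imported above is
# installed and that StorageAdapter accepts keyword arguments as used in
# __init__; the statement text is hypothetical:
#
#     adapter = JsonDatabaseAdapter(database='database.db')
#     adapter.update(Statement('Hello'))                        # persist
#     found = adapter.find('Hello')                             # Statement or None
#     related = adapter.filter(in_response_to__contains='Hello')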
|
mit
| -6,168,372,665,822,359,000 | 30.006494 | 78 | 0.573194 | false |
andymckay/zamboni
|
mkt/developers/tests/test_providers.py
|
1
|
14056
|
from datetime import datetime
from django.core.exceptions import ObjectDoesNotExist
from mock import ANY, Mock, patch
from nose.tools import eq_, ok_, raises
from amo.tests import app_factory, TestCase
from mkt.constants.payments import (PROVIDER_BANGO, PROVIDER_BOKU,
PROVIDER_REFERENCE)
from mkt.developers.models import PaymentAccount, SolitudeSeller
from mkt.developers.providers import (account_check, Bango, Boku, get_provider,
Reference)
from mkt.site.fixtures import fixture
from mkt.users.models import UserProfile
class Patcher(object):
"""
    This class patches your test case so that any attempt to call solitude
    from zamboni through these classes will use the mock.
    Use this class as a mixin on any tests that alter payment accounts.
    If you override setUp or tearDown, be sure to call super.
"""
def setUp(self, *args, **kw):
super(Patcher, self).setUp(*args, **kw)
# Once everything has moved over to the provider, this one
        # can be removed.
client_patcher = patch('mkt.developers.models.client',
name='test_providers.Patcher.client_patcher')
self.patched_client = client_patcher.start()
self.patched_client.patcher = client_patcher
self.addCleanup(client_patcher.stop)
bango_patcher = patch('mkt.developers.providers.Bango.client',
name='test_providers.Patcher.bango_patcher')
self.bango_patcher = bango_patcher.start()
self.bango_patcher.patcher = bango_patcher
self.addCleanup(bango_patcher.stop)
bango_p_patcher = patch(
'mkt.developers.providers.Bango.client_provider',
name='test_providers.Patcher.bango_p_patcher')
self.bango_p_patcher = bango_p_patcher.start()
self.bango_p_patcher.patcher = bango_p_patcher
self.addCleanup(bango_p_patcher.stop)
boku_patcher = patch('mkt.developers.providers.Boku.client',
name='test_providers.Patcher.boku_patcher')
self.boku_patcher = boku_patcher.start()
self.boku_patcher.patcher = boku_patcher
self.addCleanup(boku_patcher.stop)
ref_patcher = patch('mkt.developers.providers.Reference.client',
name='test_providers.Patcher.ref_patcher')
self.ref_patcher = ref_patcher.start()
self.ref_patcher.patcher = ref_patcher
self.addCleanup(ref_patcher.stop)
generic_patcher = patch('mkt.developers.providers.Provider.generic',
name='test_providers.Patcher.generic_patcher')
self.generic_patcher = generic_patcher.start()
self.generic_patcher.patcher = generic_patcher
self.addCleanup(generic_patcher.stop)
class TestSetup(TestCase):
def test_multiple(self):
with self.settings(PAYMENT_PROVIDERS=['bango', 'reference'],
DEFAULT_PAYMENT_PROVIDER='bango'):
eq_(get_provider().name, 'bango')
class TestBase(TestCase):
def test_check(self):
provider = Reference()
@account_check
def test(self, account):
pass
provider.test = test
provider.test(provider, PaymentAccount(provider=PROVIDER_REFERENCE))
with self.assertRaises(ValueError):
provider.test(provider, PaymentAccount(provider=PROVIDER_BOKU))
class TestBango(Patcher, TestCase):
fixtures = fixture('user_999')
def setUp(self):
super(TestBango, self).setUp()
self.user = UserProfile.objects.filter()[0]
self.app = app_factory()
self.make_premium(self.app)
self.seller = SolitudeSeller.objects.create(
resource_uri='sellerres', user=self.user
)
self.account = PaymentAccount.objects.create(
solitude_seller=self.seller,
user=self.user, name='paname', uri='acuri',
inactive=False, seller_uri='selluri',
account_id=123, provider=PROVIDER_BANGO
)
self.bango = Bango()
def test_create(self):
self.generic_patcher.product.get_object_or_404.return_value = {
'resource_uri': 'gpuri'}
self.bango_patcher.product.get_object_or_404.return_value = {
'resource_uri': 'bpruri', 'bango_id': 'bango#', 'seller': 'selluri'
}
uri = self.bango.product_create(self.account, self.app)
eq_(uri, 'bpruri')
def test_create_new(self):
self.bango_patcher.product.get_object_or_404.side_effect = (
ObjectDoesNotExist)
self.bango_p_patcher.product.post.return_value = {
'resource_uri': '', 'bango_id': 1
}
self.bango.product_create(self.account, self.app)
ok_('packageId' in
self.bango_p_patcher.product.post.call_args[1]['data'])
def test_terms_bleached(self):
self.bango_patcher.sbi.agreement.get_object.return_value = {
'text': '<script>foo</script><h3></h3>'}
eq_(self.bango.terms_retrieve(Mock())['text'],
u'<script>foo</script><h3></h3>')
class TestReference(Patcher, TestCase):
fixtures = fixture('user_999')
def setUp(self, *args, **kw):
super(TestReference, self).setUp(*args, **kw)
self.user = UserProfile.objects.get(pk=999)
self.ref = Reference()
def test_setup_seller(self):
self.ref.setup_seller(self.user)
ok_(SolitudeSeller.objects.filter(user=self.user).exists())
def test_account_create(self):
data = {'account_name': 'account', 'name': 'f', 'email': 'a@a.com'}
self.patched_client.api.generic.seller.post.return_value = {
'resource_uri': '/1'
}
res = self.ref.account_create(self.user, data)
acct = PaymentAccount.objects.get(user=self.user)
eq_(acct.provider, PROVIDER_REFERENCE)
eq_(res.pk, acct.pk)
self.ref_patcher.sellers.post.assert_called_with(data={
'status': 'ACTIVE',
'email': 'a@a.com',
'uuid': ANY,
'name': 'f',
'seller': '/1'
})
def make_account(self):
seller = SolitudeSeller.objects.create(user=self.user)
return PaymentAccount.objects.create(user=self.user,
solitude_seller=seller,
uri='/f/b/1',
name='account name',
provider=PROVIDER_REFERENCE)
def test_terms_retrieve(self):
account = self.make_account()
self.ref.terms_retrieve(account)
assert self.ref_patcher.terms.called
def test_terms_bleached(self):
account = self.make_account()
account_mock = Mock()
account_mock.get.return_value = {'text':
'<script>foo</script><a>bar</a>'}
self.ref_patcher.terms.return_value = account_mock
eq_(self.ref.terms_retrieve(account)['text'],
u'<script>foo</script><a>bar</a>')
def test_terms_update(self):
seller_mock = Mock()
seller_mock.get.return_value = {
'id': 1,
'resource_uri': '/a/b/c',
'resource_name': 'x',
'reference': {}
}
seller_mock.put.return_value = {}
self.ref_patcher.sellers.return_value = seller_mock
account = self.make_account()
self.ref.terms_update(account)
eq_(account.reload().agreed_tos, True)
assert self.ref_patcher.sellers.called
seller_mock.get.assert_called_with()
seller_mock.put.assert_called_with({
'agreement': datetime.now().strftime('%Y-%m-%d'),
'seller': ''
})
def test_account_retrieve(self):
account = self.make_account()
acc = self.ref.account_retrieve(account)
eq_(acc, {'account_name': 'account name'})
assert self.ref_patcher.sellers.called
def test_account_update(self):
account_data = {
'status': '',
'resource_name': 'sellers',
'uuid': 'custom-uuid',
'agreement': '',
'email': 'a@a.com',
'id': 'custom-uuid',
'resource_uri': '/provider/reference/sellers/custom-uuid/',
'account_name': u'Test',
'name': 'Test',
}
seller_mock = Mock()
seller_mock.get.return_value = account_data
self.ref_patcher.sellers.return_value = seller_mock
account = self.make_account()
self.ref.account_update(account, account_data)
eq_(self.ref.forms['account']().hidden_fields()[0].name, 'uuid')
eq_(account.reload().name, 'Test')
seller_mock.put.assert_called_with(account_data)
def test_product_create_exists(self):
account = self.make_account()
app = app_factory()
self.ref.product_create(account, app)
# Product should have been got from zippy, but not created by a post.
assert not self.ref_patcher.products.post.called
def test_product_create_not(self):
self.generic_patcher.product.get_object_or_404.return_value = {
'external_id': 'ext',
'resource_uri': '/f',
'public_id': 'public:id',
'seller_uuids': {'reference': None}
}
self.ref_patcher.products.get.return_value = []
self.ref_patcher.products.post.return_value = {'resource_uri': '/f'}
account = self.make_account()
app = app_factory()
self.ref.product_create(account, app)
self.ref_patcher.products.post.assert_called_with(data={
'seller_product': '/f',
'seller_reference': '/f/b/1',
'name': unicode(app.name),
'uuid': ANY,
})
class TestBoku(Patcher, TestCase):
fixtures = fixture('user_999')
def setUp(self, *args, **kw):
super(TestBoku, self).setUp(*args, **kw)
self.user = UserProfile.objects.get(pk=999)
self.boku = Boku()
def make_account(self):
seller = SolitudeSeller.objects.create(user=self.user)
return PaymentAccount.objects.create(user=self.user,
solitude_seller=seller,
uri='/f/b/1',
name='account name',
provider=PROVIDER_BOKU)
def test_account_create(self):
data = {'account_name': 'account',
'service_id': 'b'}
res = self.boku.account_create(self.user, data)
acct = PaymentAccount.objects.get(user=self.user)
eq_(acct.provider, PROVIDER_BOKU)
eq_(acct.agreed_tos, True)
eq_(res.pk, acct.pk)
self.boku_patcher.seller.post.assert_called_with(data={
'seller': ANY,
'service_id': 'b',
})
def test_terms_update(self):
account = self.make_account()
assert not account.agreed_tos
response = self.boku.terms_update(account)
assert account.agreed_tos
assert response['accepted']
def test_create_new_product(self):
account = self.make_account()
app = app_factory()
generic_product_uri = '/generic/product/1/'
boku_product_uri = '/boku/product/1/'
self.generic_patcher.product.get_object_or_404.return_value = {
'resource_pk': 1,
'resource_uri': generic_product_uri,
}
self.boku_patcher.product.get.return_value = {
'meta': {'total_count': 0},
'objects': [],
}
self.boku_patcher.product.post.return_value = {
'resource_uri': boku_product_uri,
'seller_product': generic_product_uri,
'seller_boku': account.uri,
}
product = self.boku.product_create(account, app)
eq_(product, boku_product_uri)
self.boku_patcher.product.post.assert_called_with(data={
'seller_boku': account.uri,
'seller_product': generic_product_uri,
})
def test_update_existing_product(self):
account = self.make_account()
app = app_factory()
generic_product_uri = '/generic/product/1/'
self.generic_patcher.product.get_object_or_404.return_value = {
'resource_pk': 1,
'resource_uri': generic_product_uri,
}
existing_boku_product_uri = '/boku/product/1/'
self.boku_patcher.product.get.return_value = {
'meta': {'total_count': 1},
'objects': [{
'resource_uri': existing_boku_product_uri,
}],
}
patch_mock = Mock()
patch_mock.patch.return_value = {
'resource_uri': existing_boku_product_uri,
'seller_product': generic_product_uri,
'seller_boku': account.uri,
}
self.boku_patcher.by_url.return_value = patch_mock
product = self.boku.product_create(account, app)
eq_(product, existing_boku_product_uri)
self.boku_patcher.by_url.assert_called_with(existing_boku_product_uri)
patch_mock.patch.assert_called_with(data={
'seller_boku': account.uri,
'seller_product': generic_product_uri,
})
def test_multiple_existing_products_raises_exception(self):
account = self.make_account()
app = app_factory()
generic_product_uri = '/generic/product/1/'
self.generic_patcher.product.get_object_or_404.return_value = {
'resource_pk': 1,
'resource_uri': generic_product_uri,
}
self.boku_patcher.product.get.return_value = {
'meta': {'total_count': 2},
'objects': [],
}
with self.assertRaises(ValueError):
self.boku.product_create(account, app)
|
bsd-3-clause
| -3,848,961,013,239,043,000 | 36.28382 | 79 | 0.577334 | false |
ballesterus/UPhO
|
Get_fasta_from_Ref.py
|
1
|
3740
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import argparse
#Function definitions
def Fasta_to_Dict(File):
    '''Creates a dictionary of FASTA sequences in a File, with seqIds as keys to the sequences.'''
with open(File, 'r') as F:
Records = {}
for Line in F:
if Line.startswith('>'):
Seqid = Line.split(' ')[0].strip('>').strip('\n')
Seq= ''
Records[Seqid] = Seq
else:
Seq = Records[Seqid] + Line.strip('\n')
Records[Seqid] = Seq.upper()
return Records
def FastaRetriever(seqId, FastaDict):
    """Returns a FASTA formatted record from a seqId and a FastaDict where the fasta Id is a key in FastaDict"""
try:
seq=FastaDict[seqId]
return ">%s\n%s\n" %(seqId,seq)
except:
print "\x1b[1;31;40mALERT: The sequence ID: %s was not found in the source Fasta file.\x1b[0m" % seqId
def main(query, outdir, prefix, reference):
handle = open(query, 'r')
if not os.path.exists(outdir):
os.makedirs(outdir)
else:
        print 'The output dir already exists!'
Counter = 0
seqSource = Fasta_to_Dict(reference)
for line in handle:
if len(line) > 0: # do not process empty lines
line = line.replace(' ', '' ) # remove white spaces
qlist = line.strip('\n').split(',')
qlist = [i for i in qlist if i != ""]
            if line.startswith('#'): # means that filenames are provided in the input, this being the first field in the csv.
Name = qlist.pop(0)
OG_filename = Name.strip('#') + '.fasta'
OG_outfile = open(outdir + '/' + OG_filename, 'w')
else:
OG_filename = prefix + "_" + str(Counter) + ".fasta"
OG_outfile = open(outdir + '/' + OG_filename, 'w')
Counter += 1
for seqId in qlist:
seq=FastaRetriever(seqId, seqSource)
try:
OG_outfile.write(seq)
except:
                    print "There is a problem retrieving the seqID: %s. Verify the seqID is exactly the same in query and source files.\n" % seqId
exit(1)
print "Successfully created file: %s" % OG_filename
OG_outfile.close()
if __name__=='__main__':
    parser = argparse.ArgumentParser(description='This script creates fasta files from a list of sequence identifiers. It takes as input a file in which each line is a list of sequence identifiers to be written to a multi-fasta file, and a Reference file, which contains the identifiers and their sequences. Fasta ids in the query and Reference should be identical. The output files are named using a user-defined prefix and a counter; if a name defined by the user is preferred, it should be given as the first element of the list and identified by starting with "#".')
parser.add_argument('-q', dest = 'query', type = str, default= 'None', help = 'File with wanted fasta identifiers separated by ",". ')
    parser.add_argument('-o', dest= 'outdir', type =str, default= '.', help ='Name of the directory to use as output; if it does not exist it will be created. Default "."')
    parser.add_argument('-p', dest= 'prefix', type = str, default= 'Group', help ='Prefix to use when no group name is provided')
parser.add_argument('-R', dest= 'Reference', type = str, default= 'None', help ='A fasta file with the source fasta sequences in the input tree. If provided, a fasta file will be created for each ortholog found')
args, unknown = parser.parse_known_args()
main(args.query, args.outdir, args.prefix, args.Reference)
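# An illustrative invocation with hypothetical file names; each line of the
# query file is a comma-separated list of sequence IDs, optionally prefixed
# with '#GroupName' to name that output fasta:
#
#     python Get_fasta_from_Ref.py -q groups.txt -R all_seqs.fasta -o out_dir -p Group
#
# where groups.txt might contain lines such as:
#     #OG0001,seqA,seqB,seqC
#     seqD,seqE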
|
gpl-3.0
| -4,248,451,519,692,429,300 | 49.540541 | 575 | 0.604011 | false |
EzeAlbornoz5/TP-Ingenieria-web
|
doyourfreight/forums/views.py
|
1
|
12790
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseNotFound
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from .forms import *
from .models import *
from shippings.models import *
def ShippingsPendingsNotifications(user):
userProfile = UserProfile.objects.filter(fk_user=user)
userType = userProfile[0].fk_userType.id
if userType == 1:
shippings = Ship.objects.filter(fk_userClient=userProfile, isNotificationForClient=True)
else:
shippings = Ship.objects.filter(fk_userCompany=userProfile, isNotificationForCompany=True,
fk_state__description='Pendiente')
return shippings
def ForumHome(request):
if request.method == 'GET':
data = {}
if request.user.is_authenticated:
userProfile = UserProfile.objects.get(fk_user=request.user)
if userProfile.fk_state.id == 1:
######################### Notifications #########################
data['shippingsNotifications'] = ShippingsPendingsNotifications(request.user)
if userProfile.fk_userType.pk == 1: # cliente
data['userTypeNotification'] = 'cliente'
else:
data['userTypeNotification'] = 'empresa'
####################### End Notifications #######################
if userProfile.fk_userType.pk == 1:
data['userType'] = 'Cliente'
data['forumsCategories'] = ForumCategories.objects.exclude(name='Combinación de viajes')
else:
data['userType'] = 'Empresa'
data['forumsCategories'] = ForumCategories.objects.exclude(name='Reputación de empresas')
else:
data['forumsCategories'] = ForumCategories.objects.exclude(name='Combinación de viajes').exclude(
name='Reputación de empresas')
else:
data['forumsCategories'] = ForumCategories.objects.exclude(name='Combinación de viajes').exclude(
name='Reputación de empresas')
return render(request, 'forum.html', data)
def ForumThreads(request, pk):
data = {}
forumCategory = ForumCategories.objects.get(pk=pk)
data['forumCategory'] = forumCategory
data['threads'] = Thread.objects.filter(fk_forum=forumCategory).order_by('-submitDate')[:50]
data['topThreads'] = Thread.objects.filter(fk_forum=forumCategory).order_by('-totalScore')[:10]
######################### Notifications #########################
if request.user.is_authenticated:
data['shippingsNotifications'] = ShippingsPendingsNotifications(request.user)
userProfile = UserProfile.objects.get(fk_user=request.user)
if userProfile.fk_userType.pk == 1: # cliente
data['userTypeNotification'] = 'cliente'
else:
data['userTypeNotification'] = 'empresa'
####################### End Notifications #######################
return render(request, 'threads.html', data)
@login_required
def ForumNewThread(request, category):
userProfile = UserProfile.objects.get(fk_user=request.user)
if userProfile.fk_state.id == 1:
data = {}
if request.method == 'POST':
form = FormThread(request.POST)
forumCategory = ForumCategories.objects.get(pk=category)
if form.is_valid():
print('Es valido')
threadSaved = form.save()
thread = Thread.objects.get(name=threadSaved.name, fk_forum=threadSaved.fk_forum,
content=threadSaved.content, fk_author=threadSaved.fk_author,
submitDate=threadSaved.submitDate)
return redirect('forums:forum_view_thread', forumCategory.pk, thread.pk)
else:
forumCategory = ForumCategories.objects.get(pk=category)
data['forumCategory'] = forumCategory
form = FormThread(initial={'fk_forum': forumCategory, 'fk_author': request.user})
data['form'] = form
######################### Notifications #########################
if request.user.is_authenticated:
data['shippingsNotifications'] = ShippingsPendingsNotifications(request.user)
if userProfile.fk_userType.pk == 1: # cliente
data['userTypeNotification'] = 'cliente'
else:
data['userTypeNotification'] = 'empresa'
####################### End Notifications #######################
return render(request, 'new_thread.html', data)
else:
if userProfile.fk_state.id == 2:
return HttpResponseNotFound('<h2>Usted se encuentra baneado temporalmente por conducta inapropiada para el sitio.</h2><a href="/">Ir al inicio</a>')
if userProfile.fk_state.id == 3:
return HttpResponseNotFound('<h2>Usted se encuentra baneado permanentemente por conducta inapropiada para el sitio.</h2><a href="/">Ir al inicio</a>')
def ForumViewThread(request, category, thread):
data = {'forumCategory': ForumCategories.objects.get(pk=category),
'replies': Reply.objects.filter(fk_thread__pk=thread).order_by('submitDate'),
'thread': Thread.objects.get(id=thread)}
if request.user.is_authenticated:
userProfile = UserProfile.objects.get(fk_user=request.user)
if userProfile.fk_state.id == 1:
data['form'] = FormReply(initial={'fk_thread': data['thread'], 'fk_author': request.user})
threadscore = ThreadScore.objects.filter(fk_user=request.user, fk_thread=data['thread'])
if len(threadscore) == 0:
data['like'] = 'Null'
else:
data['like'] = threadscore[0].like
######################### Notifications #########################
if request.user.is_authenticated:
data['shippingsNotifications'] = ShippingsPendingsNotifications(request.user)
if userProfile.fk_userType.pk == 1: # cliente
data['userTypeNotification'] = 'cliente'
else:
data['userTypeNotification'] = 'empresa'
####################### End Notifications #######################
else:
data['like'] = 'Null'
if request.method == 'POST':
userProfile = UserProfile.objects.get(fk_user=request.user)
if userProfile.fk_state.id == 1:
if 'like' in request.POST or 'dislike' in request.POST:
thread = Thread.objects.get(pk=request.POST['threadID'])
threadscore, created = ThreadScore.objects.get_or_create(fk_thread=thread, fk_user=request.user)
if 'like' in request.POST:
if threadscore.like is None:
thread.totalScore += 1
else:
thread.totalScore += 2
threadscore.like = True
else:
if threadscore.like is None:
thread.totalScore -= 1
else:
thread.totalScore -= 2
threadscore.like = False
data['like'] = threadscore.like
threadscore.save()
thread.save()
data['thread'] = Thread.objects.get(id=thread.id)
else:
if 'report' in request.POST:
Report(request.POST['reportFrom'], request.POST['fromID'], request.POST['reason'], request.user)
else:
if 'comment' in request.POST:
data['thread'] = Thread.objects.get(pk=request.POST['fk_thread'])
data['forumCategory'] = data['thread'].fk_forum
data['replies'] = Reply.objects.filter(fk_thread__pk=request.POST['fk_thread']).order_by('submitDate')
form = FormReply(request.POST)
data['form'] = form
if form.is_valid():
form.save()
data['form'] = FormReply(initial={'fk_thread': data['thread'], 'fk_author': request.user})
else:
if userProfile.fk_state.id == 2:
return HttpResponseNotFound(
'<h2>Usted se encuentra baneado temporalmente por conducta inapropiada para el sitio.</h2><a href="/">Ir al inicio</a>')
if userProfile.fk_state.id == 3:
return HttpResponseNotFound(
'<h2>Usted se encuentra baneado permanentemente por conducta inapropiada para el sitio.</h2><a href="/">Ir al inicio</a>')
data['reportsReplies'] = []
if request.user.is_authenticated:
userProfile = UserProfile.objects.get(fk_user=request.user)
if userProfile.fk_state.id == 1:
reportThread = Reports.objects.filter(fk_userAuth=request.user, reportFrom=1, fromID=data['thread'].id)
reportsReplies = Reports.objects.filter(fk_userAuth=request.user, reportFrom=2)
for reportReply in reportsReplies:
data['reportsReplies'].append(reportReply.fromID)
if len(reportThread) == 0:
data['reportThread'] = False
else:
data['reportThread'] = True
######################### Notifications #########################
if request.user.is_authenticated:
data['shippingsNotifications'] = ShippingsPendingsNotifications(request.user)
if userProfile.fk_userType.pk == 1: # cliente
data['userTypeNotification'] = 'cliente'
else:
data['userTypeNotification'] = 'empresa'
####################### End Notifications #######################
return render(request, 'thread_view.html', data)
# Function to generate the report (complaint) record
def Report(reportFrom, fromID, reason, user):
report = Reports()
report.reportFrom = reportFrom
report.fromID = fromID
report.reasonReport = reason
report.fk_userAuth = user
report.save()
return None
@login_required
def ForumEditThread(request, category, thread):
data = {'categoryID': category, 'threadID': thread}
threadEdit = Thread.objects.get(pk=thread)
userProfile = UserProfile.objects.get(fk_user=request.user)
if userProfile.fk_state.id == 1:
if request.method == 'POST':
form = FormThread(request.POST)
forumCategory = ForumCategories.objects.get(pk=category)
if form.is_valid():
print('Es valido')
threadSaved = form.save(commit=False)
threadEdit.name = threadSaved.name
threadEdit.content = threadSaved.content
threadEdit.save()
threadSaved = threadEdit
thread = Thread.objects.get(name=threadSaved.name, fk_forum=threadSaved.fk_forum,
content=threadSaved.content, fk_author=threadSaved.fk_author,
submitDate=threadSaved.submitDate)
return redirect('forums:forum_view_thread', forumCategory.pk, thread.pk)
else:
forumCategory = ForumCategories.objects.get(pk=category)
data['forumCategory'] = forumCategory
form = FormThread(initial={'fk_forum': forumCategory, 'fk_author': request.user, 'name': threadEdit.name,
'content': threadEdit.content})
data['form'] = form
######################### Notifications #########################
if request.user.is_authenticated:
data['shippingsNotifications'] = ShippingsPendingsNotifications(request.user)
if userProfile.fk_userType.pk == 1: # cliente
data['userTypeNotification'] = 'cliente'
else:
data['userTypeNotification'] = 'empresa'
####################### End Notifications #######################
return render(request, 'edit_thread.html', data)
else:
if userProfile.fk_state.id == 2:
return HttpResponseNotFound('<h2>Usted se encuentra baneado temporalmente por conducta inapropiada para el sitio.</h2><a href="/">Ir al inicio</a>')
if userProfile.fk_state.id == 3:
return HttpResponseNotFound('<h2>Usted se encuentra baneado permanentemente por conducta inapropiada para el sitio.</h2><a href="/">Ir al inicio</a>')
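# Note on the like/dislike handling in ForumViewThread above: Thread.totalScore
# moves by 1 for a brand-new vote and by 2 when a user flips an existing vote
# (undo the old vote, then apply the new one). For example, from totalScore == 0
# a first like gives 1, and a later dislike by the same user gives 1 - 2 == -1.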
|
gpl-3.0
| -4,424,713,529,764,889,600 | 49.133333 | 162 | 0.565864 | false |
junwoo091400/MyCODES
|
Projects/FootPad_Logger/logged_data_analyzer_LSTM/RNN_LSTM.py
|
1
|
2131
|
from __future__ import print_function, division
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import ipdb
def RNN_LSTM(batch_size_in = 5, total_len_in = 30000, pad_len_in = 5, backprop_len_in = 50, state_size_in = 10, num_class_in = 32):
# total_len_in = (backprop_len_in) * (num_batches)
# Get inputs.
batch_size = batch_size_in
total_series_length = total_len_in
pad_length = pad_len_in
truncated_backprop_length = backprop_len_in
state_size = state_size_in
num_classes = num_class_in
num_batches = total_series_length // truncated_backprop_length
#Model generate
batchX_placeholder = tf.placeholder(tf.float32, [batch_size, truncated_backprop_length, pad_length])
batchY_placeholder = tf.placeholder(tf.int32, [batch_size, truncated_backprop_length])
cell_state = tf.placeholder(tf.float32, [batch_size, state_size])
hidden_state = tf.placeholder(tf.float32, [batch_size, state_size])
init_state = tf.nn.rnn_cell.LSTMStateTuple(cell_state, hidden_state)
# LSTM -> classes.
W2 = tf.Variable(np.random.rand(state_size, num_classes),dtype=tf.float32)
b2 = tf.Variable(np.zeros((1, num_classes)), dtype=tf.float32)
# Unpack columns
inputs_series = tf.unstack(batchX_placeholder, axis=1)
labels_series = tf.unstack(batchY_placeholder, axis=1) # Becomes [truncated_len, batch_size]
# Forward passes
cell = tf.contrib.rnn.BasicLSTMCell(state_size, state_is_tuple=True)
states_series, current_state = tf.contrib.rnn.static_rnn(cell, inputs_series, init_state)#Input 'init_state' + 'inputs_series' + 'cell'
logits_series = [tf.matmul(state, W2) + b2 for state in states_series] #Broadcasted addition
predictions_series = [tf.nn.softmax(logits) for logits in logits_series]
losses = [tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels) for logits, labels in zip(logits_series,labels_series)]
total_loss = tf.reduce_mean(losses)
train_step = tf.train.AdagradOptimizer(0.3).minimize(total_loss)
return (batchX_placeholder, batchY_placeholder, cell_state, hidden_state, current_state, predictions_series, W2, b2, cell, train_step, total_loss)
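# A rough sketch of driving the returned tensors in a TF1-style training loop;
# x_chunk and y_chunk are hypothetical batches shaped like the placeholders,
# and the zero states use the default batch_size_in=5, state_size_in=10:
#
#     (batchX, batchY, cell_state, hidden_state, current_state,
#      predictions, W2, b2, cell, train_step, total_loss) = RNN_LSTM()
#     with tf.Session() as sess:
#         sess.run(tf.global_variables_initializer())
#         _cell, _hidden = np.zeros((5, 10)), np.zeros((5, 10))
#         _loss, _, (_cell, _hidden) = sess.run(
#             [total_loss, train_step, current_state],
#             feed_dict={batchX: x_chunk, batchY: y_chunk,
#                        cell_state: _cell, hidden_state: _hidden})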
|
gpl-3.0
| 8,832,610,003,618,815,000 | 40.803922 | 147 | 0.742374 | false |
radekp/qt
|
doc/src/diagrams/programs/mdiarea.py
|
1
|
3712
|
#!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
## Contact: Nokia Corporation (qt-info@nokia.com)
##
## This file is part of the test suite of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:LGPL$
## No Commercial Usage
## This file contains pre-release code and may not be distributed.
## You may use this file in accordance with the terms and conditions
## contained in the Technology Preview License Agreement accompanying
## this package.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 as published by the Free Software
## Foundation and appearing in the file LICENSE.LGPL included in the
## packaging of this file. Please review the following information to
## ensure the GNU Lesser General Public License version 2.1 requirements
## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Nokia gives you certain additional
## rights. These rights are described in the Nokia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
## If you have questions regarding the use of this file, please contact
## Nokia at qt-info@nokia.com.
##
##
##
##
##
##
##
##
## $QT_END_LICENSE$
##
#############################################################################
import sys
from PyQt4.QtCore import SIGNAL
from PyQt4.QtGui import QApplication, QColor, QIcon, QLabel, QMdiArea, QPixmap, \
QPushButton, QTableWidget, QTableWidgetItem, QTextEdit
class Changer:
def __init__(self, mdiArea):
self.mdiArea = mdiArea
self.state = 0
def change(self):
if self.state == 0:
self.mdiArea.cascadeSubWindows()
self.mdiArea.setWindowTitle("Cascade")
elif self.state == 1:
self.mdiArea.tileSubWindows()
self.mdiArea.setWindowTitle("Tile")
self.state = (self.state + 1) % 2
if __name__ == "__main__":
app = QApplication(sys.argv)
pixmap = QPixmap(16, 16)
pixmap.fill(QColor(0, 0, 0, 0))
icon = QIcon(pixmap)
app.setWindowIcon(icon)
mdiArea = QMdiArea()
textEdit = QTextEdit()
textEdit.setPlainText("Qt Quarterly is a paper-based newsletter "
"exclusively available to Qt customers. Every "
"quarter we mail out an issue that we hope will "
"bring added insight and pleasure to your Qt "
"programming, with high-quality technical articles "
"written by Qt experts.")
textWindow = mdiArea.addSubWindow(textEdit)
textWindow.setWindowTitle("A Text Editor")
label = QLabel()
label.setPixmap(QPixmap("../../images/qt-logo.png"))
labelWindow = mdiArea.addSubWindow(label)
labelWindow.setWindowTitle("A Label")
items = (("Henry", 23), ("Bill", 56), ("Susan", 19), ("Jane", 47))
table = QTableWidget(len(items), 2)
for i in range(len(items)):
name, age = items[i]
item = QTableWidgetItem(name)
table.setItem(i, 0, item)
item = QTableWidgetItem(str(age))
table.setItem(i, 1, item)
tableWindow = mdiArea.addSubWindow(table)
tableWindow.setWindowTitle("A Table Widget")
mdiArea.show()
changer = Changer(mdiArea)
button = QPushButton("Cascade")
button.connect(button, SIGNAL("clicked()"), changer.change)
button.show()
sys.exit(app.exec_())
|
lgpl-2.1
| -810,234,570,365,328,900 | 32.441441 | 81 | 0.618804 | false |
EndlessDex/euler
|
13-largeSum.py
|
1
|
5187
|
numStr = ('''37107287533902102798797998220837590246510135740250
46376937677490009712648124896970078050417018260538
74324986199524741059474233309513058123726617309629
91942213363574161572522430563301811072406154908250
23067588207539346171171980310421047513778063246676
89261670696623633820136378418383684178734361726757
28112879812849979408065481931592621691275889832738
44274228917432520321923589422876796487670272189318
47451445736001306439091167216856844588711603153276
70386486105843025439939619828917593665686757934951
62176457141856560629502157223196586755079324193331
64906352462741904929101432445813822663347944758178
92575867718337217661963751590579239728245598838407
58203565325359399008402633568948830189458628227828
80181199384826282014278194139940567587151170094390
35398664372827112653829987240784473053190104293586
86515506006295864861532075273371959191420517255829
71693888707715466499115593487603532921714970056938
54370070576826684624621495650076471787294438377604
53282654108756828443191190634694037855217779295145
36123272525000296071075082563815656710885258350721
45876576172410976447339110607218265236877223636045
17423706905851860660448207621209813287860733969412
81142660418086830619328460811191061556940512689692
51934325451728388641918047049293215058642563049483
62467221648435076201727918039944693004732956340691
15732444386908125794514089057706229429197107928209
55037687525678773091862540744969844508330393682126
18336384825330154686196124348767681297534375946515
80386287592878490201521685554828717201219257766954
78182833757993103614740356856449095527097864797581
16726320100436897842553539920931837441497806860984
48403098129077791799088218795327364475675590848030
87086987551392711854517078544161852424320693150332
59959406895756536782107074926966537676326235447210
69793950679652694742597709739166693763042633987085
41052684708299085211399427365734116182760315001271
65378607361501080857009149939512557028198746004375
35829035317434717326932123578154982629742552737307
94953759765105305946966067683156574377167401875275
88902802571733229619176668713819931811048770190271
25267680276078003013678680992525463401061632866526
36270218540497705585629946580636237993140746255962
24074486908231174977792365466257246923322810917141
91430288197103288597806669760892938638285025333403
34413065578016127815921815005561868836468420090470
23053081172816430487623791969842487255036638784583
11487696932154902810424020138335124462181441773470
63783299490636259666498587618221225225512486764533
67720186971698544312419572409913959008952310058822
95548255300263520781532296796249481641953868218774
76085327132285723110424803456124867697064507995236
37774242535411291684276865538926205024910326572967
23701913275725675285653248258265463092207058596522
29798860272258331913126375147341994889534765745501
18495701454879288984856827726077713721403798879715
38298203783031473527721580348144513491373226651381
34829543829199918180278916522431027392251122869539
40957953066405232632538044100059654939159879593635
29746152185502371307642255121183693803580388584903
41698116222072977186158236678424689157993532961922
62467957194401269043877107275048102390895523597457
23189706772547915061505504953922979530901129967519
86188088225875314529584099251203829009407770775672
11306739708304724483816533873502340845647058077308
82959174767140363198008187129011875491310547126581
97623331044818386269515456334926366572897563400500
42846280183517070527831839425882145521227251250327
55121603546981200581762165212827652751691296897789
32238195734329339946437501907836945765883352399886
75506164965184775180738168837861091527357929701337
62177842752192623401942399639168044983993173312731
32924185707147349566916674687634660915035914677504
99518671430235219628894890102423325116913619626622
73267460800591547471830798392868535206946944540724
76841822524674417161514036427982273348055556214818
97142617910342598647204516893989422179826088076852
87783646182799346313767754307809363333018982642090
10848802521674670883215120185883543223812876952786
71329612474782464538636993009049310363619763878039
62184073572399794223406235393808339651327408011116
66627891981488087797941876876144230030984490851411
60661826293682836764744779239180335110989069790714
85786944089552990653640447425576083659976645795096
66024396409905389607120198219976047599490197230297
64913982680032973156037120041377903785566085089252
16730939319872750275468906903707539413042652315011
94809377245048795150954100921645863754710598436791
78639167021187492431995700641917969777599028300699
15368713711936614952811305876380278410754449733078
40789923115535562561142322423255033685442488917353
44889911501440648020369068063960672322193204149535
41503128880339536053299340368006977710650566631954
81234880673210146739058568557934581403627822703280
82616570773948327592232845941706525094512325230608
22918802058777319719839450180888072429661980811197
77158542502016545090413245809786882778948721859617
72107838435069186155435662884062257473692284509516
20849603980134001723930671666823555245252804609722
53503534226472524250874054075591789781264330331690''')
numArr = [int(num) for num in numStr.split('\n')]
print(sum(numArr))
|
mit
| -5,320,979,860,929,871,000 | 49.359223 | 63 | 0.973973 | false |
RicherMans/QPy
|
src/qpy.py
|
1
|
8949
|
import marshal
import subprocess as sub
import os
from inspect import getsource, getargspec
import re
import types
from functools import wraps
from contextlib import contextmanager
import sys
import qsubsettings
from glob import glob
def _globalimports(func):
for name, val in func.__globals__.iteritems():
if isinstance(val, types.ModuleType) and not name.startswith('__'):
yield val.__name__
def _globalaliasimports(func):
for name, modtype in func.func_globals.items():
if isinstance(modtype, types.ModuleType) and not name.startswith('__'):
yield name
# Procedure needs to be executed in the main file, since the locals are only visible from
# here. We use the localmodules as the real name in the produced python scripts for execution
# e.g. the global imports will be set as: import GLOBAL as LOCAL
# localmodules = [key for key in locals().keys()
# if isinstance(locals()[key], type(sys)) and not key.startswith('__')]
# importedmodules = zip(list(_globalimports()), localmodules)
@contextmanager
def stdout_redirected(to=os.devnull):
'''
import os
with stdout_redirected(to=filename):
print("from Python")
os.system("echo non-Python applications are also supported")
'''
fd = sys.stdout.fileno()
# assert that Python and C stdio write using the same file descriptor
####assert libc.fileno(ctypes.c_void_p.in_dll(libc, "stdout")) == fd == 1
def _redirect_stdout(to):
sys.stdout.close() # + implicit flush()
os.dup2(to.fileno(), fd) # fd writes to 'to' file
sys.stdout = os.fdopen(fd, 'w') # Python writes to fd
with os.fdopen(os.dup(fd), 'w') as old_stdout:
with open(to, 'w') as file:
_redirect_stdout(to=file)
try:
yield # allow code to be run with the redirected stdout
finally:
_redirect_stdout(to=old_stdout) # restore stdout.
# buffering and flags such as
# CLOEXEC may be different
def _getQsubCmd(settings):
return qsubsettings._parseSettings(settings)
def runcluster(numjobs, settings=qsubsettings.smallclustersetting):
'''
    The main function of this module. Decorator which helps run a function in parallel
    on the gridengine cluster.
    numjobs : The number of jobs which will be run for this function
    settings : A dict which contains all qsub commands and parameters.
    It can be extended at will; the keys of this dict are the arguments of qsub, e.g.
{
-o:outputfile,
-P:gpu.p,
....
}
Usage :
@runcluster(5)
def add(a,b):
return a+b
runs the function add(a,b) on the cluster with 5 spawning Jobs
'''
def decorator(func):
@wraps(func)
def wrap(*args, **kw):
            try:
                # Check if the @settings decorator did pass different settings
                qsub_settings = kw['settings']
            except KeyError:
                # Fall back to the decorator default; rebinding ``settings``
                # directly would shadow the closure variable and raise
                # UnboundLocalError when the kwarg is missing
                qsub_settings = settings
            qsubcmd = _getQsubCmd(qsub_settings)
return _run_jobs(qsubcmd, numjobs, func, zip(*args))
return wrap
return decorator
# class SingleFileModuleFinder(modulefinder.ModuleFinder):
# def import_hook(self, name, caller, *arg, **kwarg):
# if caller.__file__ == self.name:
# Only call the parent at the top level.
# return modulefinder.ModuleFinder.import_hook(self, name, caller, *arg,
# **kwarg)
# def __call__(self, node):
# self.name = str(node)
# self.run_script(self.name)
def _getModuleImports(func):
'''
    Gets the module imports of the given function and
    returns a list of tuples, where the first item represents
    the full import name and the second is its local alias
    e.g.:
    import marshal as mar
    The list would have the values:
    [('marshal', 'mar')]
'''
globalimports = list(_globalimports(func))
globalaliases = list(_globalaliasimports(func))
return zip(globalimports, globalaliases)
def _pickleLoadScript(mdict, modules):
'''
mdict: Dictionary containing the following keys:
loaddir: Path to the file which is going to be taken as input
functiondef : The full function definition
functionname: The name of the given function, which will be called
output: The name of the outputfile which will be generated
'''
lines = []
for globalname, localname in modules:
lines.append('import {} as {}'.format(globalname, localname))
lines.append('import marshal')
lines.append("data = marshal.load(open('%(loaddir)s','rb'))" % (mdict))
lines.append("%(functiondef)s" % (mdict))
lines.append("ret=[]")
lines.append('for arg in data:')
lines.append(' ret.append(%(functionname)s(*arg))' % (mdict))
lines.append("marshal.dump(ret,open('%(output)s','wb'))" % (mdict))
return os.linesep.join(lines)
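# Illustrative sketch (an addition; the function name and file names below are
# hypothetical): for modules = [('os', 'os')] and
# mdict = {'loaddir': 'add_data_1', 'functionname': 'add', 'output': 'add_out_1',
#          'functiondef': 'def add(a, b):\n    return a + b'}
# the generated job script looks roughly like:
#
#     import os as os
#     import marshal
#     data = marshal.load(open('add_data_1','rb'))
#     def add(a, b):
#         return a + b
#     ret=[]
#     for arg in data:
#         ret.append(add(*arg))
#     marshal.dump(ret,open('add_out_1','wb'))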
def _suppressedPopen(args):
'''
    Same as a sub.Popen(args) call but suppresses the output
'''
with stdout_redirected():
return sub.Popen(args)
def _run_jobs(qsubcmd, n, func, data):
datachunks = _splitintochunks(data, n)
funcret = []
runningJobs = []
# Parameters which are related to the function which will be decorated
rawsource = getsource(func)
argspec = getargspec(func)
    # The source still contains the decoration header (e.g. @runcluster), so we
    # keep only the lines that do not start with '@', i.e. drop the decorators
filteredlines = re.findall("^(?!@).*", rawsource, re.MULTILINE)
# source = rawsource[firstline:]
source = os.linesep.join(filteredlines)
tmpfiles = [] # Keeps track of all open tempfiles
try:
for i, chunk in enumerate(datachunks):
            # Create some temp files which will be used as the python script and
            # binary dump respectively; the tempfile module cannot be used since
            # marshal does not accept a file wrapper as input
tmpscript = open('{}_run_{}'.format(func.__name__, i + 1), 'w')
datadump = open('{}_data_{}'.format(func.__name__, i + 1), 'w+b')
output = open('{}_out_{}'.format(func.__name__, i + 1), 'w+b')
# output = '{}_out_{}'.format(func.__name__, i + 1)
            # Output needs to be closed separately, since we want to keep the
            # file on the system as long as the qsub command is running
marshal.dump(chunk, datadump)
mdict = {
'functiondef': source,
# The name of the datadump which will be generated using pickle
'loaddir': datadump.name,
'functionname': func.func_name,
'args': argspec.args,
'output': output.name
}
imports = _getModuleImports(func)
tmpscript.write(_pickleLoadScript(mdict, imports))
tmpscript.flush()
            # Close the datadump so its contents are flushed to disk before the job reads it
datadump.close()
cur_qsub = qsubcmd + [tmpscript.name]
job = _suppressedPopen(cur_qsub)
tmpfiles.append((tmpscript, datadump, output))
runningJobs.append(job)
# execfile(tmpscript.name, dict(), ret)
for job, tmpfilestuple in zip(runningJobs, tmpfiles):
# Since we use the -sync flag, we need to wait for the calling command
# to finish
retcode = job.wait()
# If we have any retcode, we keep the log outputs of the gridengine
# alive
tmpscript, dump, output = tmpfilestuple
tmpscript.close()
if retcode:
raise ValueError(
"An error Occured while running the gridengine, please refer to the logs produced in the calling directory")
else: # Otherwise delete the logs of gridengine
for ftoremove in glob('%s*' % (tmpscript.name)):
absremovepath = os.path.join(os.getcwd(), ftoremove)
os.remove(absremovepath)
output.seek(0)
funcret.extend(marshal.load(output))
output.close()
dump.close()
os.remove(output.name)
os.remove(dump.name)
    except:
        # Clean up all temp files, then re-raise so errors (including the
        # ValueError above) are not silently swallowed
        for f in tmpfiles:
            tmpscript, dump, output = f
            output.close()
            tmpscript.close()
            dump.close()
            os.remove(output.name)
            os.remove(tmpscript.name)
            os.remove(dump.name)
        raise
return funcret
def _splitintochunks(l, num):
'''
    Splits the given list l into roughly equal chunks, returned as an iterator.
    It calculates the optimal chunk size for the given num in relation to the length of the list l.
    Note that the returned iterator does not necessarily yield num chunks.
'''
spl, ext = divmod(len(l), num)
if ext:
spl += 1
return (l[i:i + spl] for i in xrange(0, len(l), spl))
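# Illustrative usage sketch (an addition, not part of the original module):
# _splitintochunks drives how work is distributed over the jobs. For 10 items
# and 3 jobs, divmod(10, 3) == (3, 1), so the chunk size becomes 4 and the
# chunks have sizes 4, 4 and 2.
if __name__ == '__main__':
    example_chunks = list(_splitintochunks(range(10), 3))
    assert [len(chunk) for chunk in example_chunks] == [4, 4, 2]
    print 'chunk sizes:', [len(chunk) for chunk in example_chunks]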
|
mit
| -8,531,946,139,641,074,000 | 34.939759 | 128 | 0.613476 | false |
pgmillon/ansible
|
test/units/pytest/plugins/ansible_pytest_coverage.py
|
1
|
1260
|
"""Monkey patch os._exit when running under coverage so we don't lose coverage data in forks, such as with `pytest --boxed`."""
from __future__ import (absolute_import, division, print_function)
def pytest_configure():
try:
import coverage
except ImportError:
coverage = None
try:
test = coverage.Coverage
except AttributeError:
coverage = None
if not coverage:
return
import gc
import os
coverage_instances = []
for obj in gc.get_objects():
if isinstance(obj, coverage.Coverage):
coverage_instances.append(obj)
if not coverage_instances:
coverage_config = os.environ.get('COVERAGE_CONF')
if not coverage_config:
return
coverage_output = os.environ.get('COVERAGE_FILE')
if not coverage_output:
return
cov = coverage.Coverage(config_file=coverage_config)
coverage_instances.append(cov)
else:
cov = None
os_exit = os._exit
def coverage_exit(*args, **kwargs):
for instance in coverage_instances:
instance.stop()
instance.save()
os_exit(*args, **kwargs)
os._exit = coverage_exit
if cov:
cov.start()
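# Illustrative invocation sketch (an addition; the concrete paths are assumptions,
# normally ansible-test provides them): the plugin only creates its own Coverage
# instance when both environment variables are set, e.g.
#
#     COVERAGE_CONF=tests/.coveragerc COVERAGE_FILE=/tmp/coverage/pytest \
#         pytest --boxed test/units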
|
gpl-3.0
| -5,488,508,721,618,347,000 | 21.5 | 127 | 0.601587 | false |
Schpin/schpin-chassis
|
schpin_tote/src/lib/stl.py
|
1
|
1937
|
import subprocess, os
from lib.util import convert_scad
def render(filename, scad_cfg, mirror):
""" renders scad module defined by scad_cfg into stl 'filename' """
assert filename[-1] == 'b'
scad = "../scad/tmp.scad"
with open(scad, "w") as fd:
fd.write("include <model.scad>;\n $fn=32;")
if mirror is not None:
fd.write("mirror({})".format([int(x) for x in mirror]))
args = ",".join([str(convert_scad(x)) for x in scad_cfg['args']])
fd.write(scad_cfg['module'] + "(" + args + ");")
tmp_stl = "../stl/tmp.stl"
print("Rendering: ", filename)
with open(os.devnull, 'wb') as devnull:
subprocess.check_call(['openscad', '-o', tmp_stl, scad], stdout=devnull, stderr=devnull)
subprocess.check_call(['ivcon', tmp_stl, filename])
subprocess.check_call(["admesh", "--write-binary-stl=" + filename, "--scale=0.001", filename])
os.remove(tmp_stl)
os.remove(scad)
def render_chain(segments, name, folder, mirror):
if not os.path.exists("../stl/" + folder):
os.mkdir("../stl/" + folder)
if 'scad' in segments[name]:
render("../stl/" + folder + name + ".stlb", segments[name]['scad'], mirror)
if 'next_segment' in segments[name]:
render_chain(segments, segments[name]['next_segment'], folder, mirror)
def render_stl(cfg):
render("../stl/base_link.stlb", {"module": "body", "args": []}, mirror=None)
if not os.path.exists("../stl/leg/"):
os.mkdir("../stl/leg")
for leg, leg_cfg in cfg['legs'].items():
mirror = None
if 'mirror' in leg_cfg:
mirror = leg_cfg['mirror']
render_chain(
leg_cfg['segments'],
leg_cfg['chain_root'],
folder="leg/" + leg + "/",
mirror=mirror
)
render_chain(
cfg['gimbal']['segments'], cfg['gimbal']['chain_root'], folder="gimbal/", mirror=None
)
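# Illustrative sketch (an addition; the names and values are hypothetical) of the
# cfg structure that render_stl() consumes, inferred from the lookups above:
#
#     cfg = {
#         'legs': {
#             'front_left': {
#                 'chain_root': 'coxa',
#                 'mirror': [1, 0, 0],  # optional
#                 'segments': {
#                     'coxa': {'scad': {'module': 'coxa', 'args': [30]},
#                              'next_segment': 'femur'},
#                     'femur': {'scad': {'module': 'femur', 'args': []}},
#                 },
#             },
#         },
#         'gimbal': {
#             'chain_root': 'yaw',
#             'segments': {'yaw': {'scad': {'module': 'gimbal_yaw', 'args': []}}},
#         },
#     }
#     render_stl(cfg)  # requires openscad, ivcon and admesh on the PATH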
|
gpl-3.0
| -4,759,282,188,491,088,000 | 28.348485 | 98 | 0.56634 | false |
bmenendez/20up
|
tntwrapper.py
|
1
|
7920
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright (C) 2016 Borja Menendez Moreno
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Authors: Borja Menéndez Moreno <info20up@gmail.com>
This is the API Wrapper for the 20up backup program. This wrapper allows
a client to retrieve information about his specific account.
"""
import os, urllib, string
from time import sleep
from tntapi import *
CONSTANT_FILL = 6
MAX_TRIES = 10
ROOTPATH = os.getcwdu()
PHOTOS = 'fotos'
COMMENTS = 'comentarios'
JPG = '.jpg'
TXT = '.txt'
def getFullName(picture, counter):
return normalize(string.zfill(counter, CONSTANT_FILL) + '_' + picture[2] + '_' + picture[1])
class Wrapper():
"""
The wrapper for the tntapi.
This class eases the connection.
When constructed, it raises a RuntimeError if it is impossible to log in the
social network.
"""
def __init__(self, browser, console=False):
self.tnt = API(browser)
self.isLogged = False
self.console = console
def waitForLogin(self):
self.tnt.waitForLogin()
def downloadPicturesFromAlbum(self, album, totalPictures, alreadyDownloaded, oldFirstPicture, comments=False):
"""
Download pictures from a given album into the given directory.
Args:
album: the album.
totalPictures: the total number of pictures of the user.
alreadyDownloaded: the number of pictures already downloaded.
oldFirstPicture: the first picture of the previous album.
            comments: indicates whether to obtain the comments of the pictures or not.
Raises:
RuntimeError if the user is not already logged in.
"""
if not self.isLogged:
raise RuntimeError('Es necesario estar logueado en la red social')
if self.console:
print '|'
print '| Album', album[0]
print '|'
print '| Obteniendo informacion del album'
joinPath = os.path.join(ROOTPATH, PHOTOS)
if not os.path.exists(joinPath):
if self.console:
print '| Creando directorio donde se alojaran todas las fotos...'
os.makedirs(joinPath)
if self.console:
print '| Directorio creado'
albumPath = os.path.join(joinPath, album[0])
if not os.path.exists(albumPath):
if self.console:
print '| Creando directorio donde se alojaran las fotos del album...'
os.makedirs(albumPath)
if self.console:
print '| Directorio creado'
os.chdir(albumPath)
if self.console:
print '| Comenzando la descarga de las fotos del album...'
counter = 1
newFirstPicture = self.tnt.getFirstPicture(album[2], oldFirstPicture)
firstPicture = ''
lastPicture = ['']
oldSrc = ''
while counter <= album[1] and newFirstPicture != oldFirstPicture:
pic = self.tnt.getPicture(oldSrc, comments)
oldSrc = pic
if counter == 1:
firstPicture = pic
elif pic[0] == firstPicture[0]:
break
if lastPicture[0] != pic[0]:
self.savePicture(pic, counter, album[1], totalPictures, alreadyDownloaded + counter)
if comments:
self.saveComments(pic, counter)
counter += 1
lastPicture = pic
self.tnt.getNextPicture()
return newFirstPicture
def savePicture(self, picture, myCounter, totalAlbum, totalPics, alreadyDown):
"""
Save a picture.
Args:
picture: a picture to be saved.
myCounter: the counter for the picture.
totalAlbum: the number of pictures of the album.
totalPics: the number of pictures of the user.
alreadyDown: the number of pictures already downloaded.
"""
sleep(0.25)
picName = getFullName(picture, myCounter) + JPG
if not os.path.exists(picName):
if self.console:
totalPerc = str(100 * alreadyDown / totalPics)
albumPerc = str(100 * myCounter / totalAlbum)
print '|'
print '| [' + totalPerc + '% total] [' + albumPerc + '% album]'
print '| Descargando foto ' + picName + '...'
urllib.urlretrieve(picture[0], picName)
def saveComments(self, picture, myCounter):
"""
Save a picture's comments.
Args:
picture: to obtain the comments.
myCounter: to know the name of the file with comments.
"""
commentsFileName = getFullName(picture, myCounter) + TXT
if not os.path.exists(commentsFileName) and picture[3] != []:
if self.console:
print '| Descargando sus comentarios...'
file2write = open(commentsFileName, 'w')
for comment in picture[3]:
file2write.write('******************\r\n')
file2write.write(comment.encode('utf-8') + '\r\n')
file2write.close()
def downloadAllPictures(self, comments=False):
"""
Download all the pictures for all the albums.
Args:
            comments: indicates whether to obtain the comments of the pictures or not.
Raises:
RuntimeError if the user is not already logged in.
"""
allAlbums = self.tnt.getAllAlbums()
self.isLogged = (allAlbums != None)
if not self.isLogged:
return -1
totalPictures = 0
for album in allAlbums:
totalPictures += album[1]
alreadyDownloaded = 0
oldFirstPicture = ''
for album in allAlbums:
oldFirstPicture = self.downloadPicturesFromAlbum(album, totalPictures, alreadyDownloaded, oldFirstPicture, comments)
alreadyDownloaded += album[1]
return 0
def goToPrivates(self):
"""
Call the API to go to the private messages' page.
"""
self.tnt.goToPrivates()
def downloadAllComments(self):
"""
Download all the comments in the wall.
"""
os.chdir(ROOTPATH)
file2write = open(COMMENTS + TXT, 'w')
tries = 0
discard = 0
while True:
comments = self.tnt.loadMoreComments(discard)
if not comments:
if tries < MAX_TRIES:
tries += 1
sleep(0.3)
else:
break
else:
tries = 1
discard += len(comments)
if self.console:
print '| Descargados ', discard, 'comentarios'
self.saveWall(comments, file2write)
file2write.close()
def saveWall(self, comments, file2write):
"""
Write the comments in the file.
Args:
comments: the list of comments to be saved.
file2write: the file to write in.
"""
for comment in comments:
file2write.write(comment.encode('utf-8') + '\r\n\r\n')
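# Illustrative usage sketch (an addition; 'browser' stands for whatever object
# tntapi.API expects, which is not shown here):
#
#     wrapper = Wrapper(browser, console=True)
#     wrapper.waitForLogin()
#     wrapper.downloadAllPictures(comments=True)  # albums plus picture comments
#     wrapper.goToPrivates()
#     wrapper.downloadAllComments()               # wall comments into comentarios.txt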
|
gpl-3.0
| 2,368,601,534,291,196,400 | 33.580786 | 128 | 0.572042 | false |
jalourenco/wagtaildemo
|
demo/templatetags/demo_tags.py
|
1
|
5358
|
from datetime import date
from django import template
from django.conf import settings
from demo.models import *
register = template.Library()
# settings value
@register.assignment_tag
def get_googe_maps_key():
return getattr(settings, 'GOOGLE_MAPS_KEY', "")
@register.assignment_tag(takes_context=True)
def get_site_root(context):
# NB this returns a core.Page, not the implementation-specific model used
# so object-comparison to self will return false as objects would differ
return context['request'].site.root_page
def has_menu_children(page):
if page.get_children().filter(live=True, show_in_menus=True):
return True
else:
return False
# Retrieves the top menu items - the immediate children of the parent page
# The has_menu_children method is necessary because the bootstrap menu requires
# a dropdown class to be applied to a parent
@register.inclusion_tag('demo/tags/top_menu.html', takes_context=True)
def top_menu(context, parent, calling_page=None):
menuitems = parent.get_children().filter(
live=True,
show_in_menus=True
)
for menuitem in menuitems:
menuitem.show_dropdown = has_menu_children(menuitem)
return {
'calling_page': calling_page,
'menuitems': menuitems,
# required by the pageurl tag that we want to use within this template
'request': context['request'],
}
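# Illustrative template usage (an addition; the context variable names are
# assumptions about how the page templates are set up):
#
#     {% load demo_tags %}
#     {% get_site_root as site_root %}
#     {% top_menu parent=site_root calling_page=self %}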
# Retrieves the children of the top menu items for the drop downs
@register.inclusion_tag('demo/tags/top_menu_children.html', takes_context=True)
def top_menu_children(context, parent):
menuitems_children = parent.get_children()
menuitems_children = menuitems_children.filter(
live=True,
show_in_menus=True
)
return {
'parent': parent,
'menuitems_children': menuitems_children,
# required by the pageurl tag that we want to use within this template
'request': context['request'],
}
# Retrieves the secondary links for the 'also in this section' links
# - either the children or siblings of the current page
@register.inclusion_tag('demo/tags/secondary_menu.html', takes_context=True)
def secondary_menu(context, calling_page=None):
pages = []
if calling_page:
pages = calling_page.get_children().filter(
live=True,
show_in_menus=True
)
# If no children, get siblings instead
if len(pages) == 0:
pages = calling_page.get_siblings(inclusive=False).filter(
live=True,
show_in_menus=True
)
return {
'pages': pages,
# required by the pageurl tag that we want to use within this template
'request': context['request'],
}
# Retrieves all live pages which are children of the calling page
# for standard index listing
@register.inclusion_tag(
'demo/tags/standard_index_listing.html',
takes_context=True
)
def standard_index_listing(context, calling_page):
pages = calling_page.get_children().filter(live=True)
return {
'pages': pages,
# required by the pageurl tag that we want to use within this template
'request': context['request'],
}
# Person feed for home page
@register.inclusion_tag(
'demo/tags/person_listing_homepage.html',
takes_context=True
)
def person_listing_homepage(context, count=2):
people = PersonPage.objects.filter(live=True).order_by('?')
return {
'people': people[:count],
# required by the pageurl tag that we want to use within this template
'request': context['request'],
}
# Blog feed for home page
@register.inclusion_tag(
'demo/tags/blog_listing_homepage.html',
takes_context=True
)
def blog_listing_homepage(context, count=2):
blogs = BlogPage.objects.filter(live=True).order_by('-date')
return {
'blogs': blogs[:count],
# required by the pageurl tag that we want to use within this template
'request': context['request'],
}
# Events feed for home page
@register.inclusion_tag(
'demo/tags/event_listing_homepage.html',
takes_context=True
)
def event_listing_homepage(context, count=2):
events = EventPage.objects.filter(live=True)
events = events.filter(date_from__gte=date.today()).order_by('date_from')
return {
'events': events[:count],
# required by the pageurl tag that we want to use within this template
'request': context['request'],
}
# Advert snippets
@register.inclusion_tag('demo/tags/adverts.html', takes_context=True)
def adverts(context):
return {
'adverts': Advert.objects.all(),
'request': context['request'],
}
# Format times e.g. on event page
@register.filter
def time_display(time):
# Get hour and minute from time object
hour = time.hour
minute = time.minute
# Convert to 12 hour format
if hour >= 12:
pm = True
hour -= 12
else:
pm = False
if hour == 0:
hour = 12
# Hour string
hour_string = str(hour)
# Minute string
if minute != 0:
minute_string = "." + str(minute)
else:
minute_string = ""
# PM string
if pm:
pm_string = "pm"
else:
pm_string = "am"
# Join and return
return "".join([hour_string, minute_string, pm_string])
|
bsd-3-clause
| 3,196,997,872,230,201,300 | 27.5 | 79 | 0.653602 | false |