| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| stringlengths 6–947k | stringlengths 5–100 | stringlengths 4–231 | stringclasses 1 value | stringclasses 15 values | int64 6–947k | float64 0–0.34 |
# -*- coding: utf-8 -*-
import sys, os, time
from Tools.HardwareInfo import HardwareInfo
def getVersionString():
return getImageVersionString()
def getImageVersionString():
try:
if os.path.isfile('/var/lib/opkg/status'):
st = os.stat('/var/lib/opkg/status')
else:
st = os.stat('/usr/lib/ipkg/status')
tm = time.localtime(st.st_mtime)
if tm.tm_year >= 2011:
return time.strftime("%Y-%m-%d %H:%M:%S", tm)
except:
pass
return _("unavailable")
def getFlashDateString():
try:
return time.strftime(_("%Y-%m-%d %H:%M"), time.localtime(os.stat("/boot").st_ctime))
except:
return _("unknown")
def getEnigmaVersionString():
import enigma
enigma_version = enigma.getEnigmaVersionString()
if '-(no branch)' in enigma_version:
        enigma_version = enigma_version[:-12]
return enigma_version
def getGStreamerVersionString():
import enigma
return enigma.getGStreamerVersionString()
def getKernelVersionString():
try:
return open("/proc/version","r").read().split(' ', 4)[2].split('-',2)[0]
except:
return _("unknown")
def getHardwareTypeString():
return HardwareInfo().get_device_string()
def getImageTypeString():
try:
return open("/etc/issue").readlines()[-2].capitalize().strip()[:-6]
except:
return _("undefined")
def getCPUInfoString():
try:
cpu_count = 0
cpu_speed = 0
for line in open("/proc/cpuinfo").readlines():
line = [x.strip() for x in line.strip().split(":")]
if line[0] in ("system type", "model name"):
processor = line[1].split()[0]
elif line[0] == "cpu MHz":
cpu_speed = "%1.0f" % float(line[1])
elif line[0] == "processor":
cpu_count += 1
if not cpu_speed:
try:
cpu_speed = int(open("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq").read()) / 1000
except:
cpu_speed = "-"
if os.path.isfile('/proc/stb/fp/temp_sensor_avs'):
temperature = open("/proc/stb/fp/temp_sensor_avs").readline().replace('\n','')
return "%s %s MHz (%s) %s°C" % (processor, cpu_speed, ngettext("%d core", "%d cores", cpu_count) % cpu_count, temperature)
return "%s %s MHz (%s)" % (processor, cpu_speed, ngettext("%d core", "%d cores", cpu_count) % cpu_count)
except:
return _("undefined")
def getDriverInstalledDate():
try:
from glob import glob
driver = [x.split("-")[-2:-1][0][-8:] for x in open(glob("/var/lib/opkg/info/*-dvb-modules-*.control")[0], "r") if x.startswith("Version:")][0]
return "%s-%s-%s" % (driver[:4], driver[4:6], driver[6:])
except:
return _("unknown")
def getPythonVersionString():
try:
import commands
status, output = commands.getstatusoutput("python -V")
return output.split(' ')[1]
except:
return _("unknown")
def getDriverVuInstalledDate():
try:
driver = os.popen("opkg list-installed | grep vuplus-dvb-").read().strip()
driver = driver.split("-")
#return driver[:4] + "-" + driver[4:6] + "-" + driver[6:]
return driver[5]
except:
return "unknown"
# For modules that do "from About import about"
about = sys.modules[__name__]
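# Hedged usage sketch (matching the import style noted above; the printed
# value is only illustrative):
#   from About import about
#   print about.getKernelVersionString()   # e.g. '3.14'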
| openmb/openblackhole-enigma2 | lib/python/Components/About.py | Python | gpl-2.0 | 2,999 | 0.037012 |
"""
vg composite command
Build composite images from centered images, based on records in composites.csv.
See also vgInitComposites.py, which builds initial pass at composites.csv.
Note: even single channel images get a composite image (bw).
Uses centered image if available, otherwise uses the plain adjusted image.
"""
import os
import csv
import cv2
import config
import lib
import libimg
import vgCenter
import vgInpaint
def printStatus(channelRows,volume,nfile,startId):
"print status message"
nchannels = len(channelRows)
print 'Volume %s compositing %d: %s (%d channels) \r' % \
(volume,nfile,startId,nchannels),
def processChannels(channelRows, optionAlign):
"""
Combine channel images into new file, attempting to align them if optionAlign is True.
channelRows is an array of rows corresponding to rows in the composites.csv file.
should have [compositeId,centerId,volnum,filter,weight,x,y]
eg [
['C434823','C434823','5101','Orange']
['C434823','C434825','5101','Blue','0.8','42','18']
['C434823','C434827','5101','Green','1','-50','83']
]
they are combined and written to a file in the composites folder, step05_composites.
Can have single channel groups.
If optionAlign is True, will attempt to align the channels, and will return updated
x,y values in channelRows.
"""
#. could also have zoom factor, warp info, rotate
# for row in channelRows: print row
centered = False
weightXYFilledOut = False
if len(channelRows) > 0:
volume = ''
compositeId = ''
channels = []
for row in channelRows:
compositeId = row[config.colCompositesCompositeId]
fileId = row[config.colCompositesFileId]
volume = row[config.colCompositesVolume]
filter = row[config.colCompositesFilter]
weight = float(row[config.colCompositesWeight]) \
if len(row)>config.colCompositesWeight else 1.0
x = int(row[config.colCompositesX]) if len(row)>config.colCompositesX else 0
y = int(row[config.colCompositesY]) if len(row)>config.colCompositesY else 0
if len(row)>config.colCompositesWeight: weightXYFilledOut = True
# if don't have an inpaint or centered file, use the adjusted file
channelfilepath = lib.getFilepath('inpaint', volume, fileId)
if os.path.isfile(channelfilepath):
centered = True
else:
channelfilepath = lib.getFilepath('center', volume, fileId, filter)
if os.path.isfile(channelfilepath):
centered = True
else:
channelfilepath = lib.getFilepath('adjust', volume, fileId, filter)
if os.path.isfile(channelfilepath):
channel = [fileId,filter,channelfilepath,weight,x,y]
channels.append(channel)
if len(channels)>0:
outfilepath = lib.getFilepath('composite', volume, compositeId)
if centered: optionAlign = False # don't try to align images if already centered
if weightXYFilledOut: optionAlign = False # don't align if already have values
# combine the channel images
im, channels = libimg.combineChannels(channels, optionAlign)
libimg.imwrite(outfilepath, im)
# if -align: update channels x,y etc
if optionAlign:
# make sure all the rows have all their columns
for row in channelRows:
while len(row)<=config.colCompositesY:
row.append('')
# find each row in channelRows and update weights and x,y translation
for row in channels:
for row2 in channelRows:
if row2[config.colCompositesFileId]==row[config.colChannelFileId]:
row2[config.colCompositesWeight]=row[config.colChannelWeight]
row2[config.colCompositesX]=row[config.colChannelX]
row2[config.colCompositesY]=row[config.colChannelY]
# print [ch[:-1] for ch in channels if ch]
# return channels
# caller needs to know if x,y values were changed
xyChanged = not centered
return xyChanged
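# Hedged example (rows copied from the processChannels docstring above):
# combine three channels for composite C434823 without re-alignment:
#   rows = [['C434823','C434823','5101','Orange'],
#           ['C434823','C434825','5101','Blue','0.8','42','18'],
#           ['C434823','C434827','5101','Green','1','-50','83']]
#   processChannels(rows, optionAlign=False)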
def writeUpdates(csvNew, channelRows):
""
for row in channelRows:
# row = [compositeId, fileId, volume, filter, weight, x, y]
csvNew.writerow(row)
# print row
def vgComposite(filterVolume=None, filterCompositeId=None, filterTargetPath=None,
optionOverwrite=False, optionAlign=False, directCall=True):
"""
Build composite images by combining channel images.
Walks over records in composites.csv, merges channel images, writes to composites folder.
eg
composites.csv:
compositeId,centerId,volume,filter,weight,x,y
C1537728,C1537728,5103,Blue
C1537728,C1537730,5103,Orange,0.8
C1537728,C1537732,5103,Green,1,10,3
=>
step05_composites/VGISS_5103/C1537728_composite.jpg
Note: weight,x,y are optional - default to 1,0,0
"""
if filterCompositeId: filterCompositeId = filterCompositeId.upper() # always capital C
# note: targetPathParts = [system, craft, target, camera]
targetPathParts = lib.parseTargetPath(filterTargetPath)
# build volume for previous step
if filterVolume:
filterVolume = str(filterVolume)
outputSubfolder = lib.getSubfolder('composite', filterVolume)
# quit if volume folder exists
if os.path.isdir(outputSubfolder) and optionOverwrite==False:
if directCall: print "Folder exists: " + outputSubfolder
return
# build the previous step, if not already there
vgCenter.vgCenter(filterVolume, '', optionOverwrite=False, directCall=False)
# vgInpaint.vgInpaint(filterVolume, '', optionOverwrite=False, directCall=False)
# make folder
lib.mkdir(outputSubfolder)
# read small dbs into memory
compositingInfo = lib.readCsv(config.dbCompositing) # when to turn centering on/off
retargetingInfo = lib.readCsv(config.dbRetargeting) # remapping listed targets
# open files.csv so can join to it
csvFiles, fFiles = lib.openCsvReader(config.dbFiles)
# open compositesNew.csv for writing
if optionAlign:
lib.rm(config.dbCompositesNew)
csvNew, fNew = lib.openCsvWriter(config.dbCompositesNew)
# iterate over composites.csv records
csvComposites, fComposites = lib.openCsvReader(config.dbComposites)
startId = ''
startVol = ''
channelRows = []
nfile = 0
for row in csvComposites:
# get composite info
compositeId = row[config.colCompositesCompositeId]
fileId = row[config.colCompositesFileId]
volume = row[config.colCompositesVolume]
# join on files.csv to get more image properties
# (note: since compositeId repeats, we might have already advanced to the next record,
# in which case rowFiles will be None. But the target properties will remain the same.)
rowFiles = lib.getJoinRow(csvFiles, config.colFilesFileId, compositeId)
if rowFiles:
# get file info
filter = rowFiles[config.colFilesFilter]
system = rowFiles[config.colFilesSystem]
craft = rowFiles[config.colFilesCraft]
target = rowFiles[config.colFilesTarget]
camera = rowFiles[config.colFilesCamera]
# relabel target field if necessary - see db/targets.csv for more info
target = lib.retarget(retargetingInfo, compositeId, target)
# filter on volume, composite id and targetpath
volumeOk = (volume==filterVolume if filterVolume else True)
compositeOk = (compositeId==filterCompositeId if filterCompositeId else True)
targetPathOk = (lib.targetMatches(targetPathParts, system, craft, target, camera) \
if filterTargetPath else True)
doComposite = (volumeOk and compositeOk and targetPathOk)
if doComposite:
# gather image filenames into channelRows so can merge them
if compositeId == startId:
channelRows.append(row)
else:
# we're seeing a new compositeId, so process all the gathered channels
printStatus(channelRows,startVol,nfile,startId)
processChannels(channelRows, optionAlign)
# processChannels(channelRows, optionAlign, csvNew)
# xyChanged = processChannels(channelRows, optionAlign)
# if optionAlign and xyChanged:
# writeUpdates(csvNew, channelRows)
startId = compositeId
startVol = volume
channelRows = [row]
nfile += 1
# process the last leftover group
# print channelRows
printStatus(channelRows,startVol,nfile,startId)
processChannels(channelRows, optionAlign)
# processChannels(channelRows, optionAlign, csvNew)
# xyChanged = processChannels(channelRows,optionAlign)
# if optionAlign and xyChanged:
# writeUpdates(csvNew, channelRows)
print
if optionAlign: fNew.close()
fFiles.close()
fComposites.close()
if __name__ == '__main__':
os.chdir('..')
# vgComposite(5117)
# vgComposite(8207)
# vgComposite(None,'c1617245')
# ariel - works
# vgComposite(None,'c2684338',None,optionOverwrite=True)
# automatic - nowork
# vgComposite(None,'c2684338',None,optionOverwrite=True, optionAlign=True)
# filename = lib.getFilepath('composite','7206','c2684338')
# ganymede
# folder = '../../data/step04_adjust/VGISS_5117/'
# file1 = folder + 'C1640236_adjusted_Blue.jpg'
# file2 = folder + 'C1640234_adjusted_Violet.jpg'
# file3 = folder + 'C1640238_adjusted_Orange.jpg'
# vgComposite(None,'C1640232',None,optionOverwrite=True, optionAlign=True)
# filename = lib.getFilepath('composite','5117','C1640232')
# vgComposite(None,'C1640222',None,optionOverwrite=True, optionAlign=True)
# filename = lib.getFilepath('composite','5117','C1640222')
vgComposite(None,'C1642718',None,optionOverwrite=True, optionAlign=True)
filename = lib.getFilepath('composite','5117','C1642718')
im = cv2.imread(filename)
libimg.show(im)
# uranus
# vgComposite(None,'C2656801',True)
# filename = lib.getFilepath('composite','7205','C2656801')
# im = cv2.imread(filename)
# libimg.show(im)
print 'done'
| bburns/PyVoyager | src/vgComposite.py | Python | mit | 10,684 | 0.007675 |
import os
import sys
import unittest2
import mitogen
import mitogen.ssh
import mitogen.utils
import testlib
import plain_old_module
def get_sys_executable():
return sys.executable
def get_os_environ():
return dict(os.environ)
class LocalTest(testlib.RouterMixin, unittest2.TestCase):
stream_class = mitogen.ssh.Stream
def test_stream_name(self):
context = self.router.local()
pid = context.call(os.getpid)
self.assertEquals('local.%d' % (pid,), context.name)
class PythonPathTest(testlib.RouterMixin, unittest2.TestCase):
stream_class = mitogen.ssh.Stream
def test_inherited(self):
context = self.router.local()
self.assertEquals(sys.executable, context.call(get_sys_executable))
def test_string(self):
os.environ['PYTHON'] = sys.executable
context = self.router.local(
python_path=testlib.data_path('env_wrapper.sh'),
)
self.assertEquals(sys.executable, context.call(get_sys_executable))
env = context.call(get_os_environ)
self.assertEquals('1', env['EXECUTED_VIA_ENV_WRAPPER'])
def test_list(self):
context = self.router.local(
python_path=[
testlib.data_path('env_wrapper.sh'),
"magic_first_arg",
sys.executable
]
)
self.assertEquals(sys.executable, context.call(get_sys_executable))
env = context.call(get_os_environ)
self.assertEquals('magic_first_arg', env['ENV_WRAPPER_FIRST_ARG'])
self.assertEquals('1', env['EXECUTED_VIA_ENV_WRAPPER'])
if __name__ == '__main__':
unittest2.main()
| ConnectBox/wifi-test-framework | ansible/plugins/mitogen-0.2.3/tests/local_test.py | Python | mit | 1,663 | 0 |
#
# Copyright 2020 University of Washington
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Hao Yin and Sebastien Deronne
#
import numpy as np
import math
def bianchi_ax(data_rate, ack_rate, k, difs):
# Parameters for 11ax
nA = np.linspace(5, 50, 10)
CWmin = 15
CWmax = 1023
L_DATA = 1500 * 8 # data size in bits
L_ACK = 14 * 8 # ACK size in bits
#B = 1/(CWmin+1)
B=0
EP = L_DATA/(1-B)
T_GI = 800e-9 # guard interval in seconds
T_SYMBOL_ACK = 4e-6 # symbol duration in seconds (for ACK)
T_SYMBOL_DATA = 12.8e-6 + T_GI # symbol duration in seconds (for DATA)
T_PHY_ACK = 20e-6 # PHY preamble & header duration in seconds (for ACK)
T_PHY_DATA = 44e-6 # PHY preamble & header duration in seconds (for DATA)
L_SERVICE = 16 # service field length in bits
    L_TAIL = 6 # tail length in bits
L_MAC = (30) * 8 # MAC header size in bits
L_APP_HDR = 8 * 8 # bits added by the upper layer(s)
T_SIFS = 16e-6
T_DIFS = 34e-6
T_SLOT = 9e-6
delta = 1e-7
Aggregation_Type = 'A_MPDU' #A_MPDU or A_MSDU (HYBRID not fully supported)
K_MSDU = 1
K_MPDU = k
L_MPDU_HEADER = 4
L_MSDU_HEADER = 14 * 8
if (k <= 1):
Aggregation_Type = 'NONE'
N_DBPS = data_rate * T_SYMBOL_DATA # number of data bits per OFDM symbol
if (Aggregation_Type == 'NONE'):
N_SYMBOLS = math.ceil((L_SERVICE + (L_MAC + L_DATA + L_APP_HDR) + L_TAIL)/N_DBPS)
T_DATA = T_PHY_DATA + (T_SYMBOL_DATA * N_SYMBOLS)
K_MPDU = 1
K_MSDU = 1
if (Aggregation_Type == 'A_MSDU'):
N_SYMBOLS = math.ceil((L_SERVICE + K_MPDU*(L_MAC + L_MPDU_HEADER + K_MSDU*(L_MSDU_HEADER + L_DATA + L_APP_HDR)) + L_TAIL)/N_DBPS)
T_DATA = T_PHY_DATA + (T_SYMBOL_DATA * N_SYMBOLS)
if (Aggregation_Type == 'A_MPDU'):
N_SYMBOLS = math.ceil((L_SERVICE + K_MPDU*(L_MAC + L_MPDU_HEADER + L_DATA + L_APP_HDR) + L_TAIL)/N_DBPS)
T_DATA = T_PHY_DATA + (T_SYMBOL_DATA * N_SYMBOLS)
#Calculate ACK Duration
N_DBPS = ack_rate * T_SYMBOL_ACK # number of data bits per OFDM symbol
N_SYMBOLS = math.ceil((L_SERVICE + L_ACK + L_TAIL)/N_DBPS)
T_ACK = T_PHY_ACK + (T_SYMBOL_ACK * N_SYMBOLS)
T_s = T_DATA + T_SIFS + T_ACK + T_DIFS
if difs == 1: #DIFS
T_C = T_DATA + T_DIFS
else:
T_s = T_DATA + T_SIFS + T_ACK + T_DIFS + delta
T_C = T_DATA + T_DIFS + T_SIFS + T_ACK + delta
T_S = T_s/(1-B) + T_SLOT
S_bianchi = np.zeros(len(nA))
for j in range(len(nA)):
n = nA[j]*1
W = CWmin + 1
m = math.log2((CWmax + 1)/(CWmin + 1))
tau1 = np.linspace(0, 0.1, 100000)
p = 1 - np.power((1 - tau1),(n - 1))
ps = p*0
for i in range(int(m)):
ps = ps + np.power(2*p, i)
taup = 2./(1 + W + p*W*ps)
b = np.argmin(np.abs(tau1 - taup))
tau = taup[b]
Ptr = 1 - math.pow((1 - tau), int(n))
Ps = n*tau*math.pow((1 - tau), int(n-1))/Ptr
S_bianchi[j] = K_MSDU*K_MPDU*Ps*Ptr*EP/((1-Ptr)*T_SLOT+Ptr*Ps*T_S+Ptr*(1-Ps)*T_C)/1e6
bianchi_result = S_bianchi
return bianchi_result
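# Hedged sanity check: HeMcs0 at 20 MHz (rates taken from the tables below),
# no aggregation (k=1), DIFS model; returns saturation throughput in Mbit/s
# for 5..50 stations:
#   bianchi_ax(8.603e6, 6e6, k=1, difs=1)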
def str_result(bianchi_result, mcs, bw):
str_bianchi = ' {' + '\"HeMcs{:d}'.format(mcs) + '_{:d}MHz\"'.format(bw) + ', {\n'
for i in range (len(bianchi_result)):
str_tmp = ' {' + '{:d}, {:.4f}'.format(5*(i+1), bianchi_result[i]) +'},\n'
str_bianchi = str_bianchi + str_tmp
str_bianchi = str_bianchi + " }},\n"
print(str_bianchi)
return str_bianchi
# Settings for different MCS and mode
data_rates_20MHz = [8.603e6, 17.206e6, 25.8e6, 34.4e6, 51.5e6, 68.8e6, 77.4e6, 86e6, 103.2e6, 114.7e6, 129e6, 143.4e6]
ack_rates_20MHz = [6e6, 12e6, 12e6, 24e6, 24e6, 24e6, 24e6, 24e6, 24e6, 24e6, 24e6, 24e6]
data_rates_40MHz = [17.2e6, 34.4e6, 51.5e6, 68.8e6, 103.2e6, 137.6e6, 154.9e6, 172.1e6, 206.5e6, 229.4e6, 258.1e6, 286.8e6]
ack_rates_40MHz = [6e6, 12e6, 12e6, 24e6, 24e6, 24e6, 24e6, 24e6, 24e6, 24e6, 24e6, 24e6]
data_rates_80MHz = [36e6, 72.1e6, 108.1e6, 144.1e6, 216.2e6, 288.2e6, 324.3e6, 360.3e6, 432.4e6, 480.4e6, 540.4e6, 600.5e6]
ack_rates_80MHz = [6e6, 12e6, 12e6, 24e6, 24e6, 24e6, 24e6, 24e6, 24e6, 24e6, 24e6, 24e6]
data_rates_160MHz = [72.1e6, 144.1e6, 216.2e6, 288.2e6, 432.4e6, 576.5e6, 648.5e6, 720.6e6, 864.7e6, 960.8e6, 1080.9e6, 1201e6]
ack_rates_160MHz = [6e6, 12e6, 12e6, 24e6, 24e6, 24e6, 24e6, 24e6, 24e6, 24e6, 24e6, 24e6]
# Generate results with frame aggregation disabled
k = 1
difs = 1
fo = open("bianchi_11ax_difs.txt", "w")
for i in range(len(data_rates_20MHz)):
bianchi_result = bianchi_ax(data_rates_20MHz[i], ack_rates_20MHz[i], k, difs)
str_s = str_result(bianchi_result, i, 20)
fo.write(str_s)
for i in range(len(data_rates_40MHz)):
bianchi_result = bianchi_ax(data_rates_40MHz[i], ack_rates_40MHz[i], k, difs)
str_s = str_result(bianchi_result, i, 40)
fo.write(str_s)
for i in range(len(data_rates_80MHz)):
bianchi_result = bianchi_ax(data_rates_80MHz[i], ack_rates_80MHz[i], k, difs)
str_s = str_result(bianchi_result, i, 80)
fo.write(str_s)
for i in range(len(data_rates_160MHz)):
bianchi_result = bianchi_ax(data_rates_160MHz[i], ack_rates_160MHz[i], k, difs)
str_s = str_result(bianchi_result, i, 160)
fo.write(str_s)
fo.close()
difs = 0
fo = open("bianchi_11ax_eifs.txt", "w")
for i in range(len(data_rates_20MHz)):
bianchi_result = bianchi_ax(data_rates_20MHz[i], ack_rates_20MHz[i], k, difs)
str_s = str_result(bianchi_result, i, 20)
fo.write(str_s)
for i in range(len(data_rates_40MHz)):
bianchi_result = bianchi_ax(data_rates_40MHz[i], ack_rates_40MHz[i], k, difs)
str_s = str_result(bianchi_result, i, 40)
fo.write(str_s)
for i in range(len(data_rates_80MHz)):
bianchi_result = bianchi_ax(data_rates_80MHz[i], ack_rates_80MHz[i], k, difs)
str_s = str_result(bianchi_result, i, 80)
fo.write(str_s)
for i in range(len(data_rates_160MHz)):
bianchi_result = bianchi_ax(data_rates_160MHz[i], ack_rates_160MHz[i], k, difs)
str_s = str_result(bianchi_result, i, 160)
fo.write(str_s)
fo.close()
| nsnam/ns-3-dev-git | src/wifi/examples/reference/bianchi11ax.py | Python | gpl-2.0 | 6,874 | 0.010038 |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^', include('ebets.urls')),
url(r'^admin/', include(admin.site.urls)),
)
| pisskidney/dota | dota/urls.py | Python | mit | 222 | 0.004505 |
# vpnr 48
run_trial(hori2, duration=4.000, speed=300)
run_trial(rws[2], duration=8.0)
run_trial(rbs[12], duration=8.0)
run_trial(rws[6], duration=8.0)
run_trial(rbs[22], duration=8.0)
run_trial(cm200, duration=8.0, speed=150)
run_trial(cm200, duration=8.0, speed=800)
run_trial(rbs[6], duration=8.0)
run_trial(msm0, duration=4.000, speed=400)
run_trial(rbs[9], duration=8.0)
run_trial(mem2, duration=3.000, speed=600)
run_trial(mem0, duration=7.000, speed=200)
run_trial(rws[16], duration=8.0)
run_trial(rws[18], duration=8.0)
run_trial(rbs[1], duration=8.0)
run_trial(rbs[10], duration=8.0)
run_trial(rws[15], duration=8.0)
run_trial(rws[21], duration=8.0)
run_trial(rbs[0], duration=8.0)
run_trial(rws[1], duration=8.0)
run_trial(mem2, duration=5.000, speed=300)
show(u'Machen Sie eine kurze Pause.\n\nWeiter mit Leertaste.', wait_keys=('space',))
run_trial(rws[12], duration=8.0)
run_trial(cm400, duration=8.0, speed=400)
run_trial(rbs[4], duration=8.0)
run_trial(rbs[19], duration=8.0)
run_trial(mem0, duration=4.000, speed=400)
run_trial(rbs[8], duration=8.0)
run_trial(rbs[11], duration=8.0)
run_trial(rws[13], duration=8.0)
run_trial(rws[8], duration=8.0)
run_trial(cm400, duration=8.0, speed=200)
run_trial(mem1, duration=5.000, speed=300)
run_trial(cm400, duration=8.0, speed=300)
run_trial(hori1, duration=6.000, speed=200)
run_trial(rbs[15], duration=8.0)
run_trial(hori0, duration=3.000, speed=400)
run_trial(msm0, duration=7.000, speed=200)
run_trial(rws[0], duration=8.0)
run_trial(mem0, duration=2.500, speed=800)
run_trial(rws[17], duration=8.0)
run_trial(cm100, duration=8.0, speed=200)
run_trial(mem0, duration=3.000, speed=600)
show(u'Machen Sie eine kurze Pause.\n\nWeiter mit Leertaste.', wait_keys=('space',))
run_trial(msm1, duration=4.000, speed=400)
run_trial(rbs[18], duration=8.0)
run_trial(mem1, duration=4.000, speed=400)
run_trial(msm2, duration=4.000, speed=400)
run_trial(mem1, duration=7.000, speed=200)
run_trial(msm2, duration=3.000, speed=600)
run_trial(mem1, duration=2.500, speed=800)
run_trial(hori0, duration=2.000, speed=600)
run_trial(mem1, duration=9.000, speed=150)
run_trial(rbs[23], duration=8.0)
run_trial(cm100, duration=8.0, speed=150)
run_trial(cm200, duration=8.0, speed=200)
run_trial(rws[5], duration=8.0)
run_trial(hori2, duration=2.000, speed=600)
run_trial(msm1, duration=2.500, speed=800)
run_trial(rws[9], duration=8.0)
run_trial(cm100, duration=8.0, speed=400)
run_trial(rbs[2], duration=8.0)
run_trial(rbs[14], duration=8.0)
run_trial(cm200, duration=8.0, speed=400)
run_trial(rbs[5], duration=8.0)
show(u'Machen Sie eine kurze Pause.\n\nWeiter mit Leertaste.', wait_keys=('space',))
run_trial(hori1, duration=8.000, speed=150)
run_trial(rws[10], duration=8.0)
run_trial(rws[19], duration=8.0)
run_trial(rws[20], duration=8.0)
run_trial(rbs[21], duration=8.0)
run_trial(hori0, duration=6.000, speed=200)
run_trial(msm0, duration=3.000, speed=600)
run_trial(rbs[13], duration=8.0)
run_trial(cm200, duration=8.0, speed=300)
run_trial(msm1, duration=3.000, speed=600)
run_trial(cm400, duration=8.0, speed=600)
run_trial(rbs[7], duration=8.0)
run_trial(rws[7], duration=8.0)
run_trial(rbs[3], duration=8.0)
run_trial(hori0, duration=8.000, speed=150)
run_trial(mem2, duration=9.000, speed=150)
run_trial(rws[4], duration=8.0)
run_trial(hori2, duration=1.500, speed=800)
run_trial(cm400, duration=8.0, speed=150)
run_trial(hori0, duration=4.000, speed=300)
run_trial(cm400, duration=8.0, speed=800)
show(u'Machen Sie eine kurze Pause.\n\nWeiter mit Leertaste.', wait_keys=('space',))
run_trial(msm1, duration=5.000, speed=300)
run_trial(msm2, duration=5.000, speed=300)
run_trial(msm0, duration=2.500, speed=800)
run_trial(mem2, duration=4.000, speed=400)
run_trial(cm200, duration=8.0, speed=600)
run_trial(hori1, duration=1.500, speed=800)
run_trial(msm0, duration=9.000, speed=150)
run_trial(hori0, duration=1.500, speed=800)
run_trial(mem2, duration=2.500, speed=800)
run_trial(rbs[24], duration=8.0)
run_trial(msm2, duration=9.000, speed=150)
run_trial(hori1, duration=4.000, speed=300)
run_trial(rbs[16], duration=8.0)
run_trial(rbs[17], duration=8.0)
run_trial(msm2, duration=2.500, speed=800)
run_trial(mem1, duration=3.000, speed=600)
run_trial(msm1, duration=9.000, speed=150)
run_trial(rws[11], duration=8.0)
run_trial(hori2, duration=8.000, speed=150)
run_trial(hori1, duration=2.000, speed=600)
run_trial(msm2, duration=7.000, speed=200)
show(u'Machen Sie eine kurze Pause.\n\nWeiter mit Leertaste.', wait_keys=('space',))
run_trial(mem0, duration=5.000, speed=300)
run_trial(hori2, duration=6.000, speed=200)
run_trial(msm0, duration=5.000, speed=300)
run_trial(rws[22], duration=8.0)
run_trial(cm100, duration=8.0, speed=300)
run_trial(mem0, duration=9.000, speed=150)
run_trial(rws[23], duration=8.0)
run_trial(rws[14], duration=8.0)
run_trial(rws[24], duration=8.0)
run_trial(msm1, duration=7.000, speed=200)
run_trial(rws[3], duration=8.0)
run_trial(cm100, duration=8.0, speed=800)
run_trial(hori2, duration=3.000, speed=400)
run_trial(rbs[20], duration=8.0)
run_trial(hori1, duration=3.000, speed=400)
run_trial(mem2, duration=7.000, speed=200)
run_trial(cm100, duration=8.0, speed=600)
run_movie(movie1audio, 'Jetzt folgt ein Video mit Ton.\n\nWeiter mit Leertaste')
run_movie(movie2noaudio, 'Jetzt folgt ein Video OHNE Ton.\n\nWeiter mit Leertaste')
| derNarr/synchronicity | experiment/sessions/ses_vp48.py | Python | mit | 5,332 | 0.001313 |
from django.test import TestCase
from api.helpers import user_service
from api.factories import UserFactory, PostFactory
class UserServiceTest(TestCase):
POSTS_PER_USER = 10
def setUp(self):
self.main_user = UserFactory()
self.follower = UserFactory()
self.test_user = UserFactory()
self.main_user.followers.add(self.follower)
self.follower.following.add(self.main_user)
for i in range(0, self.POSTS_PER_USER):
PostFactory(creator=self.main_user)
PostFactory(creator=self.test_user)
PostFactory(creator=self.follower)
def test_user_feed_returns_posts_from_correct_users(self):
posts = user_service.get_user_feed(self.follower.id, 0, 20)
self.assertEqual(len(posts), self.POSTS_PER_USER * 2)
for post in posts:
self.assertIn(post.creator_id, [self.main_user.id, self.follower.id])
def test_user_feed_returns_posts_ordered_correctly(self):
posts = user_service.get_user_feed(self.follower.id, 0, 20)
for i in range(0, len(posts) - 1):
self.assertGreater(posts[i].created_at, posts[i + 1].created_at)
    def test_user_feed_returns_correct_pages(self):
        # Hedged sketch: assumes get_user_feed(user_id, offset, limit) slices
        # the ordered feed, as the calls above suggest, so the first page has
        # the requested size and consecutive pages do not overlap.
        first = user_service.get_user_feed(self.follower.id, 0, self.POSTS_PER_USER)
        second = user_service.get_user_feed(self.follower.id, self.POSTS_PER_USER, self.POSTS_PER_USER)
        self.assertEqual(len(first), self.POSTS_PER_USER)
        self.assertFalse(set(p.id for p in first) & set(p.id for p in second))
| frostblooded/kanq | api/tests/test_user_service.py | Python | mit | 1,237 | 0.000808 |
# -*- coding: utf-8 -*-
# Author: Joris Jensen <jjensen@techfak.uni-bielefeld.de>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
from scipy.optimize import minimize
from sklearn.utils import validation
from .rslvq import RslvqModel
class LmrslvqModel(RslvqModel):
"""Localized Matrix Robust Soft Learning Vector Quantization
Parameters
----------
prototypes_per_class : int or list of int, optional (default=1)
Number of prototypes per class. Use list to specify different
numbers per class.
initial_prototypes : array-like, shape = [n_prototypes, n_features + 1],
optional
Prototypes to start with. If not given initialization near the class
means. Class label must be placed as last entry of each prototype.
initial_matrices : list of array-like, optional
Matrices to start with. If not given random initialization
regularization : float or array-like, shape = [n_classes/n_prototypes],
optional (default=0.0)
Values between 0 and 1. Regularization is done by the log determinant
of the relevance matrix. Without regularization relevances may
degenerate to zero.
dim : int, optional
Maximum rank or projection dimensions
classwise : boolean, optional
If true, each class has one relevance matrix.
If false, each prototype has one relevance matrix.
sigma : float, optional (default=0.5)
Variance for the distribution.
max_iter : int, optional (default=2500)
The maximum number of iterations.
gtol : float, optional (default=1e-5)
Gradient norm must be less than gtol before successful termination
of bfgs.
display : boolean, optional (default=False)
Print information about the bfgs steps.
random_state : int, RandomState instance or None, optional
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
w_ : array-like, shape = [n_prototypes, n_features]
Prototype vector, where n_prototypes in the number of prototypes and
n_features is the number of features
c_w_ : array-like, shape = [n_prototypes]
Prototype classes
classes_ : array-like, shape = [n_classes]
Array containing labels.
omegas_ : list of array-like
Relevance Matrices
dim_ : list of int
Maximum rank of projection
regularization_ : array-like, shape = [n_classes/n_prototypes]
Values between 0 and 1
See also
--------
RslvqModel, MrslvqModel
"""
def __init__(self, prototypes_per_class=1, initial_prototypes=None,
initial_matrices=None, regularization=0.0, dim=None,
classwise=False, sigma=1, max_iter=2500, gtol=1e-5, display=False,
random_state=None):
super(LmrslvqModel, self).__init__(sigma=sigma,
random_state=random_state,
prototypes_per_class=prototypes_per_class,
initial_prototypes=initial_prototypes,
gtol=gtol, display=display, max_iter=max_iter)
self.regularization = regularization
self.initial_matrices = initial_matrices
self.classwise = classwise
self.initialdim = dim
def _optgrad(self, variables, training_data, label_equals_prototype,
random_state, lr_relevances=0, lr_prototypes=1):
n_data, n_dim = training_data.shape
nb_prototypes = self.c_w_.size
variables = variables.reshape(variables.size // n_dim, n_dim)
prototypes = variables[:nb_prototypes]
# dim to indices
indices = []
for i in range(len(self.dim_)):
indices.append(sum(self.dim_[:i + 1]))
omegas = np.split(variables[nb_prototypes:], indices[:-1]) # .conj().T
g = np.zeros(variables.shape)
if lr_relevances > 0:
gw = []
for i in range(len(omegas)):
gw.append(np.zeros(omegas[i].shape))
c = 1 / self.sigma
for i in range(n_data):
xi = training_data[i]
c_xi = label_equals_prototype[i]
for j in range(prototypes.shape[0]):
if len(omegas) == nb_prototypes:
omega_index = j
else:
omega_index = np.where(self.classes_ == self.c_w_[j])[0][0]
oo = omegas[omega_index].T.dot(omegas[omega_index])
d = (xi - prototypes[j])[np.newaxis].T
p = self._p(j, xi, prototypes=prototypes, omega=omegas[omega_index])
if self.c_w_[j] == c_xi:
pj = self._p(j, xi, prototypes=prototypes, y=c_xi,
omega=omegas[omega_index])
if lr_prototypes > 0:
if self.c_w_[j] == c_xi:
g[j] += (c * (pj - p) * oo.dot(d)).ravel()
else:
g[j] -= (c * p * oo.dot(d)).ravel()
                if lr_relevances > 0:
                    # gw holds one gradient per relevance matrix, so
                    # accumulate into the matrix used for this prototype
                    if self.c_w_[j] == c_xi:
                        gw[omega_index] -= (pj - p) / self.sigma * (
                            omegas[omega_index].dot(d).dot(d.T))
                    else:
                        gw[omega_index] += p / self.sigma * (
                            omegas[omega_index].dot(d).dot(d.T))
if lr_relevances > 0:
if sum(self.regularization_) > 0:
regmatrices = np.zeros([sum(self.dim_), n_dim])
for i in range(len(omegas)):
regmatrices[sum(self.dim_[:i + 1]) - self.dim_[i]:sum(
self.dim_[:i + 1])] = \
self.regularization_[i] * np.linalg.pinv(omegas[i])
g[nb_prototypes:] = 2 / n_data * lr_relevances * \
np.concatenate(gw) - regmatrices
else:
g[nb_prototypes:] = 2 / n_data * lr_relevances * \
np.concatenate(gw)
if lr_prototypes > 0:
g[:nb_prototypes] = 1 / n_data * \
lr_prototypes * g[:nb_prototypes]
g *= -(1 + 0.0001 * random_state.rand(*g.shape) - 0.5)
return g.ravel()
def _optfun(self, variables, training_data, label_equals_prototype):
n_data, n_dim = training_data.shape
nb_prototypes = self.c_w_.size
variables = variables.reshape(variables.size // n_dim, n_dim)
prototypes = variables[:nb_prototypes]
indices = []
for i in range(len(self.dim_)):
indices.append(sum(self.dim_[:i + 1]))
omegas = np.split(variables[nb_prototypes:], indices[:-1])
out = 0
for i in range(n_data):
xi = training_data[i]
y = label_equals_prototype[i]
if len(omegas) == nb_prototypes:
fs = [self._costf(xi, prototypes[j], omega=omegas[j])
for j in range(nb_prototypes)]
else:
fs = [self._costf(xi, prototypes[j], omega=omegas[np.where(self.classes_ == self.c_w_[j])[0][0]])
for j in range(nb_prototypes)]
fs_max = max(fs)
s1 = sum([np.math.exp(fs[i] - fs_max) for i in range(len(fs))
if self.c_w_[i] == y])
s2 = sum([np.math.exp(f - fs_max) for f in fs])
s1 += 0.0000001
s2 += 0.0000001
out += np.math.log(s1 / s2)
return -out
def _optimize(self, x, y, random_state):
nb_prototypes, nb_features = self.w_.shape
nb_classes = len(self.classes_)
if not isinstance(self.classwise, bool):
raise ValueError("classwise must be a boolean")
if self.initialdim is None:
if self.classwise:
self.dim_ = nb_features * np.ones(nb_classes, dtype=np.int)
else:
self.dim_ = nb_features * np.ones(nb_prototypes, dtype=np.int)
else:
self.dim_ = validation.column_or_1d(self.initialdim)
if self.dim_.size == 1:
if self.classwise:
self.dim_ = self.dim_[0] * np.ones(nb_classes,
dtype=np.int)
else:
self.dim_ = self.dim_[0] * np.ones(nb_prototypes,
dtype=np.int)
elif self.classwise and self.dim_.size != nb_classes:
raise ValueError("dim length must be number of classes")
elif self.dim_.size != nb_prototypes:
raise ValueError("dim length must be number of prototypes")
if self.dim_.min() <= 0:
raise ValueError("dim must be a list of positive ints")
# initialize psis (psis is list of arrays)
if self.initial_matrices is None:
self.omegas_ = []
for d in self.dim_:
self.omegas_.append(
random_state.rand(d, nb_features) * 2.0 - 1.0)
else:
if not isinstance(self.initial_matrices, list):
raise ValueError("initial matrices must be a list")
self.omegas_ = list(map(lambda v: validation.check_array(v),
self.initial_matrices))
if self.classwise:
if len(self.omegas_) != nb_classes:
raise ValueError("length of matrices wrong\n"
"found=%d\n"
"expected=%d" % (
len(self.omegas_), nb_classes))
elif np.sum(map(lambda v: v.shape[1],
self.omegas_)) != nb_features * \
len(self.omegas_):
raise ValueError(
"each matrix should have %d columns" % nb_features)
elif len(self.omegas_) != nb_prototypes:
raise ValueError("length of matrices wrong\n"
"found=%d\n"
"expected=%d" % (
len(self.omegas_), nb_classes))
elif np.sum([v.shape[1] for v in self.omegas_]) != \
nb_features * len(self.omegas_):
raise ValueError(
"each matrix should have %d columns" % nb_features)
if isinstance(self.regularization, float):
if self.regularization < 0:
raise ValueError('regularization must be a positive float')
self.regularization_ = np.repeat(self.regularization,
len(self.omegas_))
else:
self.regularization_ = validation.column_or_1d(self.regularization)
if self.classwise:
if self.regularization_.size != nb_classes:
raise ValueError(
"length of regularization must be number of classes")
else:
if self.regularization_.size != self.w_.shape[0]:
raise ValueError(
"length of regularization "
"must be number of prototypes")
variables = np.append(self.w_, np.concatenate(self.omegas_), axis=0)
label_equals_prototype = y
res = minimize(
fun=lambda vs: self._optfun(
vs, x, label_equals_prototype=label_equals_prototype),
jac=lambda vs: self._optgrad(
vs, x, label_equals_prototype=label_equals_prototype,
lr_prototypes=0, lr_relevances=1, random_state=random_state),
method='L-BFGS-B',
x0=variables, options={'disp': self.display, 'gtol': self.gtol,
'maxiter': self.max_iter})
n_iter = res.nit
res = minimize(
fun=lambda vs: self._optfun(
vs, x, label_equals_prototype=label_equals_prototype),
jac=lambda vs: self._optgrad(
vs, x, label_equals_prototype=label_equals_prototype,
lr_prototypes=0, lr_relevances=1, random_state=random_state),
method='L-BFGS-B',
x0=res.x, options={'disp': self.display, 'gtol': self.gtol,
'maxiter': self.max_iter})
n_iter = max(n_iter, res.nit)
res = minimize(
fun=lambda vs: self._optfun(
vs, x, label_equals_prototype=label_equals_prototype),
jac=lambda vs: self._optgrad(
vs, x, label_equals_prototype=label_equals_prototype,
lr_prototypes=1, lr_relevances=1, random_state=random_state),
method='L-BFGS-B',
x0=res.x, options={'disp': self.display, 'gtol': self.gtol,
'maxiter': self.max_iter})
n_iter = max(n_iter, res.nit)
out = res.x.reshape(res.x.size // nb_features, nb_features)
self.w_ = out[:nb_prototypes]
indices = []
for i in range(len(self.dim_)):
indices.append(sum(self.dim_[:i + 1]))
self.omegas_ = np.split(out[nb_prototypes:], indices[:-1]) # .conj().T
self.n_iter_ = n_iter
def _f(self, x, i):
d = (x - self.w_[i])[np.newaxis].T
d = d.T.dot(self.omegas_[i].T).dot(self.omegas_[i]).dot(d)
return -d / (2 * self.sigma)
def _costf(self, x, w, **kwargs):
if 'omega' in kwargs:
omega = kwargs['omega']
else:
omega = self.omegas_[np.where(self.w_ == w)[0][0]]
d = (x - w)[np.newaxis].T
d = d.T.dot(omega.T).dot(omega).dot(d)
return -d / (2 * self.sigma)
def _compute_distance(self, x, w=None):
if w is None:
w = self.w_
def foo(e):
fun = np.vectorize(lambda w: self._costf(e, w),
signature='(n)->()')
return fun(w)
return np.vectorize(foo, signature='(n)->()')(x)
def project(self, x, prototype_idx, dims, print_variance_covered=False):
"""Projects the data input data X using the relevance matrix of the
prototype specified by prototype_idx to dimension dim
Parameters
----------
x : array-like, shape = [n,n_features]
input data for project
prototype_idx : int
index of the prototype
dims : int
dimension to project to
print_variance_covered : boolean
flag to print the covered variance of the projection
Returns
--------
C : array, shape = [n,n_features]
Returns predicted values.
"""
nb_prototypes = self.w_.shape[0]
if len(self.omegas_) != nb_prototypes \
or self.prototypes_per_class != 1:
print('project only possible with classwise relevance matrix')
# y = self.predict(X)
v, u = np.linalg.eig(
self.omegas_[prototype_idx].T.dot(self.omegas_[prototype_idx]))
idx = v.argsort()[::-1]
if print_variance_covered:
            print('variance covered by projection:',
v[idx][:dims].sum() / v.sum() * 100)
return x.dot(u[:, idx][:, :dims].dot(np.diag(np.sqrt(v[idx][:dims]))))
| MrNuggelz/sklearn-glvq | sklearn_lvq/lmrslvq.py | Python | bsd-3-clause | 15,649 | 0.000511 |
"""
Windows Process Control
winprocess.run launches a child process and returns the exit code.
Optionally, it can:
redirect stdin, stdout & stderr to files
run the command as another user
limit the process's running time
control the process window (location, size, window state, desktop)
Works on Windows NT, 2000 & XP. Requires Mark Hammond's win32
extensions.
This code is free for any purpose, with no warranty of any kind.
-- John B. Dell'Aquila <jbd@alum.mit.edu>
"""
import win32api, win32process, win32security
import win32event, win32con, msvcrt, win32gui
def logonUser(loginString):
"""
Login as specified user and return handle.
loginString: 'Domain\nUser\nPassword'; for local
login use . or empty string as domain
e.g. '.\nadministrator\nsecret_password'
"""
domain, user, passwd = loginString.split('\n')
return win32security.LogonUser(
user,
domain,
passwd,
win32con.LOGON32_LOGON_INTERACTIVE,
win32con.LOGON32_PROVIDER_DEFAULT
)
class Process:
"""
A Windows process.
"""
def __init__(self, cmd, login=None,
hStdin=None, hStdout=None, hStderr=None,
show=1, xy=None, xySize=None,
desktop=None):
"""
Create a Windows process.
cmd: command to run
login: run as user 'Domain\nUser\nPassword'
hStdin, hStdout, hStderr:
handles for process I/O; default is caller's stdin,
stdout & stderr
show: wShowWindow (0=SW_HIDE, 1=SW_NORMAL, ...)
xy: window offset (x, y) of upper left corner in pixels
xySize: window size (width, height) in pixels
desktop: lpDesktop - name of desktop e.g. 'winsta0\\default'
None = inherit current desktop
'' = create new desktop if necessary
User calling login requires additional privileges:
Act as part of the operating system [not needed on Windows XP]
Increase quotas
Replace a process level token
Login string must EITHER be an administrator's account
(ordinary user can't access current desktop - see Microsoft
Q165194) OR use desktop='' to run another desktop invisibly
(may be very slow to startup & finalize).
"""
si = win32process.STARTUPINFO()
si.dwFlags = (win32con.STARTF_USESTDHANDLES ^
win32con.STARTF_USESHOWWINDOW)
if hStdin is None:
si.hStdInput = win32api.GetStdHandle(win32api.STD_INPUT_HANDLE)
else:
si.hStdInput = hStdin
if hStdout is None:
si.hStdOutput = win32api.GetStdHandle(win32api.STD_OUTPUT_HANDLE)
else:
si.hStdOutput = hStdout
if hStderr is None:
si.hStdError = win32api.GetStdHandle(win32api.STD_ERROR_HANDLE)
else:
si.hStdError = hStderr
si.wShowWindow = show
if xy is not None:
si.dwX, si.dwY = xy
si.dwFlags ^= win32con.STARTF_USEPOSITION
if xySize is not None:
si.dwXSize, si.dwYSize = xySize
si.dwFlags ^= win32con.STARTF_USESIZE
if desktop is not None:
si.lpDesktop = desktop
procArgs = (None, # appName
cmd, # commandLine
None, # processAttributes
None, # threadAttributes
1, # bInheritHandles
win32process.CREATE_NEW_CONSOLE, # dwCreationFlags
None, # newEnvironment
None, # currentDirectory
si) # startupinfo
if login is not None:
hUser = logonUser(login)
win32security.ImpersonateLoggedOnUser(hUser)
procHandles = win32process.CreateProcessAsUser(hUser, *procArgs)
win32security.RevertToSelf()
else:
procHandles = win32process.CreateProcess(*procArgs)
self.hProcess, self.hThread, self.PId, self.TId = procHandles
def wait(self, mSec=None):
"""
Wait for process to finish or for specified number of
milliseconds to elapse.
"""
if mSec is None:
mSec = win32event.INFINITE
return win32event.WaitForSingleObject(self.hProcess, mSec)
def kill(self, gracePeriod=5000):
"""
Kill process. Try for an orderly shutdown via WM_CLOSE. If
still running after gracePeriod (5 sec. default), terminate.
"""
win32gui.EnumWindows(self.__close__, 0)
if self.wait(gracePeriod) != win32event.WAIT_OBJECT_0:
win32process.TerminateProcess(self.hProcess, 0)
win32api.Sleep(100) # wait for resources to be released
def __close__(self, hwnd, dummy):
"""
EnumWindows callback - sends WM_CLOSE to any window
owned by this process.
"""
TId, PId = win32process.GetWindowThreadProcessId(hwnd)
if PId == self.PId:
win32gui.PostMessage(hwnd, win32con.WM_CLOSE, 0, 0)
def exitCode(self):
"""
Return process exit code.
"""
return win32process.GetExitCodeProcess(self.hProcess)
def run(cmd, mSec=None, stdin=None, stdout=None, stderr=None, **kw):
"""
Run cmd as a child process and return exit code.
mSec: terminate cmd after specified number of milliseconds
stdin, stdout, stderr:
file objects for child I/O (use hStdin etc. to attach
handles instead of files); default is caller's stdin,
stdout & stderr;
kw: see Process.__init__ for more keyword options
"""
if stdin is not None:
kw['hStdin'] = msvcrt.get_osfhandle(stdin.fileno())
if stdout is not None:
kw['hStdout'] = msvcrt.get_osfhandle(stdout.fileno())
if stderr is not None:
kw['hStderr'] = msvcrt.get_osfhandle(stderr.fileno())
child = Process(cmd, **kw)
if child.wait(mSec) != win32event.WAIT_OBJECT_0:
child.kill()
raise WindowsError, 'process timeout exceeded'
return child.exitCode()
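# Hedged usage sketch (the self-test below is the fuller demo): run a shell
# command hidden, with a 5 second timeout, capturing output to a temp file:
#   import tempfile
#   out = tempfile.TemporaryFile()
#   rc = run('cmd.exe /c vol', mSec=5000, show=0, stdout=out)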
if __name__ == '__main__':
# Pipe commands to a shell and display the output in notepad
print 'Testing winprocess.py...'
import tempfile
timeoutSeconds = 15
cmdString = """\
REM Test of winprocess.py piping commands to a shell.\r
REM This window will close in %d seconds.\r
vol\r
net user\r
_this_is_a_test_of_stderr_\r
""" % timeoutSeconds
cmd, out = tempfile.TemporaryFile(), tempfile.TemporaryFile()
cmd.write(cmdString)
cmd.seek(0)
print 'CMD.EXE exit code:', run('cmd.exe', show=0, stdin=cmd,
stdout=out, stderr=out)
cmd.close()
print 'NOTEPAD exit code:', run('notepad.exe %s' % out.file.name,
show=win32con.SW_MAXIMIZE,
mSec=timeoutSeconds*1000)
out.close()
| alexei-matveev/ccp1gui | jobmanager/winprocess.py | Python | gpl-2.0 | 7,039 | 0.001421 |
from __future__ import print_function
from twython import Twython
import util
class TwitterBot(util.SocialMediaBot):
""" Social Media Bot for posting updates to Tumblr """
NAME = "twitter"
def __init__(self, **kwargs):
super(TwitterBot, self).__init__(**kwargs)
self.client = Twython(*self.oauth_config)
def post_update(self):
text = self.generate_text(limit_characters=140)
self.client.update_status(status=text)
if __name__ == "__main__":
twitterbot = TwitterBot()
twitterbot.post_update()
| thedeadparrot/ficbot | twitterbot.py | Python | mit | 555 | 0.003604 |
# This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Androguard is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Androguard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Androguard. If not, see <http://www.gnu.org/licenses/>.
from androguard.core import bytecode
from androguard.core.androconf import CONF, debug
import sys, re
from struct import pack, unpack, calcsize
DEX_FILE_MAGIC = 'dex\n035\x00'
ODEX_FILE_MAGIC_35 = 'dey\n035\x00'
ODEX_FILE_MAGIC_36 = 'dey\n036\x00'
TYPE_MAP_ITEM = {
0x0 : "TYPE_HEADER_ITEM",
0x1 : "TYPE_STRING_ID_ITEM",
0x2 : "TYPE_TYPE_ID_ITEM",
0x3 : "TYPE_PROTO_ID_ITEM",
0x4 : "TYPE_FIELD_ID_ITEM",
0x5 : "TYPE_METHOD_ID_ITEM",
0x6 : "TYPE_CLASS_DEF_ITEM",
0x1000 : "TYPE_MAP_LIST",
0x1001 : "TYPE_TYPE_LIST",
0x1002 : "TYPE_ANNOTATION_SET_REF_LIST",
0x1003 : "TYPE_ANNOTATION_SET_ITEM",
0x2000 : "TYPE_CLASS_DATA_ITEM",
0x2001 : "TYPE_CODE_ITEM",
0x2002 : "TYPE_STRING_DATA_ITEM",
0x2003 : "TYPE_DEBUG_INFO_ITEM",
0x2004 : "TYPE_ANNOTATION_ITEM",
0x2005 : "TYPE_ENCODED_ARRAY_ITEM",
0x2006 : "TYPE_ANNOTATIONS_DIRECTORY_ITEM",
}
ACCESS_FLAGS = [
(0x1 , 'public'),
(0x2 , 'private'),
(0x4 , 'protected'),
(0x8 , 'static'),
(0x10 , 'final'),
(0x20 , 'synchronized'),
(0x40 , 'bridge'),
(0x80 , 'varargs'),
(0x100 , 'native'),
(0x200 , 'interface'),
(0x400 , 'abstract'),
(0x800 , 'strict'),
(0x1000 , 'synthetic'),
(0x4000 , 'enum'),
(0x8000 , 'unused'),
(0x10000, 'constructor'),
(0x20000, 'synchronized'),
]
TYPE_DESCRIPTOR = {
'V': 'void',
'Z': 'boolean',
'B': 'byte',
'S': 'short',
'C': 'char',
'I': 'int',
'J': 'long',
'F': 'float',
'D': 'double',
'STR': 'String',
'StringBuilder': 'String'
}
def get_access_flags_string(value) :
"""
Transform an access flags to the corresponding string
:param value: the value of the access flags
:type value: int
:rtype: string
"""
buff = ""
for i in ACCESS_FLAGS :
if (i[0] & value) == i[0] :
buff += i[1] + " "
if buff != "" :
return buff[:-1]
return buff
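# e.g. get_access_flags_string(0x1 | 0x8) -> 'public static'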
def get_type(atype, size=None):
"""
Retrieve the type of a descriptor (e.g : I)
"""
    if atype.startswith('java.lang'):
        atype = atype.replace('java.lang.', '')
    # look the name up directly: str.lstrip takes a character *set*, not a
    # prefix, so lstrip('java.lang') would mangle the name
    res = TYPE_DESCRIPTOR.get(atype)
if res is None:
if atype[0] == 'L':
res = atype[1:-1].replace('/', '.')
elif atype[0] == '[':
if size is None:
res = '%s[]' % get_type(atype[1:])
else:
res = '%s[%s]' % (get_type(atype[1:]), size)
else:
res = atype
return res
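# e.g. get_type('I') -> 'int', get_type('[I') -> 'int[]',
#      get_type('Ljava/lang/String;') -> 'java.lang.String'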
MATH_DVM_OPCODES = { "add." : '+',
"div." : '/',
"mul." : '*',
"or." : '|',
"sub." : '-',
"and." : '&',
"xor." : '^',
"shl." : "<<",
"shr." : ">>",
}
FIELD_READ_DVM_OPCODES = [ ".get" ]
FIELD_WRITE_DVM_OPCODES = [ ".put" ]
BREAK_DVM_OPCODES = [ "invoke.", "move.", ".put", "if." ]
BRANCH_DVM_OPCODES = [ "throw", "throw.", "if.", "goto", "goto.", "return", "return.", "packed-switch$", "sparse-switch$" ]
def clean_name_instruction( instruction ) :
op_value = instruction.get_op_value()
# goto range
if op_value >= 0x28 and op_value <= 0x2a :
return "goto"
return instruction.get_name()
def static_operand_instruction( instruction ) :
buff = ""
if isinstance(instruction, Instruction) :
# get instructions without registers
for val in instruction.get_literals() :
buff += "%s" % val
op_value = instruction.get_op_value()
if op_value == 0x1a or op_value == 0x1b :
buff += instruction.get_string()
return buff
html_escape_table = {
"&": "&",
'"': """,
"'": "'",
">": ">",
"<": "<",
}
def dot_buff(ins, idx) :
op_value = ins.get_op_value()
if op_value == 0x300 :
return ins.get_name() + " " + ins.get_output(idx).replace("\"", "")
elif op_value == 0x1a :
return ins.get_name() + " " + ins.get_output(idx).replace("\"", "") #"".join(html_escape_table.get(c,c) for c in ins.get_output())
return ins.get_name() + " " + ins.get_output(idx)
def readuleb128(buff) :
result = ord( buff.read(1) )
if result > 0x7f :
cur = ord( buff.read(1) )
result = (result & 0x7f) | ((cur & 0x7f) << 7)
if cur > 0x7f :
cur = ord( buff.read(1) )
result |= (cur & 0x7f) << 14
if cur > 0x7f :
cur = ord( buff.read(1) )
result |= (cur & 0x7f) << 21
if cur > 0x7f :
cur = ord( buff.read(1) )
if cur > 0x0f :
raise("prout")
result |= cur << 28
return result
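# Hedged example (the canonical LEB128 test vector): the bytes '\xe5\x8e\x26'
# decode to 624485; buff can be any file-like object:
#   from StringIO import StringIO
#   readuleb128(StringIO('\xe5\x8e\x26'))   # -> 624485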
def readusleb128(buff) :
result = ord( buff.read(1) )
if result > 0x7f :
cur = ord( buff.read(1) )
result = (result & 0x7f) | ((cur & 0x7f) << 7)
if cur > 0x7f :
cur = ord( buff.read(1) )
result |= (cur & 0x7f) << 14
if cur > 0x7f :
cur = ord( buff.read(1) )
result |= (cur & 0x7f) << 21
if cur > 0x7f :
cur = ord( buff.read(1) )
result |= cur << 28
return result
def readuleb128p1(buff) :
return readuleb128( buff ) - 1
def readsleb128(buff) :
result = unpack( '=b', buff.read(1) )[0]
if result <= 0x7f :
result = (result << 25)
if result > 0x7fffffff :
result = (0x7fffffff & result) - 0x80000000
result = result >> 25
else :
cur = unpack( '=b', buff.read(1) )[0]
result = (result & 0x7f) | ((cur & 0x7f) << 7)
if cur <= 0x7f :
result = (result << 18) >> 18
else :
cur = unpack( '=b', buff.read(1) )[0]
result |= (cur & 0x7f) << 14
if cur <= 0x7f :
result = (result << 11) >> 11
else :
cur = unpack( '=b', buff.read(1) )[0]
result |= (cur & 0x7f) << 21
if cur <= 0x7f :
result = (result << 4) >> 4
else :
cur = unpack( '=b', buff.read(1) )[0]
result |= cur << 28
return result
def get_sbyte(buff) :
return unpack( '=b', buff.read(1) )[0]
def readsleb128_2(buff) :
result = get_sbyte(buff)
if result <= 0x7f :
result = (result << 25) >> 25
else :
cur = get_sbyte(buff)
result = (result & 0x7f) | ((cur & 0x7f) << 7)
if cur <= 0x7f :
result = (result << 18) >> 18
else :
cur = get_sbyte(buff)
result |= (cur & 0x7f) << 14
if cur <= 0x7f :
result = (result << 11) >> 11
else :
cur = get_sbyte(buff)
result |= (cur & 0x7f) << 21
if cur <= 0x7f :
result = (result << 4) >> 4
else :
cur = get_sbyte(buff)
result |= cur << 28
return result
def writeuleb128(value) :
remaining = value >> 7
buff = ""
while remaining > 0 :
buff += pack( "=B", ((value & 0x7f) | 0x80) )
value = remaining
remaining >>= 7
buff += pack( "=B", value & 0x7f )
return buff
def writesleb128(value) :
remaining = value >> 7
hasMore = True
end = 0
buff = ""
if (value & (-sys.maxint - 1)) == 0 :
end = 0
else :
end = -1
while hasMore :
hasMore = (remaining != end) or ((remaining & 1) != ((value >> 6) & 1))
tmp = 0
if hasMore :
tmp = 0x80
buff += pack( "=B", (value & 0x7f) | (tmp) )
value = remaining
remaining >>= 7
return buff
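# Hedged round-trip check: writesleb128(-1) packs to '\x7f' (7-bit two's
# complement), and readsleb128_2 sign-extends it back:
#   from StringIO import StringIO
#   writesleb128(-1)                    # -> '\x7f'
#   readsleb128_2(StringIO('\x7f'))     # -> -1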
def determineNext(i, end, m) :
op_value = i.get_op_value()
# throw + return*
if (op_value == 0x27) or (0x0e <= op_value <= 0x11) :
return [ -1 ]
# goto
elif 0x28 <= op_value <= 0x2a :
off = i.get_ref_off() * 2
return [ off + end ]
# if
elif 0x32 <= op_value <= 0x3d :
off = i.get_ref_off() * 2
return [ end + i.get_length(), off + (end) ]
# sparse/packed
elif op_value in (0x2b, 0x2c) :
x = []
x.append( end + i.get_length() )
code = m.get_code().get_bc()
off = i.get_ref_off() * 2
data = code.get_ins_off( off + end )
if data != None :
for target in data.get_targets() :
x.append( target*2 + end )
return x
return []
def determineException(vm, m) :
# no exceptions !
if m.get_code().get_tries_size() <= 0 :
return []
h_off = {}
handler_catch_list = m.get_code().get_handlers()
for try_item in m.get_code().get_tries() :
offset_handler = try_item.get_handler_off() + handler_catch_list.get_off()
if offset_handler in h_off :
h_off[ offset_handler ].append( [ try_item ] )
else :
h_off[ offset_handler ] = []
h_off[ offset_handler ].append( [ try_item ] )
#print m.get_name(), "\t HANDLER_CATCH_LIST SIZE", handler_catch_list.size, handler_catch_list.get_offset()
for handler_catch in handler_catch_list.get_list() :
if handler_catch.get_off() not in h_off :
continue
for i in h_off[ handler_catch.get_off() ] :
i.append( handler_catch )
exceptions = []
#print m.get_name(), h_off
for i in h_off :
for value in h_off[ i ] :
try_value = value[0]
z = [ try_value.get_start_addr() * 2, (try_value.get_start_addr() * 2) + (try_value.get_insn_count() * 2) - 1 ]
handler_catch = value[1]
if handler_catch.get_size() <= 0 :
z.append( [ "any", handler_catch.get_catch_all_addr() * 2 ] )
for handler in handler_catch.get_handlers() :
z.append( [ vm.get_cm_type( handler.get_type_idx() ), handler.get_addr() * 2 ] )
exceptions.append( z )
#print m.get_name(), exceptions
return exceptions
class HeaderItem :
"""
This class can parse an header_item of a dex file
:param buff: a string which represents a Buff object of the header_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, size, buff, cm) :
self.__CM = cm
self.offset = buff.get_idx()
self.magic = unpack("=Q", buff.read(8))[0]
self.checksum = unpack("=I", buff.read(4))[0]
self.signature = unpack("=20s", buff.read(20))[0]
self.file_size = unpack("=I", buff.read(4))[0]
self.header_size = unpack("=I", buff.read(4))[0]
self.endian_tag = unpack("=I", buff.read(4))[0]
self.link_size = unpack("=I", buff.read(4))[0]
self.link_off = unpack("=I", buff.read(4))[0]
self.map_off = unpack("=I", buff.read(4))[0]
self.string_ids_size = unpack("=I", buff.read(4))[0]
self.string_ids_off = unpack("=I", buff.read(4))[0]
self.type_ids_size = unpack("=I", buff.read(4))[0]
self.type_ids_off = unpack("=I", buff.read(4))[0]
self.proto_ids_size = unpack("=I", buff.read(4))[0]
self.proto_ids_off = unpack("=I", buff.read(4))[0]
self.field_ids_size = unpack("=I", buff.read(4))[0]
self.field_ids_off = unpack("=I", buff.read(4))[0]
self.method_ids_size = unpack("=I", buff.read(4))[0]
self.method_ids_off = unpack("=I", buff.read(4))[0]
self.class_defs_size = unpack("=I", buff.read(4))[0]
self.class_defs_off = unpack("=I", buff.read(4))[0]
self.data_size = unpack("=I", buff.read(4))[0]
self.data_off = unpack("=I", buff.read(4))[0]
self.map_off_obj = None
self.string_off_obj = None
self.type_off_obj = None
self.proto_off_obj = None
self.field_off_obj = None
self.method_off_obj = None
self.class_off_obj = None
self.data_off_obj = None
def reload(self) :
pass
def get_obj(self) :
if self.map_off_obj == None :
self.map_off_obj = self.__CM.get_item_by_offset( self.map_off )
if self.string_off_obj == None :
self.string_off_obj = self.__CM.get_item_by_offset( self.string_ids_off )
if self.type_off_obj == None :
self.type_off_obj = self.__CM.get_item_by_offset( self.type_ids_off )
if self.proto_off_obj == None :
self.proto_off_obj = self.__CM.get_item_by_offset( self.proto_ids_off )
if self.field_off_obj == None :
self.field_off_obj = self.__CM.get_item_by_offset( self.field_ids_off )
if self.method_off_obj == None :
self.method_off_obj = self.__CM.get_item_by_offset( self.method_ids_off )
if self.class_off_obj == None :
self.class_off_obj = self.__CM.get_item_by_offset( self.class_defs_off )
if self.data_off_obj == None :
self.data_off_obj = self.__CM.get_item_by_offset( self.data_off )
self.map_off = self.map_off_obj.get_off()
self.string_ids_size = len(self.string_off_obj)
self.string_ids_off = self.string_off_obj[0].get_off()
self.type_ids_size = len(self.type_off_obj.type)
self.type_ids_off = self.type_off_obj.get_off()
self.proto_ids_size = len(self.proto_off_obj.proto)
self.proto_ids_off = self.proto_off_obj.get_off()
self.field_ids_size = len(self.field_off_obj.elem)
self.field_ids_off = self.field_off_obj.get_off()
self.method_ids_size = len(self.method_off_obj.methods)
self.method_ids_off = self.method_off_obj.get_off()
self.class_defs_size = len(self.class_off_obj.class_def)
self.class_defs_off = self.class_off_obj.get_off()
#self.data_size = len(self.data_off_obj)
self.data_off = self.data_off_obj[0].get_off()
return pack("=Q", self.magic) + \
pack("=I", self.checksum) + \
pack("=20s", self.signature) + \
pack("=I", self.file_size) + \
pack("=I", self.header_size) + \
pack("=I", self.endian_tag) + \
pack("=I", self.link_size) + \
pack("=I", self.link_off) + \
pack("=I", self.map_off) + \
pack("=I", self.string_ids_size) + \
pack("=I", self.string_ids_off) + \
pack("=I", self.type_ids_size) + \
pack("=I", self.type_ids_off) + \
pack("=I", self.proto_ids_size) + \
pack("=I", self.proto_ids_off) + \
pack("=I", self.field_ids_size) + \
pack("=I", self.field_ids_off) + \
pack("=I", self.method_ids_size) + \
pack("=I", self.method_ids_off) + \
pack("=I", self.class_defs_size) + \
pack("=I", self.class_defs_off) + \
pack("=I", self.data_size) + \
pack("=I", self.data_off)
def get_raw(self) :
return self.get_obj()
def get_length(self) :
return len(self.get_raw())
def show(self) :
bytecode._PrintSubBanner("Header Item")
bytecode._PrintDefault("magic=%s, checksum=%s, signature=%s\n" % (self.magic, self.checksum, self.signature))
bytecode._PrintDefault("file_size=%x, header_size=%x, endian_tag=%x\n" % (self.file_size, self.header_size, self.endian_tag))
bytecode._PrintDefault("link_size=%x, link_off=%x\n" % (self.link_size, self.link_off))
bytecode._PrintDefault("map_off=%x\n" % (self.map_off))
bytecode._PrintDefault("string_ids_size=%x, string_ids_off=%x\n" % (self.string_ids_size, self.string_ids_off))
bytecode._PrintDefault("type_ids_size=%x, type_ids_off=%x\n" % (self.type_ids_size, self.type_ids_off))
bytecode._PrintDefault("proto_ids_size=%x, proto_ids_off=%x\n" % (self.proto_ids_size, self.proto_ids_off))
bytecode._PrintDefault("field_ids_size=%x, field_ids_off=%x\n" % (self.field_ids_size, self.field_ids_off))
bytecode._PrintDefault("method_ids_size=%x, method_ids_off=%x\n" % (self.method_ids_size, self.method_ids_off))
bytecode._PrintDefault("class_defs_size=%x, class_defs_off=%x\n" % (self.class_defs_size, self.class_defs_off))
bytecode._PrintDefault("data_size=%x, data_off=%x\n" % (self.data_size, self.data_off))
def set_off(self, off) :
self.offset = off
def get_off(self) :
return self.offset
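# Illustrative sketch (not used by the parser above): the magic unpacked by
# HeaderItem covers the first 8 bytes of the file, "dex\n035\0" (the version
# digits vary with the dex format revision). A quick standalone check, assuming
# raw holds at least the first 8 bytes of a classes.dex file:
def _example_is_dex_magic(raw) :
    """Return True if raw starts with a plausible dex magic (a minimal sketch)."""
    return len(raw) >= 8 and raw[:4] == "dex\n" and raw[7] == "\x00"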
class AnnotationOffItem :
"""
This class can parse an annotation_off_item of a dex file
:param buff: a string which represents a Buff object of the annotation_off_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.__CM = cm
self.annotation_off = unpack("=I", buff.read( 4 ) )[0]
def show(self) :
bytecode._PrintSubBanner("Annotation Off Item")
bytecode._PrintDefault("annotation_off=0x%x\n" % self.annotation_off)
def get_obj(self) :
if self.annotation_off != 0 :
self.annotation_off = self.__CM.get_obj_by_offset( self.annotation_off ).get_off()
return pack("=I", self.annotation_off)
def get_raw(self) :
return self.get_obj()
def get_length(self) :
return len(self.get_obj())
class AnnotationSetItem :
"""
This class can parse an annotation_set_item of a dex file
:param buff: a string which represents a Buff object of the annotation_set_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.__CM = cm
self.offset = buff.get_idx()
self.annotation_off_item = []
self.size = unpack("=I", buff.read( 4 ) )[0]
for i in xrange(0, self.size) :
self.annotation_off_item.append( AnnotationOffItem(buff, cm) )
def get_annotation_off_item(self) :
"""
        Return the list of annotation_off_item of this annotation set
:rtype: a list of :class:`AnnotationOffItem`
"""
return self.annotation_off_item
def set_off(self, off) :
self.offset = off
def get_off(self) :
return self.offset
def reload(self) :
pass
def show(self) :
bytecode._PrintSubBanner("Annotation Set Item")
for i in self.annotation_off_item :
i.show()
def get_obj(self) :
return pack("=I", self.size)
def get_raw(self) :
return self.get_obj() + ''.join(i.get_raw() for i in self.annotation_off_item)
def get_length(self) :
length = len(self.get_obj())
for i in self.annotation_off_item :
length += i.get_length()
return length
class AnnotationSetRefItem :
"""
This class can parse an annotation_set_ref_item of a dex file
:param buff: a string which represents a Buff object of the annotation_set_ref_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.__CM = cm
self.annotations_off = unpack("=I", buff.read( 4 ) )[0]
def get_annotations_off(self) :
"""
Return the offset from the start of the file to the referenced annotation set or
0 if there are no annotations for this element.
:rtype: int
"""
return self.annotations_off
def show(self) :
bytecode._PrintSubBanner("Annotation Set Ref Item")
bytecode._PrintDefault("annotation_off=0x%x\n" % self.annotation_off)
def get_obj(self) :
if self.annotations_off != 0 :
self.annotations_off = self.__CM.get_obj_by_offset( self.annotations_off ).get_off()
return pack("=I", self.annotations_off)
def get_raw(self) :
return self.get_obj()
class AnnotationSetRefList :
"""
This class can parse an annotation_set_ref_list_item of a dex file
:param buff: a string which represents a Buff object of the annotation_set_ref_list_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.offset = buff.get_idx()
self.__CM = cm
self.list = []
self.size = unpack("=I", buff.read( 4 ) )[0]
for i in xrange(0, self.size) :
self.list.append( AnnotationSetRefItem(buff, cm) )
def get_list(self) :
"""
Return elements of the list
        :rtype: a list of :class:`AnnotationSetRefItem`
"""
return self.list
def get_off(self) :
return self.offset
def set_off(self, off) :
self.offset = off
def reload(self) :
pass
def show(self) :
bytecode._PrintSubBanner("Annotation Set Ref List Item")
for i in self.list :
i.show()
def get_obj(self) :
return [ i for i in self.list ]
def get_raw(self) :
return pack("=I", self.size) + ''.join(i.get_raw() for i in self.list)
class FieldAnnotation :
"""
This class can parse a field_annotation of a dex file
:param buff: a string which represents a Buff object of the field_annotation
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.offset = buff.get_idx()
self.__CM = cm
self.field_idx = unpack("=I", buff.read( 4 ) )[0]
self.annotations_off = unpack("=I", buff.read( 4 ) )[0]
def get_field_idx(self) :
"""
Return the index into the field_ids list for the identity of the field being annotated
:rtype: int
"""
        return self.field_idx
def get_annotations_off(self) :
"""
Return the offset from the start of the file to the list of annotations for the field
:rtype: int
"""
return self.annotations_off
def set_off(self, off) :
self.offset = off
def get_off(self) :
return self.offset
def show(self) :
bytecode._PrintSubBanner("Field Annotation")
bytecode._PrintDefault( "field_idx=0x%x annotations_off=0x%x\n" % (self.field_idx, self.annotations_off) )
def get_obj(self) :
if self.annotations_off != 0 :
self.annotations_off = self.__CM.get_obj_by_offset( self.annotations_off ).get_off()
return pack("=I", self.field_idx) + pack("=I", self.annotations_off)
def get_raw(self) :
return self.get_obj()
def get_length(self) :
return len(self.get_raw())
class MethodAnnotation :
"""
This class can parse a method_annotation of a dex file
:param buff: a string which represents a Buff object of the method_annotation
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.offset = buff.get_idx()
self.__CM = cm
self.method_idx = unpack("=I", buff.read( 4 ) )[0]
self.annotations_off = unpack("=I", buff.read( 4 ) )[0]
def get_method_idx(self) :
"""
Return the index into the method_ids list for the identity of the method being annotated
:rtype: int
"""
        return self.method_idx
def get_annotations_off(self) :
"""
Return the offset from the start of the file to the list of annotations for the method
:rtype: int
"""
return self.annotations_off
def set_off(self, off) :
self.offset = off
def get_off(self) :
return self.offset
def show(self) :
bytecode._PrintSubBanner("Method Annotation")
bytecode._PrintDefault( "method_idx=0x%x annotations_off=0x%x\n" % (self.method_idx, self.annotations_off) )
def get_obj(self) :
if self.annotations_off != 0 :
self.annotations_off = self.__CM.get_obj_by_offset( self.annotations_off ).get_off()
return pack("=I", self.method_idx) + pack("=I", self.annotations_off)
def get_raw(self) :
return self.get_obj()
def get_length(self) :
return len(self.get_raw())
class ParameterAnnotation :
"""
This class can parse a parameter_annotation of a dex file
:param buff: a string which represents a Buff object of the parameter_annotation
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.offset = buff.get_idx()
self.__CM = cm
self.method_idx = unpack("=I", buff.read( 4 ) )[0]
self.annotations_off = unpack("=I", buff.read( 4 ) )[0]
def get_method_idx(self) :
"""
Return the index into the method_ids list for the identity of the method whose parameters are being annotated
:rtype: int
"""
        return self.method_idx
def get_annotations_off(self) :
"""
Return the offset from the start of the file to the list of annotations for the method parameters
:rtype: int
"""
return self.annotations_off
def set_off(self, off) :
self.offset = off
def get_off(self) :
return self.offset
def show(self) :
bytecode._PrintSubBanner("Parameter Annotation")
bytecode._PrintDefault( "method_idx=0x%x annotations_off=0x%x\n" % (self.method_idx, self.annotations_off) )
def get_obj(self) :
if self.annotations_off != 0 :
self.annotations_off = self.__CM.get_obj_by_offset( self.annotations_off ).get_off()
return pack("=I", self.method_idx) + pack("=I", self.annotations_off)
def get_raw(self) :
return self.get_obj()
class AnnotationsDirectoryItem :
"""
This class can parse an annotations_directory_item of a dex file
:param buff: a string which represents a Buff object of the annotations_directory_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.__CM = cm
self.offset = buff.get_idx()
self.class_annotations_off = unpack("=I", buff.read(4))[0]
self.annotated_fields_size = unpack("=I", buff.read(4))[0]
self.annotated_methods_size = unpack("=I", buff.read(4))[0]
self.annotated_parameters_size = unpack("=I", buff.read(4))[0]
self.field_annotations = []
for i in xrange(0, self.annotated_fields_size) :
self.field_annotations.append( FieldAnnotation( buff, cm ) )
self.method_annotations = []
for i in xrange(0, self.annotated_methods_size) :
self.method_annotations.append( MethodAnnotation( buff, cm ) )
self.parameter_annotations = []
for i in xrange(0, self.annotated_parameters_size) :
self.parameter_annotations.append( ParameterAnnotation( buff, cm ) )
def get_class_annotations_off(self) :
"""
Return the offset from the start of the file to the annotations made directly on the class,
or 0 if the class has no direct annotations
:rtype: int
"""
return self.class_annotations_off
def get_annotated_fields_size(self) :
"""
Return the count of fields annotated by this item
:rtype: int
"""
return self.annotated_fields_size
def get_annotated_methods_size(self) :
"""
Return the count of methods annotated by this item
:rtype: int
"""
return self.annotated_methods_size
def get_annotated_parameters_size(self) :
"""
Return the count of method parameter lists annotated by this item
:rtype: int
"""
return self.annotated_parameters_size
def get_field_annotations(self) :
"""
Return the list of associated field annotations
:rtype: a list of :class:`FieldAnnotation`
"""
return self.field_annotations
def get_method_annotations(self) :
"""
Return the list of associated method annotations
:rtype: a list of :class:`MethodAnnotation`
"""
return self.method_annotations
def get_parameter_annotations(self) :
"""
Return the list of associated method parameter annotations
:rtype: a list of :class:`ParameterAnnotation`
"""
return self.parameter_annotations
def set_off(self, off) :
self.offset = off
def get_off(self) :
return self.offset
def reload(self) :
pass
def show(self) :
bytecode._PrintSubBanner("Annotations Directory Item")
bytecode._PrintDefault("class_annotations_off=0x%x annotated_fields_size=%d annotated_methods_size=%d annotated_parameters_size=%d\n" %
( self.class_annotations_off,
self.annotated_fields_size,
self.annotated_methods_size,
self.annotated_parameters_size))
for i in self.field_annotations :
i.show()
for i in self.method_annotations :
i.show()
for i in self.parameter_annotations :
i.show()
def get_obj(self) :
if self.class_annotations_off != 0 :
self.class_annotations_off = self.__CM.get_obj_by_offset( self.class_annotations_off ).get_off()
return pack("=I", self.class_annotations_off) + \
pack("=I", self.annotated_fields_size) + \
pack("=I", self.annotated_methods_size) + \
pack("=I", self.annotated_parameters_size)
def get_raw(self) :
return self.get_obj() + \
''.join(i.get_raw() for i in self.field_annotations) + \
''.join(i.get_raw() for i in self.method_annotations) + \
''.join(i.get_raw() for i in self.parameter_annotations)
def get_length(self) :
length = len( self.get_obj() )
for i in self.field_annotations :
length += i.get_length()
for i in self.method_annotations :
length += i.get_length()
for i in self.parameter_annotations :
length += i.get_length()
return length
class TypeItem :
"""
This class can parse a type_item of a dex file
:param buff: a string which represents a Buff object of the type_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.__CM = cm
self.type_idx = unpack("=H", buff.read(2))[0]
def get_type_idx(self) :
"""
Return the index into the type_ids list
:rtype: int
"""
return self.type_idx
def get_string(self) :
"""
Return the type string
:rtype: string
"""
return self.__CM.get_type( self.type_idx )
def show(self) :
bytecode._PrintSubBanner("Type Item")
bytecode._PrintDefault("type_idx=%d\n" % self.type_idx)
def get_obj(self) :
return pack("=H", self.type_idx)
def get_raw(self) :
return self.get_obj()
def get_length(self) :
return len(self.get_obj())
class TypeList :
"""
This class can parse a type_list of a dex file
:param buff: a string which represents a Buff object of the type_list
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.__CM = cm
self.offset = buff.get_idx()
self.pad = ""
        if self.offset % 4 != 0 :
            # consume the padding up to the next 4-byte boundary
            self.pad = buff.read( 4 - (self.offset % 4) )
self.len_pad = len(self.pad)
self.size = unpack("=I", buff.read( 4 ) )[0]
self.list = []
for i in xrange(0, self.size) :
self.list.append( TypeItem( buff, cm ) )
def get_pad(self) :
"""
Return the alignment string
:rtype: string
"""
return self.pad
def get_type_list_off(self) :
"""
Return the offset of the item
:rtype: int
"""
return self.offset + self.len_pad
def get_string(self) :
"""
Return the concatenation of all strings
:rtype: string
"""
return ' '.join(i.get_string() for i in self.list)
def get_size(self) :
"""
Return the size of the list, in entries
:rtype: int
"""
return self.size
def get_list(self) :
"""
Return the list of TypeItem
:rtype: a list of :class:`TypeItem` objects
"""
return self.list
def set_off(self, off) :
self.offset = off
def get_off(self) :
return self.offset + self.len_pad
def reload(self) :
pass
def show(self) :
bytecode._PrintSubBanner("Type List")
bytecode._PrintDefault("size=%d\n" % self.size)
for i in self.list :
i.show()
def get_obj(self) :
return self.pad + pack("=I", self.size)
def get_raw(self) :
return self.get_obj() + ''.join(i.get_raw() for i in self.list)
def get_length(self) :
length = len(self.get_obj())
for i in self.list :
length += i.get_length()
return length
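# Illustrative sketch of the alignment handled by TypeList above: type_list
# structures are 4-byte aligned in the dex file, so a reader positioned at
# offset off must first consume the padding up to the next 4-byte boundary.
def _example_alignment_padding(off, alignment=4) :
    """Return the number of pad bytes before an item at off (a minimal sketch)."""
    return (alignment - (off % alignment)) % alignment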
DBG_END_SEQUENCE = 0x00 # (none) terminates a debug info sequence for a code_item
DBG_ADVANCE_PC = 0x01 # uleb128 addr_diff -- addr_diff: amount to add to the address register; advances the address register without emitting a positions entry
DBG_ADVANCE_LINE = 0x02 # sleb128 line_diff -- line_diff: amount to change the line register by; advances the line register without emitting a positions entry
DBG_START_LOCAL = 0x03 # uleb128 register_num
# uleb128p1 name_idx
# uleb128p1 type_idx
# register_num: register that will contain the local
# name_idx: string index of the name
# type_idx: type index of the type
# introduces a local variable at the current address. Either name_idx or type_idx may be NO_INDEX to indicate that that value is unknown.
DBG_START_LOCAL_EXTENDED = 0x04 # uleb128 register_num uleb128p1 name_idx uleb128p1 type_idx uleb128p1 sig_idx
# register_num: register that will contain local
# name_idx: string index of the name
# type_idx: type index of the type
# sig_idx: string index of the type signature
# introduces a local with a type signature at the current address. Any of name_idx, type_idx, or sig_idx may be NO_INDEX to indicate that that value is unknown.
# (If sig_idx is -1, though, the same data could be represented more efficiently using the opcode DBG_START_LOCAL.)
# Note: See the discussion under "dalvik.annotation.Signature" below for caveats about handling signatures.
DBG_END_LOCAL = 0x05 # uleb128 register_num
# register_num: register that contained local
# marks a currently-live local variable as out of scope at the current address
DBG_RESTART_LOCAL = 0x06 # uleb128 register_num
# register_num: register to restart; re-introduces a local variable at the current address.
# The name and type are the same as the last local that was live in the specified register.
DBG_SET_PROLOGUE_END = 0x07 # (none) sets the prologue_end state machine register, indicating that the next position entry that is added should be considered the end of a
# method prologue (an appropriate place for a method breakpoint). The prologue_end register is cleared by any special (>= 0x0a) opcode.
DBG_SET_EPILOGUE_BEGIN = 0x08 # (none) sets the epilogue_begin state machine register, indicating that the next position entry that is added should be considered the beginning
# of a method epilogue (an appropriate place to suspend execution before method exit). The epilogue_begin register is cleared by any special (>= 0x0a) opcode.
DBG_SET_FILE = 0x09 # uleb128p1 name_idx
# name_idx: string index of source file name; NO_INDEX if unknown indicates that all subsequent line number entries make reference to this source file name,
# instead of the default name specified in code_item
DBG_Special_Opcodes_BEGIN = 0x0a # (none) advances the line and address registers, emits a position entry, and clears prologue_end and epilogue_begin. See below for description.
DBG_Special_Opcodes_END = 0xff
DBG_LINE_BASE = -4
DBG_LINE_RANGE = 15
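# Illustrative sketch of the special opcodes described above: every opcode in
# the range DBG_Special_Opcodes_BEGIN..DBG_Special_Opcodes_END encodes both a
# line delta (within DBG_LINE_BASE..DBG_LINE_BASE + DBG_LINE_RANGE - 1) and an
# address delta, per the dex debug_info_item specification.
def _example_decode_special_opcode(opcode) :
    """Return the (line_diff, addr_diff) pair of a special opcode (a sketch)."""
    adjusted = opcode - DBG_Special_Opcodes_BEGIN
    line_diff = DBG_LINE_BASE + (adjusted % DBG_LINE_RANGE)
    addr_diff = adjusted / DBG_LINE_RANGE   # floor division (python 2 int /)
    return line_diff, addr_diff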
class DBGBytecode :
def __init__(self, cm, op_value) :
self.CM = cm
self.op_value = op_value
self.format = []
def get_op_value(self) :
return self.op_value
def add(self, value, ttype) :
self.format.append( (value, ttype) )
def get_value(self) :
if self.get_op_value() == DBG_START_LOCAL :
return self.CM.get_string(self.format[1][0])
elif self.get_op_value() == DBG_START_LOCAL_EXTENDED :
return self.CM.get_string(self.format[1][0])
return None
def show(self) :
bytecode._PrintSubBanner("DBGBytecode")
bytecode._PrintDefault("op_value=%x format=%s value=%s\n" % (self.op_value, str(self.format), self.get_value()))
def get_obj(self) :
return []
def get_raw(self) :
buff = self.op_value.get_value_buff()
for i in self.format :
if i[1] == "u" :
buff += writeuleb128( i[0] )
elif i[1] == "s" :
buff += writesleb128( i[0] )
return buff
class DebugInfoItem :
def __init__(self, buff, cm) :
self.CM = cm
self.offset = buff.get_idx()
self.line_start = readuleb128( buff )
self.parameters_size = readuleb128( buff )
#print "line", self.line_start, "params", self.parameters_size
self.parameter_names = []
for i in xrange(0, self.parameters_size) :
self.parameter_names.append( readuleb128p1( buff ) )
self.bytecodes = []
bcode = DBGBytecode( self.CM, unpack("=B", buff.read(1))[0] )
self.bytecodes.append( bcode )
while bcode.get_op_value() != DBG_END_SEQUENCE :
bcode_value = bcode.get_op_value()
if bcode_value == DBG_ADVANCE_PC :
bcode.add( readuleb128( buff ), "u" )
elif bcode_value == DBG_ADVANCE_LINE :
bcode.add( readsleb128( buff ), "s" )
elif bcode_value == DBG_START_LOCAL :
bcode.add( readusleb128( buff ), "u" )
bcode.add( readuleb128p1( buff ), "u1" )
bcode.add( readuleb128p1( buff ), "u1" )
elif bcode_value == DBG_START_LOCAL_EXTENDED :
bcode.add( readusleb128( buff ), "u" )
bcode.add( readuleb128p1( buff ), "u1" )
bcode.add( readuleb128p1( buff ), "u1" )
bcode.add( readuleb128p1( buff ), "u1" )
elif bcode_value == DBG_END_LOCAL :
bcode.add( readusleb128( buff ), "u" )
elif bcode_value == DBG_RESTART_LOCAL :
bcode.add( readusleb128( buff ), "u" )
elif bcode_value == DBG_SET_PROLOGUE_END :
pass
elif bcode_value == DBG_SET_EPILOGUE_BEGIN :
pass
elif bcode_value == DBG_SET_FILE :
bcode.add( readuleb128p1( buff ), "u1" )
else : #bcode_value >= DBG_Special_Opcodes_BEGIN and bcode_value <= DBG_Special_Opcodes_END :
pass
bcode = DBGBytecode( self.CM, unpack("=B", buff.read(1))[0] )
self.bytecodes.append( bcode )
def reload(self) :
pass
def get_parameters_size(self) :
return self.parameters_size
def get_line_start(self) :
return self.line_start
def get_parameter_names(self) :
return self.parameter_names
def get_translated_parameter_names(self) :
l = []
for i in self.parameter_names :
if i == -1 :
l.append( None )
else :
l.append( self.CM.get_string( i ) )
return l
def get_bytecodes(self) :
return self.bytecodes
def show(self) :
bytecode._PrintSubBanner("Debug Info Item")
bytecode._PrintDefault("line_start=%d parameters_size=%d\n" % (self.line_start, self.parameters_size))
nb = 0
for i in self.parameter_names :
bytecode._PrintDefault("parameter_names[%d]=%s\n" % (nb, self.CM.get_string( i )))
nb += 1
for i in self.bytecodes :
i.show()
def get_raw(self) :
        return [ bytecode.Buff( self.offset, writeuleb128( self.line_start ) + \
writeuleb128( self.parameters_size ) + \
''.join(writeuleb128(i) for i in self.parameter_names) + \
''.join(i.get_raw() for i in self.bytecodes) ) ]
def get_off(self) :
return self.offset
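# Illustrative sketch of the LEB128 variants consumed by DebugInfoItem above: a
# uleb128 stores 7 payload bits per byte, least significant first, with the
# high bit of each byte flagging a continuation; uleb128p1 is the same value
# plus one, so that -1 (NO_INDEX) encodes as the single byte 0x00.
def _example_read_uleb128(byte_values) :
    """Decode one uleb128 from an iterable of integer byte values (a sketch)."""
    result = 0
    shift = 0
    for b in byte_values :
        result |= (b & 0x7f) << shift
        if (b & 0x80) == 0 :
            break
        shift += 7
    return result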
VALUE_BYTE = 0x00 # (none; must be 0) ubyte[1] signed one-byte integer value
VALUE_SHORT = 0x02 # size - 1 (0..1) ubyte[size] signed two-byte integer value, sign-extended
VALUE_CHAR = 0x03 # size - 1 (0..1) ubyte[size] unsigned two-byte integer value, zero-extended
VALUE_INT = 0x04 # size - 1 (0..3) ubyte[size] signed four-byte integer value, sign-extended
VALUE_LONG = 0x06 # size - 1 (0..7) ubyte[size] signed eight-byte integer value, sign-extended
VALUE_FLOAT = 0x10 # size - 1 (0..3) ubyte[size] four-byte bit pattern, zero-extended to the right, and interpreted as an IEEE754 32-bit floating point value
VALUE_DOUBLE = 0x11 # size - 1 (0..7) ubyte[size] eight-byte bit pattern, zero-extended to the right, and interpreted as an IEEE754 64-bit floating point value
VALUE_STRING = 0x17 # size - 1 (0..3) ubyte[size] unsigned (zero-extended) four-byte integer value, interpreted as an index into the string_ids section and representing a string value
VALUE_TYPE = 0x18 # size - 1 (0..3) ubyte[size] unsigned (zero-extended) four-byte integer value, interpreted as an index into the type_ids section and representing a reflective type/class value
VALUE_FIELD = 0x19 # size - 1 (0..3) ubyte[size] unsigned (zero-extended) four-byte integer value, interpreted as an index into the field_ids section and representing a reflective field value
VALUE_METHOD = 0x1a # size - 1 (0..3) ubyte[size] unsigned (zero-extended) four-byte integer value, interpreted as an index into the method_ids section and representing a reflective method value
VALUE_ENUM = 0x1b # size - 1 (0..3) ubyte[size] unsigned (zero-extended) four-byte integer value, interpreted as an index into the field_ids section and representing the value of an enumerated type constant
VALUE_ARRAY = 0x1c # (none; must be 0) encoded_array an array of values, in the format specified by "encoded_array Format" below. The size of the value is implicit in the encoding.
VALUE_ANNOTATION = 0x1d # (none; must be 0) encoded_annotation a sub-annotation, in the format specified by "encoded_annotation Format" below. The size of the value is implicit in the encoding.
VALUE_NULL = 0x1e # (none; must be 0) (none) null reference value
VALUE_BOOLEAN = 0x1f # boolean (0..1) (none) one-bit value; 0 for false and 1 for true. The bit is represented in the value_arg.
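# Illustrative sketch of the extension rules listed above: VALUE_SHORT,
# VALUE_INT and VALUE_LONG payloads are little-endian and sign-extended, while
# VALUE_CHAR and the index-carrying types (VALUE_STRING..VALUE_ENUM) are
# zero-extended. EncodedValue below keeps the raw little-endian bytes and only
# zero-extends; a sign-extending variant would look like this:
def _example_sign_extend(buf) :
    """Sign-extend a little-endian byte string to an int (a minimal sketch)."""
    ret = 0
    shift = 0
    for b in buf :
        ret |= ord(b) << shift
        shift += 8
    if buf and ord(buf[-1]) & 0x80 :
        ret -= 1 << shift
    return ret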
class DebugInfoItemEmpty :
def __init__(self, buff, cm) :
self.__CM = cm
self.offset = buff.get_idx()
self.__buff = buff
self.__raw = ""
def set_off(self, off) :
self.offset = off
def get_off(self) :
return self.offset
def reload(self) :
offset = self.offset
n = self.__CM.get_next_offset_item( offset )
s_idx = self.__buff.get_idx()
self.__buff.set_idx( offset )
self.__raw = self.__buff.read( n - offset )
self.__buff.set_idx( s_idx )
def show(self) :
pass
def get_obj(self) :
return []
def get_raw(self) :
return self.__raw
def get_length(self) :
return len(self.__raw)
class EncodedArray :
"""
This class can parse an encoded_array of a dex file
:param buff: a string which represents a Buff object of the encoded_array
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.__CM = cm
self.offset = buff.get_idx()
self.size = readuleb128( buff )
self.values = []
for i in xrange(0, self.size) :
self.values.append( EncodedValue(buff, cm) )
def get_size(self) :
"""
Return the number of elements in the array
:rtype: int
"""
return self.size
def get_values(self) :
"""
        Return the series of encoded values of the array
:rtype: a list of :class:`EncodedValue` objects
"""
return self.values
def show(self) :
bytecode._PrintSubBanner("Encoded Array")
bytecode._PrintDefault("size=%d\n" % self.size)
for i in self.values :
i.show()
def get_obj(self) :
return writeuleb128( self.size )
def get_raw(self) :
return self.get_obj() + ''.join(i.get_raw() for i in self.values)
def get_length(self) :
length = len(self.get_obj())
for i in self.values :
length += i.get_length()
return length
class EncodedValue :
"""
This class can parse an encoded_value of a dex file
:param buff: a string which represents a Buff object of the encoded_value
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.__CM = cm
self.val = unpack("=B", buff.read(1))[0]
self.value_arg = self.val >> 5
self.value_type = self.val & 0x1f
self.raw_value = None
self.value = ""
# TODO: parse floats/doubles correctly
if self.value_type >= VALUE_SHORT and self.value_type < VALUE_STRING :
self.value, self.raw_value = self._getintvalue(buff.read( self.value_arg + 1 ))
elif self.value_type == VALUE_STRING :
id, self.raw_value = self._getintvalue(buff.read( self.value_arg + 1 ))
self.value = cm.get_raw_string(id)
elif self.value_type == VALUE_TYPE :
id, self.raw_value = self._getintvalue(buff.read( self.value_arg + 1 ))
self.value = cm.get_type(id)
elif self.value_type == VALUE_FIELD :
id, self.raw_value = self._getintvalue(buff.read( self.value_arg + 1 ))
self.value = cm.get_field(id)
elif self.value_type == VALUE_METHOD :
id, self.raw_value = self._getintvalue(buff.read( self.value_arg + 1 ))
self.value = cm.get_method(id)
elif self.value_type == VALUE_ENUM :
id, self.raw_value = self._getintvalue(buff.read( self.value_arg + 1 ))
self.value = cm.get_field(id)
elif self.value_type == VALUE_ARRAY :
self.value = EncodedArray( buff, cm )
elif self.value_type == VALUE_ANNOTATION :
self.value = EncodedAnnotation( buff, cm )
elif self.value_type == VALUE_BYTE :
self.value = buff.read( 1 )
elif self.value_type == VALUE_NULL :
self.value = None
        elif self.value_type == VALUE_BOOLEAN :
            self.value = bool( self.value_arg )
else :
bytecode.Exit( "Unknown value 0x%x" % self.value_type )
def get_value(self) :
"""
        Return the decoded value, interpreted according to value_type
:rtype: an object representing the value
"""
return self.value
def get_value_type(self) :
return self.value_type
def get_value_arg(self) :
return self.value_arg
def _getintvalue(self, buf):
ret = 0
shift = 0
for b in buf:
ret |= ord(b) << shift
shift += 8
return ret, buf
def show(self) :
bytecode._PrintSubBanner("Encoded Value")
bytecode._PrintDefault("val=%x value_arg=%x value_type=%x\n" % (self.val, self.value_arg, self.value_type))
def get_obj(self) :
        if not isinstance(self.value, str) :
return [ self.value ]
return []
def get_raw(self) :
if self.raw_value == None :
return pack("=B", self.val) + bytecode.object_to_str( self.value )
else :
return pack("=B", self.val) + bytecode.object_to_str( self.raw_value )
def get_length(self) :
if self.raw_value == None :
return len(pack("=B", self.val)) + len(bytecode.object_to_str( self.value ))
else :
return len(pack("=B", self.val)) + len(bytecode.object_to_str( self.raw_value ))
class AnnotationElement :
"""
This class can parse an annotation_element of a dex file
:param buff: a string which represents a Buff object of the annotation_element
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.__CM = cm
self.name_idx = readuleb128( buff )
self.value = EncodedValue( buff, cm )
def get_name_idx(self) :
"""
Return the element name, represented as an index into the string_ids section
:rtype: int
"""
return self.name_idx
def get_value(self) :
"""
Return the element value (EncodedValue)
:rtype: a :class:`EncodedValue` object
"""
return self.value
def show(self) :
bytecode._PrintSubBanner("Annotation Element")
bytecode._PrintDefault("name_idx=%d\n" % self.name_idx)
self.value.show()
def get_obj(self) :
return writeuleb128(self.name_idx)
def get_raw(self) :
return self.get_obj() + self.value.get_raw()
def get_length(self) :
return len(self.get_obj()) + self.value.get_length()
class EncodedAnnotation :
"""
This class can parse an encoded_annotation of a dex file
:param buff: a string which represents a Buff object of the encoded_annotation
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.__CM = cm
self.type_idx = readuleb128( buff )
self.size = readuleb128( buff )
self.elements = []
for i in xrange(0, self.size) :
self.elements.append( AnnotationElement( buff, cm ) )
def get_type_idx(self) :
"""
Return the type of the annotation. This must be a class (not array or primitive) type
:rtype: int
"""
return self.type_idx
def get_size(self) :
"""
Return the number of name-value mappings in this annotation
        :rtype: int
"""
return self.size
def get_elements(self) :
"""
Return the elements of the annotation, represented directly in-line (not as offsets)
:rtype: a list of :class:`AnnotationElement` objects
"""
return self.elements
def show(self) :
bytecode._PrintSubBanner("Encoded Annotation")
bytecode._PrintDefault("type_idx=%d size=%d\n" % (self.type_idx, self.size))
for i in self.elements :
i.show()
def get_obj(self) :
return [ i for i in self.elements ]
def get_raw(self) :
return writeuleb128(self.type_idx) + writeuleb128(self.size) + ''.join(i.get_raw() for i in self.elements)
def get_length(self) :
length = len(writeuleb128(self.type_idx) + writeuleb128(self.size))
for i in self.elements :
length += i.get_length()
return length
class AnnotationItem :
"""
This class can parse an annotation_item of a dex file
:param buff: a string which represents a Buff object of the annotation_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.__CM = cm
self.offset = buff.get_idx()
self.visibility = unpack("=B", buff.read(1))[0]
self.annotation = EncodedAnnotation(buff, cm)
def get_visibility(self) :
"""
Return the intended visibility of this annotation
:rtype: int
"""
return self.visibility
def get_annotation(self) :
"""
Return the encoded annotation contents
:rtype: a :class:`EncodedAnnotation` object
"""
return self.annotation
def set_off(self, off) :
self.offset = off
def get_off(self) :
return self.offset
def reload(self) :
pass
def show(self) :
bytecode._PrintSubBanner("Annotation Item")
bytecode._PrintDefault("visibility=%d\n" % self.visibility)
self.annotation.show()
def get_obj(self) :
return [ self.annotation ]
def get_raw(self) :
return pack("=B", self.visibility) + self.annotation.get_raw()
def get_length(self) :
length = len(pack("=B", self.visibility))
length += self.annotation.get_length()
return length
class EncodedArrayItem :
"""
This class can parse an encoded_array_item of a dex file
:param buff: a string which represents a Buff object of the encoded_array_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.__CM = cm
self.offset = buff.get_idx()
self.value = EncodedArray( buff, cm )
def get_value(self) :
"""
        Return the encoded array value
:rtype: a :class:`EncodedArray` object
"""
return self.value
def set_off(self, off) :
self.offset = off
def reload(self) :
pass
def show(self) :
bytecode._PrintSubBanner("Encoded Array Item")
self.value.show()
def get_obj(self) :
return [ self.value ]
def get_raw(self) :
return self.value.get_raw()
def get_length(self) :
return self.value.get_length()
def get_off(self) :
return self.offset
class StringDataItem :
"""
This class can parse a string_data_item of a dex file
:param buff: a string which represents a Buff object of the string_data_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.__CM = cm
self.offset = buff.get_idx()
self.utf16_size = readuleb128( buff )
        self.data = buff.read( self.utf16_size + 1 )

        # a multi-byte MUTF-8 string occupies more bytes than its utf16_size
        # code units, so keep reading until the terminating NUL byte;
        # utf16_size itself is left untouched so that get_raw() round-trips
        while self.data[-1] != '\x00' :
            self.data += buff.read( 1 )
def get_utf16_size(self) :
"""
Return the size of this string, in UTF-16 code units
        :rtype: int
"""
return self.utf16_size
def get_data(self) :
"""
Return a series of MUTF-8 code units (a.k.a. octets, a.k.a. bytes) followed by a byte of value 0
:rtype: string
"""
return self.data
def set_off(self, off) :
self.offset = off
def get_off(self) :
return self.offset
def reload(self) :
pass
def get(self) :
return self.data[:-1]
def show(self) :
bytecode._PrintSubBanner("String Data Item")
bytecode._PrintDefault("utf16_size=%d data=%s\n" % (self.utf16_size, repr( self.data )))
def get_obj(self) :
return []
def get_raw(self) :
return writeuleb128( self.utf16_size ) + self.data
def get_length(self) :
return len(writeuleb128( self.utf16_size )) + len(self.data)
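# Illustrative sketch of the MUTF-8 encoding read by StringDataItem above: it
# differs from standard UTF-8 in that U+0000 is stored as the two bytes C0 80
# (so the terminating 0x00 never occurs inside a string) and supplementary
# characters are stored as CESU-8 style surrogate pairs. A rough decoder,
# assuming the trailing NUL was already stripped as in get(), and ignoring the
# surrogate-pair subtlety:
def _example_decode_mutf8(data) :
    """Decode MUTF-8 bytes to unicode, tolerantly (a minimal sketch)."""
    return data.replace("\xc0\x80", "\x00").decode("utf-8", "replace")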
class StringIdItem :
"""
This class can parse a string_id_item of a dex file
:param buff: a string which represents a Buff object of the string_id_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.__CM = cm
self.offset = buff.get_idx()
self.string_data_off = unpack("=I", buff.read(4))[0]
def get_string_data_off(self) :
"""
Return the offset from the start of the file to the string data for this item
:rtype: int
"""
return self.string_data_off
def set_off(self, off) :
self.offset = off
def get_off(self) :
return self.offset
def reload(self) :
pass
def show(self) :
bytecode._PrintSubBanner("String Id Item")
bytecode._PrintDefault("string_data_off=%x\n" % self.string_data_off)
def get_obj(self) :
if self.string_data_off != 0 :
self.string_data_off = self.__CM.get_string_by_offset( self.string_data_off ).get_off()
return pack("=I", self.string_data_off)
def get_raw(self) :
return self.get_obj()
def get_length(self) :
return len(self.get_obj())
class TypeIdItem :
"""
This class can parse a type_id_item of a dex file
:param buff: a string which represents a Buff object of the type_id_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.__CM = cm
self.descriptor_idx = unpack("=I", buff.read( 4 ) )[0]
self.descriptor_idx_value = None
def get_descriptor_idx(self) :
"""
Return the index into the string_ids list for the descriptor string of this type
:rtype: int
"""
return self.descriptor_idx
def get_descriptor_idx_value(self) :
"""
Return the string associated to the descriptor
:rtype: string
"""
return self.descriptor_idx_value
def reload(self) :
self.descriptor_idx_value = self.__CM.get_string( self.descriptor_idx )
def show(self) :
bytecode._PrintSubBanner("Type Id Item")
bytecode._PrintDefault("descriptor_idx=%d descriptor_idx_value=%s\n" % (self.descriptor_idx, self.descriptor_idx_value))
def get_obj(self) :
return pack("=I", self.descriptor_idx)
def get_raw(self) :
return self.get_obj()
def get_length(self) :
return len(self.get_obj())
class TypeHIdItem :
"""
This class can parse a list of type_id_item of a dex file
:param buff: a string which represents a Buff object of the list of type_id_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, size, buff, cm) :
self.__CM = cm
self.offset = buff.get_idx()
self.type = []
for i in xrange(0, size) :
self.type.append( TypeIdItem( buff, cm ) )
def get_type(self) :
"""
Return the list of type_id_item
:rtype: a list of :class:`TypeIdItem` objects
"""
return self.type
def get(self, idx) :
try :
return self.type[ idx ].get_descriptor_idx()
except IndexError :
return -1
def set_off(self, off) :
self.offset = off
def get_off(self) :
return self.offset
def reload(self) :
for i in self.type :
i.reload()
def show(self) :
bytecode._PrintSubBanner("Type List Item")
for i in self.type :
i.show()
def get_obj(self) :
return [ i for i in self.type ]
def get_raw(self) :
return ''.join(i.get_raw() for i in self.type)
def get_length(self) :
length = 0
for i in self.type :
length += i.get_length()
return length
class ProtoIdItem :
"""
This class can parse a proto_id_item of a dex file
:param buff: a string which represents a Buff object of the proto_id_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.__CM = cm
self.shorty_idx = unpack("=I", buff.read(4))[0]
self.return_type_idx = unpack("=I", buff.read(4))[0]
self.parameters_off = unpack("=I", buff.read(4))[0]
self.shorty_idx_value = None
self.return_type_idx_value = None
self.parameters_off_value = None
def reload(self) :
self.shorty_idx_value = self.__CM.get_string( self.shorty_idx )
self.return_type_idx_value = self.__CM.get_type( self.return_type_idx )
self.parameters_off_value = self.__CM.get_type_list( self.parameters_off )
def get_shorty_idx(self) :
"""
Return the index into the string_ids list for the short-form descriptor string of this prototype
:rtype: int
"""
return self.shorty_idx
def get_return_type_idx(self) :
"""
Return the index into the type_ids list for the return type of this prototype
:rtype: int
"""
return self.return_type_idx
def get_parameters_off(self) :
"""
Return the offset from the start of the file to the list of parameter types for this prototype, or 0 if this prototype has no parameters
:rtype: int
"""
return self.parameters_off
def get_shorty_idx_value(self) :
"""
Return the string associated to the shorty_idx
:rtype: string
"""
return self.shorty_idx_value
def get_return_type_idx_value(self) :
"""
Return the string associated to the return_type_idx
:rtype: string
"""
return self.return_type_idx_value
def get_parameters_off_value(self) :
"""
Return the string associated to the parameters_off
:rtype: string
"""
return self.parameters_off_value
def show(self) :
bytecode._PrintSubBanner("Proto Item")
bytecode._PrintDefault("shorty_idx=%d return_type_idx=%d parameters_off=%d\n" % (self.shorty_idx, self.return_type_idx, self.parameters_off))
bytecode._PrintDefault("shorty_idx_value=%s return_type_idx_value=%s parameters_off_value=%s\n" %
(self.shorty_idx_value, self.return_type_idx_value, self.parameters_off_value))
def get_obj(self) :
if self.parameters_off != 0 :
self.parameters_off = self.__CM.get_obj_by_offset( self.parameters_off ).get_off()
return pack("=I", self.shorty_idx) + pack("=I", self.return_type_idx) + pack("=I", self.parameters_off)
def get_raw(self) :
return self.get_obj()
def get_length(self) :
return len(self.get_obj())
class ProtoHIdItem :
"""
This class can parse a list of proto_id_item of a dex file
:param buff: a string which represents a Buff object of the list of proto_id_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, size, buff, cm) :
self.__CM = cm
self.offset = buff.get_idx()
self.proto = []
for i in xrange(0, size) :
self.proto.append( ProtoIdItem(buff, cm) )
def set_off(self, off) :
self.offset = off
def get_off(self) :
return self.offset
def get(self, idx) :
try :
return self.proto[ idx ]
except IndexError :
return ProtoIdItemInvalid()
def reload(self) :
for i in self.proto :
i.reload()
def show(self) :
bytecode._PrintSubBanner("Proto List Item")
for i in self.proto :
i.show()
def get_obj(self) :
return [ i for i in self.proto ]
def get_raw(self) :
return ''.join(i.get_raw() for i in self.proto)
def get_length(self) :
length = 0
for i in self.proto :
length += i.get_length()
return length
class FieldIdItem :
"""
This class can parse a field_id_item of a dex file
:param buff: a string which represents a Buff object of the field_id_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.__CM = cm
self.class_idx = unpack("=H", buff.read(2))[0]
self.type_idx = unpack("=H", buff.read(2))[0]
self.name_idx = unpack("=I", buff.read(4))[0]
self.class_idx_value = None
self.type_idx_value = None
self.name_idx_value = None
def reload(self) :
self.class_idx_value = self.__CM.get_type( self.class_idx )
self.type_idx_value = self.__CM.get_type( self.type_idx )
self.name_idx_value = self.__CM.get_string( self.name_idx )
def get_class_idx(self) :
"""
Return the index into the type_ids list for the definer of this field
:rtype: int
"""
return self.class_idx
def get_type_idx(self) :
"""
Return the index into the type_ids list for the type of this field
:rtype: int
"""
return self.type_idx
def get_name_idx(self) :
"""
Return the index into the string_ids list for the name of this field
:rtype: int
"""
return self.name_idx
def get_class_name(self) :
"""
Return the class name of the field
:rtype: string
"""
return self.class_idx_value
def get_type(self) :
"""
Return the type of the field
:rtype: string
"""
return self.type_idx_value
def get_descriptor(self) :
"""
Return the descriptor of the field
:rtype: string
"""
return self.type_idx_value
def get_name(self) :
"""
Return the name of the field
:rtype: string
"""
return self.name_idx_value
def get_list(self) :
return [ self.get_class_name(), self.get_type(), self.get_name() ]
def show(self) :
bytecode._PrintSubBanner("Field Id Item")
bytecode._PrintDefault("class_idx=%d type_idx=%d name_idx=%d\n" % (self.class_idx, self.type_idx, self.name_idx))
bytecode._PrintDefault("class_idx_value=%s type_idx_value=%s name_idx_value=%s\n" % (self.class_idx_value, self.type_idx_value, self.name_idx_value))
def get_obj(self) :
return pack("=H", self.class_idx) + \
pack("=H", self.type_idx) + \
pack("=I", self.name_idx)
def get_raw(self) :
return self.get_obj()
def get_length(self) :
return len(self.get_obj())
class FieldHIdItem :
"""
This class can parse a list of field_id_item of a dex file
:param buff: a string which represents a Buff object of the list of field_id_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, size, buff, cm) :
self.offset = buff.get_idx()
self.elem = []
for i in xrange(0, size) :
self.elem.append( FieldIdItem(buff, cm) )
def set_off(self, off) :
self.offset = off
def get_off(self) :
return self.offset
def gets(self) :
return self.elem
def get(self, idx) :
try :
return self.elem[ idx ]
except IndexError :
return FieldIdItemInvalid()
def reload(self) :
for i in self.elem :
i.reload()
def show(self) :
nb = 0
for i in self.elem :
print nb,
i.show()
nb = nb + 1
def get_obj(self) :
return [ i for i in self.elem ]
def get_raw(self) :
return ''.join(i.get_raw() for i in self.elem)
def get_length(self) :
length = 0
for i in self.elem :
length += i.get_length()
return length
class MethodIdItem :
"""
This class can parse a method_id_item of a dex file
:param buff: a string which represents a Buff object of the method_id_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.__CM = cm
self.class_idx = unpack("=H", buff.read(2))[0]
self.proto_idx = unpack("=H", buff.read(2))[0]
self.name_idx = unpack("=I", buff.read(4))[0]
self.class_idx_value = None
self.proto_idx_value = None
self.name_idx_value = None
def reload(self) :
self.class_idx_value = self.__CM.get_type( self.class_idx )
self.proto_idx_value = self.__CM.get_proto( self.proto_idx )
self.name_idx_value = self.__CM.get_string( self.name_idx )
def get_class_idx(self) :
"""
Return the index into the type_ids list for the definer of this method
:rtype: int
"""
return self.class_idx
def get_proto_idx(self) :
"""
Return the index into the proto_ids list for the prototype of this method
:rtype: int
"""
return self.proto_idx
def get_name_idx(self) :
"""
Return the index into the string_ids list for the name of this method
:rtype: int
"""
return self.name_idx
def get_class_name(self) :
"""
Return the class name of the method
:rtype: string
"""
return self.class_idx_value
def get_proto(self) :
"""
Return the prototype of the method
:rtype: string
"""
return self.proto_idx_value
def get_descriptor(self) :
"""
Return the descriptor
:rtype: string
"""
proto = self.get_proto()
return proto[0] + proto[1]
def get_name(self) :
"""
Return the name of the method
:rtype: string
"""
return self.name_idx_value
def get_list(self) :
return [ self.get_class_name(), self.get_name(), self.get_proto() ]
def show(self) :
bytecode._PrintSubBanner("Method Id Item")
bytecode._PrintDefault("class_idx=%d proto_idx=%d name_idx=%d\n" % (self.class_idx, self.proto_idx, self.name_idx))
bytecode._PrintDefault("class_idx_value=%s proto_idx_value=%s name_idx_value=%s\n" % (self.class_idx_value, self.proto_idx_value, self.name_idx_value))
def get_obj(self) :
return pack("H", self.class_idx) + pack("H", self.proto_idx) + pack("I", self.name_idx)
def get_raw(self) :
return self.get_obj()
def get_length(self) :
return len(self.get_obj())
class MethodHIdItem :
"""
This class can parse a list of method_id_item of a dex file
:param buff: a string which represents a Buff object of the list of method_id_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, size, buff, cm) :
self.__CM = cm
self.offset = buff.get_idx()
self.methods = []
for i in xrange(0, size) :
self.methods.append( MethodIdItem(buff, cm) )
def set_off(self, off) :
self.offset = off
def get_off(self) :
return self.offset
def get(self, idx) :
try :
return self.methods[ idx ]
except IndexError :
return MethodIdItemInvalid()
def reload(self) :
for i in self.methods :
i.reload()
def show(self) :
print "METHOD_ID_ITEM"
nb = 0
for i in self.methods :
print nb,
i.show()
nb = nb + 1
def get_obj(self) :
return [ i for i in self.methods ]
def get_raw(self) :
return ''.join(i.get_raw() for i in self.methods)
def get_length(self) :
length = 0
for i in self.methods :
length += i.get_length()
return length
class ProtoIdItemInvalid :
def get_params(self) :
return "AG:IPI:invalid_params;"
def get_shorty(self) :
return "(AG:IPI:invalid_shorty)"
def get_return_type(self) :
return "(AG:IPI:invalid_return_type)"
def show(self) :
print "AG:IPI:invalid_proto_item", self.get_shorty(), self.get_return_type(), self.get_params()
class FieldIdItemInvalid :
def get_class_name(self) :
return "AG:IFI:invalid_class_name;"
def get_type(self) :
return "(AG:IFI:invalid_type)"
def get_descriptor(self) :
return "(AG:IFI:invalid_descriptor)"
def get_name(self) :
return "AG:IFI:invalid_name"
def get_list(self) :
return [ self.get_class_name(), self.get_type(), self.get_name() ]
def show(self) :
print "AG:IFI:invalid_field_item"
class MethodIdItemInvalid :
def get_class_name(self) :
return "AG:IMI:invalid_class_name;"
def get_descriptor(self) :
return "(AG:IMI:invalid_descriptor)"
def get_proto(self) :
return "()AG:IMI:invalid_proto"
def get_name(self) :
return "AG:IMI:invalid_name"
def get_list(self) :
return [ self.get_class_name(), self.get_name(), self.get_proto() ]
def show(self) :
print "AG:IMI:invalid_method_item"
class EncodedField :
"""
This class can parse an encoded_field of a dex file
:param buff: a string which represents a Buff object of the encoded field
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.CM = cm
self.field_idx_diff = readuleb128( buff )
self.access_flags = readuleb128( buff )
self.field_idx = 0
self.name = None
self.proto = None
self.class_name = None
self.init_value = None
self.access_flags_string = None
def reload(self) :
name = self.CM.get_field( self.field_idx )
self.class_name = name[0]
self.name = name[2]
self.proto = ''.join(i for i in name[1])
def set_init_value(self, value) :
"""
Setup the init value object of the field
:param value: the init value
:type value: :class:`EncodedValue`
"""
self.init_value = value
def get_init_value(self) :
"""
Return the init value object of the field
:rtype: :class:`EncodedValue`
"""
return self.init_value
def adjust_idx(self, val) :
self.field_idx = self.field_idx_diff + val
def get_field_idx_diff(self) :
"""
Return the index into the field_ids list for the identity of this field (includes the name and descriptor),
        represented as a difference from the index of the previous element in the list
:rtype: int
"""
return self.field_idx_diff
def get_field_idx(self) :
"""
Return the real index of the method
:rtype: int
"""
return self.field_idx
def get_access_flags(self) :
"""
Return the access flags of the field
:rtype: int
"""
return self.access_flags
def get_class_name(self) :
"""
Return the class name of the field
:rtype: string
"""
return self.class_name
def get_descriptor(self) :
"""
Return the descriptor of the field
:rtype: string
"""
return self.proto
def get_name(self) :
"""
Return the name of the field
:rtype: string
"""
return self.name
def get_access_flags_string(self) :
"""
Return the access flags string of the field
:rtype: string
"""
if self.access_flags_string == None :
self.access_flags_string = get_access_flags_string( self.get_access_flags() )
if self.access_flags_string == "" :
self.access_flags_string = "0x%x" % self.get_access_flags()
return self.access_flags_string
def set_name(self, value) :
self.CM.set_hook_field_name( self, value )
self.reload()
def get_obj(self) :
return []
def get_raw(self) :
return writeuleb128( self.field_idx_diff ) + writeuleb128( self.access_flags )
def get_size(self) :
return len(self.get_raw())
def show(self) :
"""
Display the information about the field
"""
colors = bytecode.disable_print_colors()
self.pretty_show()
bytecode.enable_print_colors(colors)
def pretty_show(self) :
"""
Display the information (with a pretty print) about the field
"""
bytecode._PrintSubBanner("Field Information")
bytecode._PrintDefault("%s->%s %s [access_flags=%s]\n" % ( self.get_class_name(), self.get_name(), self.get_descriptor(), self.get_access_flags_string() ))
init_value = self.get_init_value()
if init_value != None :
bytecode._PrintDefault( "\tinit value: %s\n" % str( init_value.get_value() ) )
self.show_dref()
def show_dref(self) :
"""
Display where this field is read or written
"""
try :
bytecode._PrintSubBanner("DREF")
bytecode._PrintDRef("R", self.DREFr.items)
bytecode._PrintDRef("W", self.DREFw.items)
bytecode._PrintSubBanner()
except AttributeError:
pass
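# Illustrative sketch of the diff encoding handled by adjust_idx above: the
# first encoded_field (or encoded_method) of a class_data_item stores an
# absolute index into the *_ids list, and each following element stores the
# difference from its predecessor, so the absolute indices are the running sum.
def _example_cumulative_indices(diffs) :
    """Turn a list of *_idx_diff values into absolute indices (a sketch)."""
    indices = []
    current = 0
    for diff in diffs :
        current += diff
        indices.append(current)
    return indices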
class EncodedMethod :
"""
This class can parse an encoded_method of a dex file
:param buff: a string which represents a Buff object of the encoded_method
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.CM = cm
self.method_idx_diff = readuleb128( buff ) #: method index diff in the corresponding section
self.access_flags = readuleb128( buff ) #: access flags of the method
self.code_off = readuleb128( buff ) #: offset of the code section
self.method_idx = 0
self.name = None
self.proto = None
self.class_name = None
self.code = None
self.access_flags_string = None
self.notes = []
def adjust_idx(self, val) :
self.method_idx = self.method_idx_diff + val
def get_method_idx(self) :
"""
Return the real index of the method
:rtype: int
"""
return self.method_idx
def get_method_idx_diff(self) :
"""
        Return the index into the method_ids list for the identity of this method (includes the name and descriptor),
        represented as a difference from the index of the previous element in the list
:rtype: int
"""
return self.method_idx_diff
def get_access_flags(self) :
"""
Return the access flags of the method
:rtype: int
"""
return self.access_flags
def get_code_off(self) :
"""
Return the offset from the start of the file to the code structure for this method,
or 0 if this method is either abstract or native
:rtype: int
"""
return self.code_off
def get_access_flags_string(self) :
"""
Return the access flags string of the method
:rtype: string
"""
if self.access_flags_string == None :
self.access_flags_string = get_access_flags_string( self.get_access_flags() )
if self.access_flags_string == "" :
self.access_flags_string = "0x%x" % self.get_access_flags()
return self.access_flags_string
def reload(self) :
v = self.CM.get_method( self.method_idx )
self.class_name = v[0]
self.name = v[1]
self.proto = ''.join(i for i in v[2])
self.code = self.CM.get_code( self.code_off )
def get_locals(self):
ret = self.proto.split(')')
params = ret[0][1:].split()
return self.code.get_registers_size() - len(params) - 1
def each_params_by_register(self, nb, proto):
bytecode._PrintSubBanner("Params")
ret = proto.split(')')
params = ret[0][1:].split()
if params:
bytecode._PrintDefault("- local registers: v%d...v%d\n" % (0, nb - len(params) - 1))
j = 0
for i in xrange(nb - len(params), nb):
bytecode._PrintDefault("- v%d:%s\n" % (i, get_type(params[j])))
j += 1
else :
bytecode._PrintDefault("local registers: v%d...v%d\n" % (0, nb-1))
bytecode._PrintDefault("- return:%s\n" % get_type(ret[1]))
bytecode._PrintSubBanner()
def show_info(self) :
"""
Display the basic information about the method
"""
bytecode._PrintSubBanner("Method Information")
bytecode._PrintDefault("%s->%s%s [access_flags=%s]\n" % ( self.get_class_name(), self.get_name(), self.get_descriptor(), self.get_access_flags_string() ))
def show(self) :
"""
Display the information about the method
"""
colors = bytecode.disable_print_colors()
self.pretty_show()
bytecode.enable_print_colors(colors)
def pretty_show(self) :
"""
Display the information (with a pretty print) about the method
"""
self.show_info()
self.show_notes()
if self.code != None :
self.each_params_by_register( self.code.get_registers_size(), self.get_descriptor() )
if self.CM.get_vmanalysis() == None :
self.code.show()
else :
self.code.pretty_show( self.CM.get_vmanalysis().get_method( self ) )
self.show_xref()
def show_xref(self) :
"""
Display where the method is called or which method is called
"""
try :
bytecode._PrintSubBanner("XREF")
bytecode._PrintXRef("F", self.XREFfrom.items)
bytecode._PrintXRef("T", self.XREFto.items)
bytecode._PrintSubBanner()
except AttributeError:
pass
def show_notes(self) :
"""
Display the notes about the method
"""
if self.notes != [] :
bytecode._PrintSubBanner("Notes")
for i in self.notes :
bytecode._PrintNote(i)
bytecode._PrintSubBanner()
def source(self) :
"""
        Display the source code of this method, using the decompiler attached to the ClassManager
"""
self.CM.decompiler_ob.display_source( self )
def get_length(self) :
"""
Return the length of the associated code of the method
:rtype: int
"""
if self.code != None :
return self.code.get_length()
return 0
def get_code(self) :
"""
Return the code object associated to the method
:rtype: :class:`DalvikCode` object
"""
return self.code
def get_instructions(self) :
"""
Get the instructions
        :rtype: a generator of each :class:`Instruction` (or a cached list of instructions if you have set up the instructions yourself)
"""
if self.code == None :
return []
return self.code.get_bc().get_instructions()
def set_instructions(self, instructions) :
"""
Set the instructions
:param instructions: the list of instructions
:type instructions: a list of :class:`Instruction`
"""
if self.code == None :
return []
return self.code.get_bc().set_instructions(instructions)
def get_instruction(self, idx, off=None) :
"""
        Get a particular instruction, by default via its index, or via its address if off is specified
:param idx: index of the instruction (the position in the list of the instruction)
:type idx: int
:param off: address of the instruction
:type off: int
:rtype: an :class:`Instruction` object
"""
if self.code != None :
return self.code.get_bc().get_instruction(idx, off)
return None
def get_debug(self) :
"""
Return the debug object associated to this method
:rtype: :class:`DebugInfoItem`
"""
if self.code == None :
return None
return self.code.get_debug()
def get_descriptor(self) :
"""
Return the descriptor of the method
:rtype: string
"""
return self.proto
def get_class_name(self) :
"""
Return the class name of the method
:rtype: string
"""
return self.class_name
def get_name(self) :
"""
Return the name of the method
:rtype: string
"""
return self.name
def add_inote(self, msg, idx, off=None) :
"""
Add a message to a specific instruction, by default via its index, or via its address if `off` is specified
:param msg: the message
:type msg: string
:param idx: index of the instruction (the position in the list of the instruction)
:type idx: int
:param off: address of the instruction
:type off: int
"""
if self.code != None :
self.code.add_inote(msg, idx, off)
def add_note(self, msg) :
"""
Add a message to this method
:param msg: the message
:type msg: string
"""
self.notes.append( msg )
def set_code_idx(self, idx) :
"""
Set the start address of the buffer to disassemble
:param idx: the index
:type idx: int
"""
if self.code != None :
self.code.set_idx( idx )
def set_name(self, value) :
self.CM.set_hook_method_name( self, value )
self.reload()
def get_raw(self) :
if self.code != None :
self.code_off = self.code.get_off()
return writeuleb128( self.method_idx_diff ) + writeuleb128( self.access_flags ) + writeuleb128( self.code_off )
def get_size(self) :
return len(self.get_raw())
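# Example (a sketch, kept as a comment): iterating over every EncodedMethod of
# a parsed dex file and printing basic information. This assumes the module's
# DalvikVMFormat class (as in androguard) is used on a local "classes.dex":
#
#   d = DalvikVMFormat( open("classes.dex", "rb").read() )
#   for method in d.get_methods() :
#       print method.get_class_name(), method.get_name(), method.get_descriptor()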
class ClassDataItem :
"""
This class can parse a class_data_item of a dex file
:param buff: a string which represents a Buff object of the class_data_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.__CM = cm
self.offset = buff.get_idx()
self.static_fields_size = readuleb128( buff )
self.instance_fields_size = readuleb128( buff )
self.direct_methods_size = readuleb128( buff )
self.virtual_methods_size = readuleb128( buff )
self.static_fields = []
self.instance_fields = []
self.direct_methods = []
self.virtual_methods = []
self._load_elements( self.static_fields_size, self.static_fields, EncodedField, buff, cm )
self._load_elements( self.instance_fields_size, self.instance_fields, EncodedField, buff, cm )
self._load_elements( self.direct_methods_size, self.direct_methods, EncodedMethod, buff, cm )
self._load_elements( self.virtual_methods_size, self.virtual_methods, EncodedMethod, buff, cm )
def get_static_fields_size(self) :
"""
Return the number of static fields defined in this item
:rtype: int
"""
return self.static_fields_size
def get_instance_fields_size(self) :
"""
Return the number of instance fields defined in this item
:rtype: int
"""
return self.instance_fields_size
def get_direct_methods_size(self) :
"""
Return the number of direct methods defined in this item
:rtype: int
"""
return self.direct_methods_size
def get_virtual_methods_size(self) :
"""
Return the number of virtual methods defined in this item
:rtype: int
"""
return self.virtual_methods_size
def get_static_fields(self) :
"""
Return the defined static fields, represented as a sequence of encoded elements
:rtype: a list of :class:`EncodedField` objects
"""
return self.static_fields
def get_instance_fields(self) :
"""
Return the defined instance fields, represented as a sequence of encoded elements
:rtype: a list of :class:`EncodedField` objects
"""
return self.instance_fields
def get_direct_methods(self) :
"""
Return the defined direct (any of static, private, or constructor) methods, represented as a sequence of encoded elements
:rtype: a list of :class:`EncodedMethod` objects
"""
return self.direct_methods
def get_virtual_methods(self) :
"""
Return the defined virtual (none of static, private, or constructor) methods, represented as a sequence of encoded elements
:rtype: a list of :class:`EncodedMethod` objects
"""
return self.virtual_methods
def get_methods(self) :
"""
Return direct and virtual methods
:rtype: a list of :class:`EncodedMethod` objects
"""
return [ x for x in self.direct_methods ] + [ x for x in self.virtual_methods ]
def get_fields(self) :
"""
Return static and instance fields
:rtype: a list of :class:`EncodedField` objects
"""
return [ x for x in self.static_fields ] + [ x for x in self.instance_fields ]
def set_off(self, off) :
self.offset = off
def set_static_fields(self, value) :
if value != None :
values = value.get_values()
if len(values) <= len(self.static_fields) :
for i in xrange(0, len(values)) :
self.static_fields[i].set_init_value( values[i] )
def _load_elements(self, size, l, Type, buff, cm) :
prev = 0
for i in xrange(0, size) :
el = Type(buff, cm)
el.adjust_idx( prev )
if isinstance(el, EncodedField) :
prev = el.get_field_idx()
else :
prev = el.get_method_idx()
l.append( el )
def reload(self) :
for i in self.static_fields :
i.reload()
for i in self.instance_fields :
i.reload()
for i in self.direct_methods :
i.reload()
for i in self.virtual_methods :
i.reload()
def show(self) :
self.pretty_show()
def pretty_show(self) :
bytecode._PrintSubBanner("Class Data Item")
bytecode._PrintDefault("static_fields_size=%d instance_fields_size=%d direct_methods_size=%d virtual_methods_size=%d\n" % \
(self.static_fields_size, self.instance_fields_size, self.direct_methods_size, self.virtual_methods_size))
bytecode._PrintSubBanner("Static Fields")
for i in self.static_fields :
i.show()
bytecode._PrintSubBanner("Instance Fields")
for i in self.instance_fields :
i.show()
bytecode._PrintSubBanner("Direct Methods")
for i in self.direct_methods :
i.pretty_show()
bytecode._PrintSubBanner("Virtual Methods")
for i in self.virtual_methods :
i.pretty_show()
def get_obj(self) :
return [ i for i in self.static_fields ] + \
[ i for i in self.instance_fields ] + \
[ i for i in self.direct_methods ] + \
[ i for i in self.virtual_methods ]
def get_raw(self) :
buff = writeuleb128( self.static_fields_size ) + \
writeuleb128( self.instance_fields_size ) + \
writeuleb128( self.direct_methods_size ) + \
writeuleb128( self.virtual_methods_size ) + \
''.join(i.get_raw() for i in self.static_fields) + \
''.join(i.get_raw() for i in self.instance_fields) + \
''.join(i.get_raw() for i in self.direct_methods) + \
''.join(i.get_raw() for i in self.virtual_methods)
return buff
def get_length(self) :
length = len(writeuleb128( self.static_fields_size )) + \
len(writeuleb128( self.instance_fields_size )) + \
len(writeuleb128( self.direct_methods_size )) + \
len(writeuleb128( self.virtual_methods_size ))
for i in self.static_fields :
length += i.get_size()
for i in self.instance_fields :
length += i.get_size()
for i in self.direct_methods :
length += i.get_size()
for i in self.virtual_methods :
length += i.get_size()
return length
def get_off(self) :
return self.offset
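# Example (a sketch, kept as a comment): inspecting the element lists of a
# class_data_item. `cdi` is assumed to be a parsed ClassDataItem, e.g. the
# object returned by ClassDefItem.get_class_data():
#
#   print cdi.get_static_fields_size(), cdi.get_instance_fields_size()
#   for f in cdi.get_fields() :   # static + instance fields
#       print f.get_name()
#   for m in cdi.get_methods() :  # direct + virtual methods
#       print m.get_name()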
class ClassDefItem :
"""
This class can parse a class_def_item of a dex file
:param buff: a string which represents a Buff object of the class_def_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.__CM = cm
self.class_idx = unpack("=I", buff.read(4))[0]
self.access_flags = unpack("=I", buff.read(4))[0]
self.superclass_idx = unpack("=I", buff.read(4))[0]
self.interfaces_off = unpack("=I", buff.read(4))[0]
self.source_file_idx = unpack("=I", buff.read(4))[0]
self.annotations_off = unpack("=I", buff.read(4))[0]
self.class_data_off = unpack("=I", buff.read(4))[0]
self.static_values_off = unpack("=I", buff.read(4))[0]
self.interfaces = None
self.class_data_item = None
self.static_values = None
self.name = None
self.sname = None
self.access_flags_string = None
def reload(self) :
self.name = self.__CM.get_type( self.class_idx )
self.sname = self.__CM.get_type( self.superclass_idx )
if self.interfaces_off != 0 :
self.interfaces = self.__CM.get_type_list( self.interfaces_off )
if self.class_data_off != 0 :
self.class_data_item = self.__CM.get_class_data_item( self.class_data_off )
self.class_data_item.reload()
if self.static_values_off != 0 :
self.static_values = self.__CM.get_encoded_array_item ( self.static_values_off )
if self.class_data_item != None :
self.class_data_item.set_static_fields( self.static_values.get_value() )
def get_methods(self) :
"""
Return all methods of this class
:rtype: a list of :class:`EncodedMethod` objects
"""
if self.class_data_item != None :
return self.class_data_item.get_methods()
return []
def get_fields(self) :
"""
Return all fields of this class
:rtype: a list of :class:`EncodedField` objects
"""
if self.class_data_item != None :
return self.class_data_item.get_fields()
return []
def get_class_idx(self) :
"""
Return the index into the type_ids list for this class
:rtype: int
"""
return self.class_idx
def get_access_flags(self) :
"""
Return the access flags for the class (public, final, etc.)
:rtype: int
"""
return self.access_flags
def get_superclass_idx(self) :
"""
Return the index into the type_ids list for the superclass
:rtype: int
"""
return self.superclass_idx
def get_interfaces_off(self) :
"""
Return the offset from the start of the file to the list of interfaces, or 0 if there are none
:rtype: int
"""
return self.interfaces_off
def get_source_file_idx(self) :
"""
Return the index into the string_ids list for the name of the file containing the original
source for (at least most of) this class, or the special value NO_INDEX to represent a lack of this information
:rtype: int
"""
return self.source_file_idx
def get_annotations_off(self) :
"""
Return the offset from the start of the file to the annotations structure for this class,
or 0 if there are no annotations on this class.
:rtype: int
"""
return self.annotations_off
def get_class_data_off(self) :
"""
Return the offset from the start of the file to the associated class data for this item,
or 0 if there is no class data for this class
:rtype: int
"""
return self.class_data_off
def get_static_values_off(self) :
"""
Return the offset from the start of the file to the list of initial values for static fields,
or 0 if there are none (and all static fields are to be initialized with 0 or null)
:rtype: int
"""
return self.static_values_off
def get_class_data(self) :
"""
Return the associated class_data_item
:rtype: a :class:`ClassDataItem` object
"""
return self.class_data_item
def get_name(self) :
"""
Return the name of this class
:rtype: string
"""
return self.name
def get_superclassname(self) :
"""
Return the name of the super class
:rtype: string
"""
return self.sname
def get_interfaces(self) :
"""
Return the names of the implemented interfaces
:rtype: a list of strings, or None if the class implements no interface
"""
return self.interfaces
def get_access_flags_string(self) :
"""
Return the access flags string of the class
:rtype: string
"""
if self.access_flags_string == None :
self.access_flags_string = get_access_flags_string( self.get_access_flags() )
if self.access_flags_string == "" :
self.access_flags_string = "0x%x" % self.get_access_flags()
return self.access_flags_string
def show(self) :
bytecode._PrintSubBanner("Class Def Item")
bytecode._PrintDefault("name=%s, sname=%s, interfaces=%s, access_flags=%s\n" %
( self.name,
self.sname,
self.interfaces,
self.get_access_flags_string()))
bytecode._PrintDefault("class_idx=%d, superclass_idx=%d, interfaces_off=%x, source_file_idx=%d, annotations_off=%x, class_data_off=%x, static_values_off=%x\n" %
( self.class_idx,
self.superclass_idx,
self.interfaces_off,
self.source_file_idx,
self.annotations_off,
self.class_data_off,
self.static_values_off))
def source(self) :
"""
Display the source code of the entire class, using the registered decompiler
"""
self.__CM.decompiler_ob.display_all( self )
def set_name(self, value) :
self.__CM.set_hook_class_name( self, value )
def get_obj(self) :
if self.interfaces_off != 0 :
self.interfaces_off = self.__CM.get_obj_by_offset( self.interfaces_off ).get_off()
if self.annotations_off != 0 :
self.annotations_off = self.__CM.get_obj_by_offset( self.annotations_off ).get_off()
if self.class_data_off != 0 :
self.class_data_off = self.__CM.get_obj_by_offset( self.class_data_off ).get_off()
if self.static_values_off != 0 :
self.static_values_off = self.__CM.get_obj_by_offset( self.static_values_off ).get_off()
return pack("=I", self.class_idx) + \
pack("=I", self.access_flags) + \
pack("=I", self.superclass_idx) + \
pack("=I", self.interfaces_off) + \
pack("=I", self.source_file_idx) + \
pack("=I", self.annotations_off) + \
pack("=I", self.class_data_off) + \
pack("=I", self.static_values_off)
def get_raw(self) :
return self.get_obj()
def get_length(self) :
return len(self.get_obj())
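# Example (a sketch, kept as a comment): reading the high-level information of
# a class_def_item. `class_def` is assumed to be a parsed and reloaded
# ClassDefItem:
#
#   print class_def.get_name()                 # e.g. "Lcom/example/Foo;"
#   print class_def.get_superclassname()       # e.g. "Ljava/lang/Object;"
#   print class_def.get_access_flags_string()  # e.g. "public final"
#   for m in class_def.get_methods() :
#       m.show_info()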
class ClassHDefItem :
"""
This class can parse a list of class_def_item of a dex file
:param buff: a string which represents a Buff object of the list of class_def_item
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, size, buff, cm) :
self.__CM = cm
self.offset = buff.get_idx()
self.class_def = []
for i in xrange(0, size) :
idx = buff.get_idx()
class_def = ClassDefItem( buff, cm )
self.class_def.append( class_def )
buff.set_idx( idx + calcsize("=IIIIIIII") )
def set_off(self, off) :
self.offset = off
def get_off(self) :
return self.offset
def get_class_idx(self, idx) :
for i in self.class_def :
if i.get_class_idx() == idx :
return i
return None
def get_method(self, name_class, name_method) :
l = []
for i in self.class_def :
if i.get_name() == name_class :
for j in i.get_methods() :
if j.get_name() == name_method :
l.append(j)
return l
def get_names(self) :
return [ x.get_name() for x in self.class_def ]
def reload(self) :
for i in self.class_def :
i.reload()
def show(self) :
for i in self.class_def :
i.show()
def get_obj(self) :
return [ i for i in self.class_def ]
def get_raw(self) :
return ''.join(i.get_raw() for i in self.class_def)
def get_length(self) :
length = 0
for i in self.class_def :
length += i.get_length()
return length
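# Example (a sketch, kept as a comment): looking up all methods with a given
# name inside a given class through a parsed ClassHDefItem `classes`:
#
#   for m in classes.get_method( "Lcom/example/Foo;", "onCreate" ) :
#       m.show_info()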
class EncodedTypeAddrPair :
"""
This class can parse an encoded_type_addr_pair of a dex file
:param buff: a string which represents a Buff object of the encoded_type_addr_pair
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff) :
self.type_idx = readuleb128( buff )
self.addr = readuleb128( buff )
def get_type_idx(self) :
"""
Return the index into the type_ids list for the type of the exception to catch
:rtype: int
"""
return self.type_idx
def get_addr(self) :
"""
Return the bytecode address of the associated exception handler
:rtype: int
"""
return self.addr
def get_obj(self) :
return []
def show(self) :
bytecode._PrintSubBanner("Encoded Type Addr Pair")
bytecode._PrintDefault("type_idx=%d addr=%x\n" % (self.type_idx, self.addr))
def get_raw(self) :
return writeuleb128( self.type_idx ) + writeuleb128( self.addr )
def get_length(self) :
return len(self.get_raw())
class EncodedCatchHandler :
"""
This class can parse an encoded_catch_handler of a dex file
:param buff: a string which represents a Buff object of the encoded_catch_handler
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.offset = buff.get_idx()
self.size = readsleb128( buff )
self.handlers = []
for i in xrange(0, abs(self.size)) :
self.handlers.append( EncodedTypeAddrPair(buff) )
if self.size <= 0 :
self.catch_all_addr = readuleb128( buff )
def get_size(self) :
"""
Return the number of catch types in this list
:rtype: int
"""
return self.size
def get_handlers(self) :
"""
Return the stream of abs(size) encoded items, one for each caught type, in the order that the types should be tested.
:rtype: a list of :class:`EncodedTypeAddrPair` objects
"""
return self.handlers
def get_catch_all_addr(self) :
"""
Return the bytecode address of the catch-all handler. This element is only present if size is non-positive.
:rtype: int
"""
return self.catch_all_addr
def get_off(self) :
return self.offset
def set_off(self, off) :
self.offset = off
def show(self) :
bytecode._PrintSubBanner("Encoded Catch Handler")
bytecode._PrintDefault("size=%d\n" % self.size)
for i in self.handlers :
i.show()
if self.size <= 0 :
bytecode._PrintDefault("catch_all_addr=%x\n" % self.catch_all_addr)
def get_raw(self) :
buff = writesleb128( self.size ) + ''.join(i.get_raw() for i in self.handlers)
if self.size <= 0 :
buff += writeuleb128( self.catch_all_addr )
return buff
def get_length(self) :
length = len(writesleb128( self.size ))
for i in self.handlers :
length += i.get_length()
if self.size <= 0 :
length += len(writeuleb128( self.catch_all_addr ))
return length
class EncodedCatchHandlerList :
"""
This class can parse an encoded_catch_handler_list of a dex file
:param buff: a string which represents a Buff object of the encoded_catch_handler_list
:type buff: Buff object
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
"""
def __init__(self, buff, cm) :
self.offset = buff.get_idx()
self.size = readuleb128( buff )
self.list = []
for i in xrange(0, self.size) :
self.list.append( EncodedCatchHandler(buff, cm) )
def get_size(self) :
"""
Return the size of this list, in entries
:rtype: int
"""
return self.size
def get_list(self) :
"""
Return the actual list of handler lists, represented directly (not as offsets), and concatenated sequentially
:rtype: a list of :class:`EncodedCatchHandler` objects
"""
return self.list
def show(self) :
bytecode._PrintSubBanner("Encoded Catch Handler List")
bytecode._PrintDefault("size=%d\n" % self.size)
for i in self.list :
i.show()
def get_off(self) :
return self.offset
def set_off(self, off) :
self.offset = off
def get_obj(self) :
return writeuleb128( self.size )
def get_raw(self) :
return self.get_obj() + ''.join(i.get_raw() for i in self.list)
def get_length(self) :
length = len(self.get_obj())
for i in self.list :
length += i.get_length()
return length
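# Example (a sketch, kept as a comment): walking a parsed
# EncodedCatchHandlerList `ecl`. A handler with a non-positive size also
# carries a catch-all address:
#
#   for handler in ecl.get_list() :
#       for pair in handler.get_handlers() :
#           print pair.get_type_idx(), hex( pair.get_addr() )
#       if handler.get_size() <= 0 :
#           print "catch-all at", hex( handler.get_catch_all_addr() )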
KIND_METH = 0
KIND_STRING = 1
KIND_FIELD = 2
KIND_TYPE = 3
VARIES = 4
INLINE_METHOD = 5
VTABLE_OFFSET = 6
FIELD_OFFSET = 7
KIND_RAW_STRING = 8
def get_kind(cm, kind, value) :
"""
Return the value of the 'kind' argument
:param cm: a ClassManager object
:type cm: :class:`ClassManager`
:param kind: the type of the 'kind' argument
:type kind: int
:param value: the value of the 'kind' argument
:type value: int
:rtype: string
"""
if kind == KIND_METH:
method = cm.get_method_ref(value)
class_name = method.get_class_name()
name = method.get_name()
descriptor = method.get_descriptor()
return "%s->%s%s" % (class_name, name, descriptor)
elif kind == KIND_STRING:
return repr(cm.get_string(value))
elif kind == KIND_RAW_STRING:
return cm.get_string(value)
elif kind == KIND_FIELD:
class_name, proto, field_name = cm.get_field(value)
return "%s->%s %s" % (class_name, field_name, proto)
elif kind == KIND_TYPE:
return cm.get_type(value)
elif kind == VTABLE_OFFSET:
return "vtable[0x%x]" % value
elif kind == FIELD_OFFSET:
return "field[0x%x]" % value
elif kind == INLINE_METHOD:
buff = "inline[0x%x]" % value
# FIXME: depends on the Android version ...
if len(INLINE_METHODS) > value:
elem = INLINE_METHODS[value]
buff += " %s->%s%s" % (elem[0], elem[1], elem[2])
return buff
return None
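# Example (a sketch, kept as a comment): resolving a raw 'kind' value into a
# readable string. `cm` is assumed to be the ClassManager of the parsed dex
# file and `idx` a valid index for the chosen kind:
#
#   print get_kind( cm, KIND_STRING, idx )  # repr of the string constant
#   print get_kind( cm, KIND_METH, idx )    # "Lfoo;->bar(I)V" style output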
class Instruction(object) :
"""
This class represents a dalvik instruction
"""
def get_kind(self) :
"""
Return the 'kind' argument of the instruction
:rtype: int
"""
if self.OP > 0xff :
if self.OP >= 0xf2ff :
return DALVIK_OPCODES_OPTIMIZED[ self.OP ][1][1]
return DALVIK_OPCODES_EXTENDED_WIDTH[ self.OP ][1][1]
return DALVIK_OPCODES_FORMAT[ self.OP ][1][1]
def get_name(self) :
"""
Return the name of the instruction
:rtype: string
"""
if self.OP > 0xff :
if self.OP >= 0xf2ff :
return DALVIK_OPCODES_OPTIMIZED[ self.OP ][1][0]
return DALVIK_OPCODES_EXTENDED_WIDTH[ self.OP ][1][0]
return DALVIK_OPCODES_FORMAT[ self.OP ][1][0]
def get_op_value(self) :
"""
Return the value of the opcode
:rtype: int
"""
return self.OP
def get_literals(self) :
"""
Return the associated literals
:rtype: list of int
"""
return []
def show(self, idx) :
"""
Print the instruction
"""
print self.get_name() + " " + self.get_output(idx),
def show_buff(self, idx) :
"""
Return the display of the instruction
:rtype: string
"""
return self.get_output(idx)
def get_translated_kind(self) :
"""
Return the translated value of the 'kind' argument
:rtype: string
"""
return get_kind(self.cm, self.get_kind(), self.get_ref_kind())
def get_output(self, idx=-1) :
"""
Return an additional output of the instruction
:rtype: string
"""
raise("not implemented")
def get_length(self) :
"""
Return the length of the instruction
:rtype: int
"""
raise("not implemented")
def get_raw(self) :
"""
Return the object in a raw format
:rtype: string
"""
raise("not implemented")
def get_ref_kind(self) :
"""
Return the value of the 'kind' argument
:rtype: int
"""
raise NotImplementedError("not implemented")
class InstructionInvalid(Instruction) :
"""
This class represents an invalid instruction
"""
def __init__(self, cm, buff) :
super(InstructionInvalid, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
#debug("OP:%x" % (self.OP))
def get_name(self) :
"""
Return the name of the instruction
:rtype: string
"""
return "invalid"
def get_output(self, idx=-1) :
return "(OP:%x)" % self.OP
def get_length(self) :
return 2
def get_raw(self) :
return pack("=H", self.OP)
class FillArrayData :
"""
This class can parse a FillArrayData instruction
:param buff: a Buff object which represents a buffer where the instruction is stored
"""
def __init__(self, buff) :
self.notes = []
self.format_general_size = calcsize("=HHI")
self.ident = unpack("=H", buff[0:2])[0]
self.element_width = unpack("=H", buff[2:4])[0]
self.size = unpack("=I", buff[4:8])[0]
# the payload is padded to an even number of bytes
buf_len = self.size * self.element_width
if buf_len % 2 : buf_len += 1
self.data = buff[ self.format_general_size : self.format_general_size + buf_len ]
def add_note(self, msg) :
"""
Add a note to this instruction
:param msg: the message
:type msg: string
"""
self.notes.append( msg )
def get_notes(self) :
"""
Get all notes from this instruction
:rtype: a list of objects
"""
return self.notes
def get_op_value(self) :
"""
Get the value of the opcode
:rtype: int
"""
return self.ident
def get_data(self) :
"""
Return the data of this instruction (the payload)
:rtype: string
"""
return self.data
def get_output(self, idx=-1) :
"""
Return an additional output of the instruction
:rtype: string
"""
buff = ""
data = self.get_data()
buff += repr(data) + " | "
for i in xrange(0, len(data)) :
buff += "\\x%02x" % ord( data[i] )
return buff
def get_name(self) :
"""
Return the name of the instruction
:rtype: string
"""
return "fill-array-data-payload"
def show_buff(self, pos) :
"""
Return the display of the instruction
:rtype: string
"""
buff = self.get_name() + " "
for i in xrange(0, len(self.data)) :
buff += "\\x%02x" % ord( self.data[i] )
return buff
def show(self, pos) :
"""
Print the instruction
"""
print self.show_buff(pos),
def get_length(self) :
"""
Return the length of the instruction
:rtype: int
"""
return ((self.size * self.element_width + 1) / 2 + 4) * 2
def get_raw(self) :
return pack("=H", self.ident) + pack("=H", self.element_width) + pack("=I", self.size) + self.data
class SparseSwitch :
"""
This class can parse a SparseSwitch instruction
:param buff: a Buff object which represents a buffer where the instruction is stored
"""
def __init__(self, buff) :
self.notes = []
self.format_general_size = calcsize("=HH")
self.ident = unpack("=H", buff[0:2])[0]
self.size = unpack("=H", buff[2:4])[0]
self.keys = []
self.targets = []
idx = self.format_general_size
for i in xrange(0, self.size) :
self.keys.append( unpack('=l', buff[idx:idx+4])[0] )
idx += 4
for i in xrange(0, self.size) :
self.targets.append( unpack('=l', buff[idx:idx+4])[0] )
idx += 4
def add_note(self, msg) :
"""
Add a note to this instruction
:param msg: the message
:type msg: string
"""
self.notes.append( msg )
def get_notes(self) :
"""
Get all notes from this instruction
:rtype: a list of objects
"""
return self.notes
def get_op_value(self) :
"""
Get the value of the opcode
:rtype: int
"""
return self.ident
def get_keys(self) :
"""
Return the keys of the instruction
:rtype: a list of long
"""
return self.keys
def get_values(self) :
return self.get_keys()
def get_targets(self) :
"""
Return the targets (address) of the instruction
:rtype: a list of long
"""
return self.targets
def get_output(self, idx=-1) :
"""
Return an additional output of the instruction
:rtype: string
"""
return " ".join("%x" % i for i in self.keys)
def get_name(self) :
"""
Return the name of the instruction
:rtype: string
"""
return "sparse-switch-payload"
def show_buff(self, pos) :
"""
Return the display of the instruction
:rtype: string
"""
buff = self.get_name() + " "
for i in xrange(0, len(self.keys)) :
buff += "%x:%x " % (self.keys[i], self.targets[i])
return buff
def show(self, pos) :
"""
Print the instruction
"""
print self.show_buff( pos ),
def get_length(self) :
return self.format_general_size + (self.size * calcsize('<L')) * 2
def get_raw(self) :
return pack("=H", self.ident) + pack("=H", self.size) + ''.join(pack("=l", i) for i in self.keys) + ''.join(pack("=l", i) for i in self.targets)
class PackedSwitch :
"""
This class can parse a PackedSwitch instruction
:param buff: a Buff object which represents a buffer where the instruction is stored
"""
def __init__(self, buff) :
self.notes = []
self.format_general_size = calcsize( "=HHI" )
self.ident = unpack("=H", buff[0:2])[0]
self.size = unpack("=H", buff[2:4])[0]
self.first_key = unpack("=i", buff[4:8])[0]
self.targets = []
idx = self.format_general_size
max_size = self.size
if (max_size * 4) > (len(buff) - idx) :
# keep only as many complete 4-byte targets as the buffer actually holds
max_size = (len(buff) - idx) / 4
for i in xrange(0, max_size) :
self.targets.append( unpack('=l', buff[idx:idx+4])[0] )
idx += 4
def add_note(self, msg) :
"""
Add a note to this instruction
:param msg: the message
:type msg: string
"""
self.notes.append( msg )
def get_notes(self) :
"""
Get all notes from this instruction
:rtype: a list of objects
"""
return self.notes
def get_op_value(self) :
"""
Get the value of the opcode
:rtype: int
"""
return self.ident
def get_keys(self) :
"""
Return the keys of the instruction
:rtype: a list of long
"""
return [(self.first_key+i) for i in range(0, len(self.targets))]
def get_values(self) :
return self.get_keys()
def get_targets(self) :
"""
Return the targets (address) of the instruction
:rtype: a list of long
"""
return self.targets
def get_output(self, idx=-1) :
"""
Return an additional output of the instruction
:rtype: string
"""
return " ".join("%x" % (self.first_key+i) for i in range(0, len(self.targets)))
def get_name(self) :
"""
Return the name of the instruction
:rtype: string
"""
return "packed-switch-payload"
def show_buff(self, pos) :
"""
Return the display of the instruction
:rtype: string
"""
buff = self.get_name() + " "
buff += "%x:" % self.first_key
for i in self.targets :
buff += " %x" % i
return buff
def show(self, pos) :
"""
Print the instruction
"""
print self.show_buff( pos ),
def get_length(self) :
return self.format_general_size + (self.size * calcsize('=L'))
def get_raw(self) :
return pack("=H", self.ident) + pack("=H", self.size) + pack("=i", self.first_key) + ''.join(pack("=l", i) for i in self.targets)
class Instruction35c(Instruction) :
"""
This class represents all instructions which have the 35c format
"""
def __init__(self, cm, buff) :
super(Instruction35c, self).__init__()
self.cm = cm
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.G = (i16 >> 8) & 0xf
self.A = (i16 >> 12) & 0xf
self.BBBB = unpack("=H", buff[2:4])[0]
i16 = unpack("=H", buff[4:6])[0]
self.C = i16 & 0xf
self.D = (i16 >> 4) & 0xf
self.E = (i16 >> 8) & 0xf
self.F = (i16 >> 12) & 0xf
def get_output(self, idx=-1) :
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.BBBB)
if self.A == 0 :
buff += "%s" % (kind)
elif self.A == 1 :
buff += "v%d, %s" % (self.C, kind)
elif self.A == 2 :
buff += "v%d, v%d, %s" % (self.C, self.D, kind)
elif self.A == 3 :
buff += "v%d, v%d, v%d, %s" % (self.C, self.D, self.E, kind)
elif self.A == 4 :
buff += "v%d, v%d, v%d, v%d, %s" % (self.C, self.D, self.E, self.F, kind)
elif self.A == 5 :
buff += "v%d, v%d, v%d, v%d, v%d, %s" % (self.C, self.D, self.E, self.F, self.G, kind)
return buff
def get_length(self) :
return 6
def get_ref_kind(self) :
return self.BBBB
def get_raw(self) :
return pack("=HHH", (self.A << 12) | (self.G << 8) | self.OP, self.BBBB, (self.F << 12) | (self.E << 8) | (self.D << 4) | self.C)
class Instruction10x(Instruction) :
"""
This class represents all instructions which have the 10x format
"""
def __init__(self, cm, buff) :
super(Instruction10x, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
#log_andro.debug("OP:%x %s" % (self.OP, args[0]))
def get_output(self, idx=-1) :
buff = ""
return buff
def get_length(self) :
return 2
def get_raw(self) :
return pack("=H", self.OP)
class Instruction21h(Instruction) :
"""
This class represents all instructions which have the 21h format
"""
def __init__(self, cm, buff) :
super(Instruction21h, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBB = unpack("=h", buff[2:4])[0]
#log_andro.debug("OP:%x %s AA:%x BBBBB:%x" % (self.OP, args[0], self.AA, self.BBBB))
self.formatted_operands = []
if self.OP == 0x15 :
self.formatted_operands.append( unpack( '=f', '\x00\x00' + pack('=h', self.BBBB ) )[0] )
elif self.OP == 0x19:
self.formatted_operands.append( unpack( '=d', '\x00\x00\x00\x00\x00\x00' + pack('=h', self.BBBB) )[0] )
def get_length(self) :
return 4
def get_output(self, idx=-1) :
buff = ""
buff += "v%d, %d" % (self.AA, self.BBBB)
if self.formatted_operands != [] :
buff += " # %s" % (str(self.formatted_operands))
return buff
def get_literals(self) :
return [ self.BBBB ]
def get_raw(self) :
return pack("=Hh", (self.AA << 8) | self.OP, self.BBBB)
class Instruction11n(Instruction) :
"""
This class represents all instructions which have the 11n format
"""
def __init__(self, cm, buff) :
super(Instruction11n, self).__init__()
i16 = unpack("=h", buff[0:2])[0]
self.OP = i16 & 0xff
self.A = (i16 >> 8) & 0xf
self.B = (i16 >> 12) & 0xf
#log_andro.debug("OP:%x %s A:%x B:%x" % (self.OP, args[0], self.A, self.B))
def get_length(self) :
return 2
def get_output(self, idx=-1) :
buff = ""
buff += "v%d, %d" % (self.A, self.B)
return buff
def get_literals(self) :
return [ self.B ]
def get_raw(self) :
return pack("=H", (self.B << 12) | (self.A << 8) | self.OP)
class Instruction21c(Instruction) :
"""
This class represents all instructions which have the 21c format
"""
def __init__(self, cm, buff) :
super(Instruction21c, self).__init__()
self.cm = cm
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBB = unpack("=h", buff[2:4])[0]
#log_andro.debug("OP:%x %s AA:%x BBBBB:%x" % (self.OP, args[0], self.AA, self.BBBB))
def get_length(self) :
return 4
def get_output(self, idx=-1) :
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.BBBB)
buff += "v%d, %s" % (self.AA, kind)
return buff
def get_ref_kind(self) :
return self.BBBB
def get_string(self) :
return get_kind(self.cm, self.get_kind(), self.BBBB)
def get_raw_string(self) :
return get_kind(self.cm, KIND_RAW_STRING, self.BBBB)
def get_raw(self) :
return pack("=Hh", (self.AA << 8) | self.OP, self.BBBB)
class Instruction21s(Instruction) :
"""
This class represents all instructions which have the 21s format
"""
def __init__(self, cm, buff) :
super(Instruction21s, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBB = unpack("=h", buff[2:4])[0]
self.formatted_operands = []
if self.OP == 0x16 :
self.formatted_operands.append( unpack( '=d', pack('=q', self.BBBB))[0] ) # reinterpret the sign-extended 64-bit value as a double, as for const-wide
#log_andro.debug("OP:%x %s AA:%x BBBBB:%x" % (self.OP, args[0], self.AA, self.BBBB))
def get_length(self) :
return 4
def get_output(self, idx=-1) :
buff = ""
buff += "v%d, %d" % (self.AA, self.BBBB)
if self.formatted_operands != [] :
buff += " # %s" % str(self.formatted_operands)
return buff
def get_literals(self) :
return [ self.BBBB ]
def get_raw(self) :
return pack("=Hh", (self.AA << 8) | self.OP, self.BBBB)
class Instruction22c(Instruction) :
"""
This class represents all instructions which have the 22c format
"""
def __init__(self, cm, buff) :
super(Instruction22c, self).__init__()
self.cm = cm
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.A = (i16 >> 8) & 0xf
self.B = (i16 >> 12) & 0xf
self.CCCC = unpack("=H", buff[2:4])[0]
#log_andro.debug("OP:%x %s A:%x B:%x CCCC:%x" % (self.OP, args[0], self.A, self.B, self.CCCC))
def get_length(self) :
return 4
def get_output(self, idx=-1) :
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.CCCC)
buff += "v%d, v%d, %s" % (self.A, self.B, kind)
return buff
def get_ref_kind(self) :
return self.CCCC
def get_raw(self) :
return pack("=HH", (self.B << 12) | (self.A << 8) | (self.OP), self.CCCC)
class Instruction22cs(Instruction) :
"""
This class represents all instructions which have the 22cs format
"""
def __init__(self, cm, buff) :
super(Instruction22cs, self).__init__()
self.cm = cm
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.A = (i16 >> 8) & 0xf
self.B = (i16 >> 12) & 0xf
self.CCCC = unpack("=H", buff[2:4])[0]
#log_andro.debug("OP:%x %s A:%x B:%x CCCC:%x" % (self.OP, args[0], self.A, self.B, self.CCCC))
def get_length(self) :
return 4
def get_output(self, idx=-1) :
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.CCCC)
buff += "v%d, v%d, %s" % (self.A, self.B, kind)
return buff
def get_ref_kind(self) :
return self.CCCC
def get_raw(self) :
return pack("=HH", (self.B << 12) | (self.A << 8) | (self.OP), self.CCCC)
class Instruction31t(Instruction) :
"""
This class represents all instructions which have the 31t format
"""
def __init__(self, cm, buff) :
super(Instruction31t, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBBBBBB = unpack("=i", buff[2:6])[0]
#log_andro.debug("OP:%x %s AA:%x BBBBBBBBB:%x" % (self.OP, args[0], self.AA, self.BBBBBBBB))
def get_length(self) :
return 6
def get_output(self, idx=-1) :
buff = ""
buff += "v%d, +%x (0x%x)" % (self.AA, self.BBBBBBBB, self.BBBBBBBB * 2 + idx)
return buff
def get_ref_off(self) :
return self.BBBBBBBB
def get_raw(self) :
return pack("=Hi", (self.AA << 8) | self.OP, self.BBBBBBBB)
class Instruction31c(Instruction) :
"""
This class represents all instructions which have the 31c format
"""
def __init__(self, cm, buff) :
super(Instruction31c, self).__init__()
self.cm = cm
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBBBBBB = unpack("=i", buff[2:6])[0]
#log_andro.debug("OP:%x %s AA:%x BBBBBBBBB:%x" % (self.OP, args[0], self.AA, self.BBBBBBBB))
def get_length(self) :
return 6
def get_output(self, idx=-1) :
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.BBBBBBBB)
buff += "v%d, %s" % (self.AA, kind)
return buff
def get_ref_kind(self) :
return self.BBBBBBBB
def get_string(self) :
"""
Return the string associated to the 'kind' argument
:rtype: string
"""
return get_kind(self.cm, self.get_kind(), self.BBBBBBBB)
def get_raw(self) :
return pack("=Hi", (self.AA << 8) | self.OP, self.BBBBBBBB)
class Instruction12x(Instruction) :
"""
This class represents all instructions which have the 12x format
"""
def __init__(self, cm, buff) :
super(Instruction12x, self).__init__()
i16 = unpack("=h", buff[0:2])[0]
self.OP = i16 & 0xff
self.A = (i16 >> 8) & 0xf
self.B = (i16 >> 12) & 0xf
#log_andro.debug("OP:%x %s A:%x B:%x" % (self.OP, args[0], self.A, self.B))
def get_length(self) :
return 2
def get_output(self, idx=-1) :
buff = ""
buff += "v%d, v%d" % (self.A, self.B)
return buff
def get_raw(self) :
return pack("=H", (self.B << 12) | (self.A << 8) | (self.OP))
class Instruction11x(Instruction) :
"""
This class represents all instructions which have the 11x format
"""
def __init__(self, cm, buff) :
super(Instruction11x, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
#log_andro.debug("OP:%x %s AA:%x" % (self.OP, args[0], self.AA))
def get_length(self) :
return 2
def get_output(self, idx=-1) :
buff = ""
buff += "v%d" % (self.AA)
return buff
def get_raw(self) :
return pack("=H", (self.AA << 8) | self.OP)
class Instruction51l(Instruction) :
"""
This class represents all instructions which have the 51l format
"""
def __init__(self, cm, buff) :
super(Instruction51l, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBBBBBBBBBBBBBB = unpack("=q", buff[2:10])[0]
self.formatted_operands = []
if self.OP == 0x18 :
self.formatted_operands.append( unpack( '=d', pack('=q', self.BBBBBBBBBBBBBBBB ) )[0] )
#log_andro.debug("OP:%x %s AA:%x BBBBBBBBBBBBBBBB:%x" % (self.OP, args[0], self.AA, self.BBBBBBBBBBBBBBBB))
def get_length(self) :
return 10
def get_output(self, idx=-1) :
buff = ""
buff += "v%d, %d" % (self.AA, self.BBBBBBBBBBBBBBBB)
if self.formatted_operands != [] :
buff += " # %s" % str(self.formatted_operands)
return buff
def get_literals(self) :
return [ self.BBBBBBBBBBBBBBBB ]
def get_raw(self) :
return pack("=Hq", (self.AA << 8) | self.OP, self.BBBBBBBBBBBBBBBB)
class Instruction31i(Instruction) :
"""
This class represents all instructions which have the 31i format
"""
def __init__(self, cm, buff) :
super(Instruction31i, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBBBBBB = unpack("=i", buff[2:6])[0]
self.formatted_operands = []
if self.OP == 0x14 :
self.formatted_operands.append( unpack("=f", pack("=i", self.BBBBBBBB))[0] )
elif self.OP == 0x17 :
self.formatted_operands.append( unpack( '=d', pack('=q', self.BBBBBBBB))[0] ) # reinterpret the sign-extended 64-bit value as a double, as for const-wide
#log_andro.debug("OP:%x %s AA:%x BBBBBBBBB:%x" % (self.OP, args[0], self.AA, self.BBBBBBBB))
def get_length(self) :
return 6
def get_output(self, idx=-1) :
buff = ""
buff += "v%d, %d" % (self.AA, self.BBBBBBBB)
if self.formatted_operands != [] :
buff += " # %s" % str(self.formatted_operands)
return buff
def get_literals(self) :
return [ self.BBBBBBBB ]
def get_raw(self) :
return pack("=Hi", (self.AA << 8) | self.OP, self.BBBBBBBB)
class Instruction22x(Instruction) :
"""
This class represents all instructions which have the 22x format
"""
def __init__(self, cm, buff) :
super(Instruction22x, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBB = unpack("=H", buff[2:4])[0]
#log_andro.debug("OP:%x %s AA:%x BBBBB:%x" % (self.OP, args[0], self.AA, self.BBBB))
def get_length(self) :
return 4
def get_output(self, idx=-1) :
buff = ""
buff += "v%d, v%d" % (self.AA, self.BBBB)
return buff
def get_raw(self) :
return pack("=HH", (self.AA << 8) | self.OP, self.BBBB)
class Instruction23x(Instruction) :
"""
This class represents all instructions which have the 23x format
"""
def __init__(self, cm, buff) :
super(Instruction23x, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
i16 = unpack("=H", buff[2:4])[0]
self.BB = i16 & 0xff
self.CC = (i16 >> 8) & 0xff
#log_andro.debug("OP:%x %s AA:%x BB:%x CC:%x" % (self.OP, args[0], self.AA, self.BB, self.CC))
def get_length(self) :
return 4
def get_output(self, idx=-1) :
buff = ""
buff += "v%d, v%d, v%d" % (self.AA, self.BB, self.CC)
return buff
def get_raw(self) :
return pack("=HH", (self.AA << 8) | self.OP, (self.CC << 8) | self.BB)
class Instruction20t(Instruction) :
"""
This class represents all instructions which have the 20t format
"""
def __init__(self, cm, buff) :
super(Instruction20t, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AAAA = unpack("=h", buff[2:4])[0]
#log_andro.debug("OP:%x %s AAAA:%x" % (self.OP, args[0], self.AAAA))
def get_length(self) :
return 4
def get_output(self, idx=-1) :
buff = ""
buff += "%x" % (self.AAAA)
return buff
def get_ref_off(self) :
return self.AAAA
def get_raw(self) :
return pack("=Hh", self.OP, self.AAAA)
class Instruction21t(Instruction) :
"""
This class represents all instructions which have the 21t format
"""
def __init__(self, cm, buff) :
super(Instruction21t, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBB = unpack("=h", buff[2:4])[0]
#log_andro.debug("OP:%x %s AA:%x BBBBB:%x" % (self.OP, args[0], self.AA, self.BBBB))
def get_length(self) :
return 4
def get_output(self, idx=-1) :
buff = ""
buff += "v%d, +%d" % (self.AA, self.BBBB)
return buff
def get_ref_off(self) :
return self.BBBB
def get_raw(self) :
return pack("=Hh", (self.AA << 8) | self.OP, self.BBBB)
class Instruction10t(Instruction) :
"""
This class represents all instructions which have the 10t format
"""
def __init__(self, cm, buff) :
super(Instruction10t, self).__init__()
self.OP = unpack("=B", buff[0:1])[0]
self.AA = unpack("=b", buff[1:2])[0]
#log_andro.debug("OP:%x %s AA:%x" % (self.OP, args[0], self.AA))
def get_length(self) :
return 2
def get_output(self, idx=-1) :
buff = ""
buff += "%x" % (self.AA)
return buff
def get_ref_off(self) :
return self.AA
def get_raw(self) :
return pack("=Bb", self.OP, self.AA)
class Instruction22t(Instruction) :
"""
This class represents all instructions which have the 22t format
"""
def __init__(self, cm, buff) :
super(Instruction22t, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.A = (i16 >> 8) & 0xf
self.B = (i16 >> 12) & 0xf
self.CCCC = unpack("=h", buff[2:4])[0]
#log_andro.debug("OP:%x %s A:%x B:%x CCCC:%x" % (self.OP, args[0], self.A, self.B, self.CCCC))
def get_length(self) :
return 4
def get_output(self, idx=-1) :
buff = ""
buff += "v%d, v%d, +%d" % (self.A, self.B, self.CCCC)
return buff
def get_ref_off(self) :
return self.CCCC
def get_raw(self) :
return pack("=Hh", (self.B << 12) | (self.A << 8) | self.OP, self.CCCC)
class Instruction22s(Instruction) :
"""
This class represents all instructions which have the 22s format
"""
def __init__(self, cm, buff) :
super(Instruction22s, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.A = (i16 >> 8) & 0xf
self.B = (i16 >> 12) & 0xf
self.CCCC = unpack("=h", buff[2:4])[0]
#log_andro.debug("OP:%x %s A:%x B:%x CCCC:%x" % (self.OP, args[0], self.A, self.B, self.CCCC))
def get_length(self) :
return 4
def get_output(self, idx=-1) :
buff = ""
buff += "v%d, v%d, %d" % (self.A, self.B, self.CCCC)
return buff
def get_literals(self) :
return [ self.CCCC ]
def get_raw(self) :
return pack("=Hh", (self.B << 12) | (self.A << 8) | self.OP, self.CCCC)
class Instruction22b(Instruction) :
"""
This class represents all instructions which have the 22b format
"""
def __init__(self, cm, buff) :
super(Instruction22b, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BB = unpack("=B", buff[2:3])[0]
self.CC = unpack("=b", buff[3:4])[0]
#log_andro.debug("OP:%x %s AA:%x BB:%x CC:%x" % (self.OP, args[0], self.AA, self.BB, self.CC))
def get_length(self) :
return 4
def get_output(self, idx=-1) :
buff = ""
buff += "v%d, v%d, %d" % (self.AA, self.BB, self.CC)
return buff
def get_literals(self) :
return [ self.CC ]
def get_raw(self) :
return pack("=Hh", (self.AA << 8) | self.OP, (self.CC << 8) | self.BB)
class Instruction30t(Instruction) :
"""
This class represents all instructions which have the 30t format
"""
def __init__(self, cm, buff) :
super(Instruction30t, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AAAAAAAA = unpack("=i", buff[2:6])[0]
#log_andro.debug("OP:%x %s AAAAAAAA:%x" % (self.OP, args[0], self.AAAAAAAA))
def get_length(self) :
return 6
def get_output(self, idx=-1) :
buff = ""
buff += "%x" % (self.AAAAAAAA)
return buff
def get_ref_off(self) :
return self.AAAAAAAA
def get_raw(self) :
return pack("=Hi", self.OP, self.AAAAAAAA)
class Instruction3rc(Instruction) :
"""
This class represents all instructions which have the 3rc format
"""
def __init__(self, cm, buff) :
super(Instruction3rc, self).__init__()
self.cm = cm
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBB = unpack("=H", buff[2:4])[0]
self.CCCC = unpack("=H", buff[4:6])[0]
self.NNNN = self.CCCC + self.AA - 1
#log_andro.debug("OP:%x %s AA:%x BBBB:%x CCCC:%x NNNN:%d" % (self.OP, args[0], self.AA, self.BBBB, self.CCCC, self.NNNN))
def get_length(self) :
return 6
def get_output(self, idx=-1) :
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.BBBB)
if self.CCCC == self.NNNN :
buff += "v%d, %s" % (self.CCCC, kind)
else :
buff += "v%d ... v%d, %s" % (self.CCCC, self.NNNN, kind)
return buff
def get_ref_kind(self) :
return self.BBBB
def get_raw(self) :
return pack("=HHH", (self.AA << 8) | self.OP, self.BBBB, self.CCCC)
class Instruction32x(Instruction) :
"""
This class represents all instructions which have the 32x format
"""
def __init__(self, cm, buff) :
super(Instruction32x, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AAAA = unpack("=H", buff[2:4])[0]
self.BBBB = unpack("=H", buff[4:6])[0]
#log_andro.debug("OP:%x %s AAAAA:%x BBBBB:%x" % (self.OP, args[0], self.AAAA, self.BBBB))
def get_length(self) :
return 6
def get_output(self, idx=-1) :
buff = ""
buff += "v%d, v%d" % (self.AAAA, self.BBBB)
return buff
def get_raw(self) :
return pack("=HHH", self.OP, self.AAAA, self.BBBB)
class Instruction20bc(Instruction) :
"""
This class represents all instructions which have the 20bc format
"""
def __init__(self, cm, buff) :
super(Instruction20bc, self).__init__()
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBB = unpack("=H", buff[2:4])[0]
#log_andro.debug("OP:%x %s AA:%x BBBBB:%x" % (self.OP, args[0], self.AA, self.BBBB))
def get_length(self) :
return 4
def get_output(self, idx=-1) :
buff = ""
buff += "%d, %d" % (self.AA, self.BBBB)
return buff
def get_raw(self) :
return pack("=HH", (self.AA << 8) | self.OP, self.BBBB)
class Instruction35mi(Instruction) :
"""
This class represents all instructions which have the 35mi format
"""
def __init__(self, cm, buff) :
super(Instruction35mi, self).__init__()
self.cm = cm
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.G = (i16 >> 8) & 0xf
self.A = (i16 >> 12) & 0xf
self.BBBB = unpack("=H", buff[2:4])[0]
i16 = unpack("=H", buff[4:6])[0]
self.C = i16 & 0xf
self.D = (i16 >> 4) & 0xf
self.E = (i16 >> 8) & 0xf
self.F = (i16 >> 12) & 0xf
#log_andro.debug("OP:%x %s G:%x A:%x BBBB:%x C:%x D:%x E:%x F:%x" % (self.OP, args[0], self.G, self.A, self.BBBB, self.C, self.D, self.E, self.F))
def get_output(self, idx=-1) :
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.BBBB)
if self.A == 1 :
buff += "v%d, %s" % (self.C, kind)
elif self.A == 2 :
buff += "v%d, v%d, %s" % (self.C, self.D, kind)
elif self.A == 3 :
buff += "v%d, v%d, v%d, %s" % (self.C, self.D, self.E, kind)
elif self.A == 4 :
buff += "v%d, v%d, v%d, v%d, %s" % (self.C, self.D, self.E, self.F, kind)
elif self.A == 5 :
buff += "v%d, v%d, v%d, v%d, v%d, %s" % (self.C, self.D, self.E, self.F, self.G, kind)
return buff
def get_length(self) :
return 6
def get_ref_kind(self) :
return self.BBBB
def get_raw(self) :
return pack("=HHH", (self.A << 12) | (self.G << 8) | self.OP, self.BBBB, (self.F << 12) | (self.E << 8) | (self.D << 4) | self.C)
class Instruction35ms(Instruction) :
"""
This class represents all instructions which have the 35ms format
"""
def __init__(self, cm, buff) :
super(Instruction35ms, self).__init__()
self.cm = cm
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.G = (i16 >> 8) & 0xf
self.A = (i16 >> 12) & 0xf
self.BBBB = unpack("=H", buff[2:4])[0]
i16 = unpack("=H", buff[4:6])[0]
self.C = i16 & 0xf
self.D = (i16 >> 4) & 0xf
self.E = (i16 >> 8) & 0xf
self.F = (i16 >> 12) & 0xf
#log_andro.debug("OP:%x %s G:%x A:%x BBBB:%x C:%x D:%x E:%x F:%x" % (self.OP, args[0], self.G, self.A, self.BBBB, self.C, self.D, self.E, self.F))
def get_output(self, idx=-1) :
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.BBBB)
if self.A == 1 :
buff += "v%d, %s" % (self.C, kind)
elif self.A == 2 :
buff += "v%d, v%d, %s" % (self.C, self.D, kind)
elif self.A == 3 :
buff += "v%d, v%d, v%d, %s" % (self.C, self.D, self.E, kind)
elif self.A == 4 :
buff += "v%d, v%d, v%d, v%d, %s" % (self.C, self.D, self.E, self.F, kind)
elif self.A == 5 :
buff += "v%d, v%d, v%d, v%d, v%d, %s" % (self.C, self.D, self.E, self.F, self.G, kind)
return buff
def get_length(self) :
return 6
def get_ref_kind(self) :
return self.BBBB
def get_raw(self) :
return pack("=HHH", (self.A << 12) | (self.G << 8) | self.OP, self.BBBB, (self.F << 12) | (self.E << 8) | (self.D << 4) | self.C)
class Instruction3rmi(Instruction) :
"""
This class represents all instructions which have the 3rmi format
"""
def __init__(self, cm, buff) :
super(Instruction3rmi, self).__init__()
self.cm = cm
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBB = unpack("=H", buff[2:4])[0]
self.CCCC = unpack("=H", buff[4:6])[0]
self.NNNN = self.CCCC + self.AA - 1
#log_andro.debug("OP:%x %s AA:%x BBBB:%x CCCC:%x NNNN:%d" % (self.OP, args[0], self.AA, self.BBBB, self.CCCC, self.NNNN))
def get_length(self) :
return 6
def get_output(self, idx=-1) :
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.BBBB)
if self.CCCC == self.NNNN :
buff += "v%d, %s" % (self.CCCC, kind)
else :
buff += "v%d ... v%d, %s" % (self.CCCC, self.NNNN, kind)
return buff
def get_ref_kind(self) :
return self.BBBB
def get_raw(self) :
return pack("=HHH", (self.AA << 8) | self.OP, self.BBBB, self.CCCC)
class Instruction3rms(Instruction) :
"""
This class represents all instructions which have the 3rms format
"""
def __init__(self, cm, buff) :
super(Instruction3rms, self).__init__()
self.cm = cm
i16 = unpack("=H", buff[0:2])[0]
self.OP = i16 & 0xff
self.AA = (i16 >> 8) & 0xff
self.BBBB = unpack("=H", buff[2:4])[0]
self.CCCC = unpack("=H", buff[4:6])[0]
self.NNNN = self.CCCC + self.AA - 1
#log_andro.debug("OP:%x %s AA:%x BBBB:%x CCCC:%x NNNN:%d" % (self.OP, args[0], self.AA, self.BBBB, self.CCCC, self.NNNN))
def get_length(self) :
return 6
def get_output(self, idx=-1) :
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.BBBB)
if self.CCCC == self.NNNN :
buff += "v%d, %s" % (self.CCCC, kind)
else :
buff += "v%d ... v%d, %s" % (self.CCCC, self.NNNN, kind)
return buff
def get_ref_kind(self) :
return self.BBBB
def get_raw(self) :
return pack("=HHH", (self.AA << 8) | self.OP, self.BBBB, self.CCCC)
class Instruction41c(Instruction) :
"""
This class represents all instructions which have the 41c format
"""
def __init__(self, cm, buff) :
super(Instruction41c, self).__init__()
self.cm = cm
self.OP = unpack("=H", buff[0:2])[0]
self.BBBBBBBB = unpack("=I", buff[2:6])[0]
self.AAAA = unpack("=H", buff[6:8])[0]
#log_andro.debug("OP:%x %s AAAAA:%x BBBBB:%x" % (self.OP, args[0], self.AAAA, self.BBBBBBBB))
def get_length(self) :
return 8
def get_output(self, idx=-1) :
kind = get_kind(self.cm, self.get_kind(), self.BBBBBBBB)
buff = ""
buff += "v%d, %s" % (self.AAAA, kind)
return buff
def get_ref_kind(self) :
return self.BBBBBBBB
def get_raw(self) :
return pack("=HIH", self.OP, self.BBBBBBBB, self.AAAA)
class Instruction40sc(Instruction) :
"""
This class represents all instructions which have the 40sc format
"""
def __init__(self, cm, buff) :
super(Instruction40sc, self).__init__()
self.cm = cm
self.OP = unpack("=H", buff[0:2])[0]
self.BBBBBBBB = unpack("=I", buff[2:6])[0]
self.AAAA = unpack("=H", buff[6:8])[0]
#log_andro.debug("OP:%x %s AAAAA:%x BBBBB:%x" % (self.OP, args[0], self.AAAA, self.BBBBBBBB))
def get_length(self) :
return 8
def get_output(self, idx=-1) :
kind = get_kind(self.cm, self.get_kind(), self.BBBBBBBB)
buff = ""
buff += "%d, %s" % (self.AAAA, kind)
return buff
def get_ref_kind(self) :
return self.BBBBBBBB
def get_raw(self) :
return pack("=HIH", self.OP, self.BBBBBBBB, self.AAAA)
class Instruction52c(Instruction) :
"""
This class represents all instructions which have the 52c format
"""
def __init__(self, cm, buff) :
super(Instruction52c, self).__init__()
self.cm = cm
self.OP = unpack("=H", buff[0:2])[0]
self.CCCCCCCC = unpack("=I", buff[2:6])[0]
self.AAAA = unpack("=H", buff[6:8])[0]
self.BBBB = unpack("=H", buff[8:10])[0]
#log_andro.debug("OP:%x %s AAAAA:%x BBBBB:%x" % (self.OP, args[0], self.AAAA, self.BBBB))
def get_length(self) :
return 10
def get_output(self, idx=-1) :
kind = get_kind(self.cm, self.get_kind(), self.CCCCCCCC)
buff = ""
buff += "v%d, v%d, %s" % (self.AAAA, self.BBBB, kind)
return buff
def get_ref_kind(self) :
return self.CCCCCCCC
def get_raw(self) :
return pack("=HIHH", self.OP, self.CCCCCCCC, self.AAAA, self.BBBB)
class Instruction5rc(Instruction) :
"""
This class represents all instructions which have the 5rc format
"""
def __init__(self, cm, buff) :
super(Instruction5rc, self).__init__()
self.cm = cm
self.OP = unpack("=H", buff[0:2])[0]
self.BBBBBBBB = unpack("=I", buff[2:6])[0]
self.AAAA = unpack("=H", buff[6:8])[0]
self.CCCC = unpack("=H", buff[8:10])[0]
self.NNNN = self.CCCC + self.AAAA - 1
#log_andro.debug("OP:%x %s AA:%x BBBB:%x CCCC:%x NNNN:%d" % (self.OP, args[0], self.AAAA, self.BBBBBBBB, self.CCCC, self.NNNN))
def get_length(self) :
return 10
def get_output(self, idx=-1) :
buff = ""
kind = get_kind(self.cm, self.get_kind(), self.BBBBBBBB)
if self.CCCC == self.NNNN :
buff += "v%d, %s" % (self.CCCC, kind)
else :
buff += "v%d ... v%d, %s" % (self.CCCC, self.NNNN, kind)
return buff
def get_ref_kind(self) :
return self.BBBBBBBB
def get_raw(self) :
return pack("=HIHH", self.OP, self.BBBBBBBB, self.AAAA, self.CCCC)
DALVIK_OPCODES_FORMAT = {
0x00 : [Instruction10x, [ "nop" ] ],
0x01 : [Instruction12x, [ "move" ] ],
0x02 : [Instruction22x, [ "move/from16" ] ],
0x03 : [Instruction32x, [ "move/16" ] ],
0x04 : [Instruction12x, [ "move-wide" ] ],
0x05 : [Instruction22x, [ "move-wide/from16" ] ],
0x06 : [Instruction32x, [ "move-wide/16" ] ],
0x07 : [Instruction12x, [ "move-object" ] ],
0x08 : [Instruction22x, [ "move-object/from16" ] ],
0x09 : [Instruction32x, [ "move-object/16" ] ],
0x0a : [Instruction11x, [ "move-result" ] ],
0x0b : [Instruction11x, [ "move-result-wide" ] ],
0x0c : [Instruction11x, [ "move-result-object" ] ],
0x0d : [Instruction11x, [ "move-exception" ] ],
0x0e : [Instruction10x, [ "return-void" ] ],
0x0f : [Instruction11x, [ "return" ] ],
0x10 : [Instruction11x, [ "return-wide" ] ],
0x11 : [Instruction11x, [ "return-object" ] ],
0x12 : [Instruction11n, [ "const/4" ] ],
0x13 : [Instruction21s, [ "const/16" ] ],
0x14 : [Instruction31i, [ "const" ] ],
0x15 : [Instruction21h, [ "const/high16" ] ],
0x16 : [Instruction21s, [ "const-wide/16" ] ],
0x17 : [Instruction31i, [ "const-wide/32" ] ],
0x18 : [Instruction51l, [ "const-wide" ] ],
0x19 : [Instruction21h, [ "const-wide/high16" ] ],
0x1a : [Instruction21c, [ "const-string", KIND_STRING ] ],
0x1b : [Instruction31c, [ "const-string/jumbo", KIND_STRING ] ],
0x1c : [Instruction21c, [ "const-class", KIND_TYPE ] ],
0x1d : [Instruction11x, [ "monitor-enter" ] ],
0x1e : [Instruction11x, [ "monitor-exit" ] ],
0x1f : [Instruction21c, [ "check-cast", KIND_TYPE ] ],
0x20 : [Instruction22c, [ "instance-of", KIND_TYPE ] ],
0x21 : [Instruction12x, [ "array-length", KIND_TYPE ] ],
0x22 : [Instruction21c, [ "new-instance", KIND_TYPE ] ],
0x23 : [Instruction22c, [ "new-array", KIND_TYPE ] ],
0x24 : [Instruction35c, [ "filled-new-array", KIND_TYPE ] ],
0x25 : [Instruction3rc, [ "filled-new-array/range", KIND_TYPE ] ],
0x26 : [Instruction31t, [ "fill-array-data" ] ],
0x27 : [Instruction11x, [ "throw" ] ],
0x28 : [Instruction10t, [ "goto" ] ],
0x29 : [Instruction20t, [ "goto/16" ] ],
0x2a : [Instruction30t, [ "goto/32" ] ],
0x2b : [Instruction31t, [ "packed-switch" ] ],
0x2c : [Instruction31t, [ "sparse-switch" ] ],
0x2d : [Instruction23x, [ "cmpl-float" ] ],
0x2e : [Instruction23x, [ "cmpg-float" ] ],
0x2f : [Instruction23x, [ "cmpl-double" ] ],
0x30 : [Instruction23x, [ "cmpg-double" ] ],
0x31 : [Instruction23x, [ "cmp-long" ] ],
0x32 : [Instruction22t, [ "if-eq" ] ],
0x33 : [Instruction22t, [ "if-ne" ] ],
0x34 : [Instruction22t, [ "if-lt" ] ],
0x35 : [Instruction22t, [ "if-ge" ] ],
0x36 : [Instruction22t, [ "if-gt" ] ],
0x37 : [Instruction22t, [ "if-le" ] ],
0x38 : [Instruction21t, [ "if-eqz" ] ],
0x39 : [Instruction21t, [ "if-nez" ] ],
0x3a : [Instruction21t, [ "if-ltz" ] ],
0x3b : [Instruction21t, [ "if-gez" ] ],
0x3c : [Instruction21t, [ "if-gtz" ] ],
0x3d : [Instruction21t, [ "if-lez" ] ],
#unused
0x3e : [Instruction10x, [ "nop" ] ],
0x3f : [Instruction10x, [ "nop" ] ],
0x40 : [Instruction10x, [ "nop" ] ],
0x41 : [Instruction10x, [ "nop" ] ],
0x42 : [Instruction10x, [ "nop" ] ],
0x43 : [Instruction10x, [ "nop" ] ],
0x44 : [Instruction23x, [ "aget" ] ],
0x45 : [Instruction23x, [ "aget-wide" ] ],
0x46 : [Instruction23x, [ "aget-object" ] ],
0x47 : [Instruction23x, [ "aget-boolean" ] ],
0x48 : [Instruction23x, [ "aget-byte" ] ],
0x49 : [Instruction23x, [ "aget-char" ] ],
0x4a : [Instruction23x, [ "aget-short" ] ],
0x4b : [Instruction23x, [ "aput" ] ],
0x4c : [Instruction23x, [ "aput-wide" ] ],
0x4d : [Instruction23x, [ "aput-object" ] ],
0x4e : [Instruction23x, [ "aput-boolean" ] ],
0x4f : [Instruction23x, [ "aput-byte" ] ],
0x50 : [Instruction23x, [ "aput-char" ] ],
0x51 : [Instruction23x, [ "aput-short" ] ],
0x52 : [Instruction22c, [ "iget", KIND_FIELD ] ],
0x53 : [Instruction22c, [ "iget-wide", KIND_FIELD ] ],
0x54 : [Instruction22c, [ "iget-object", KIND_FIELD ] ],
0x55 : [Instruction22c, [ "iget-boolean", KIND_FIELD ] ],
0x56 : [Instruction22c, [ "iget-byte", KIND_FIELD ] ],
0x57 : [Instruction22c, [ "iget-char", KIND_FIELD ] ],
0x58 : [Instruction22c, [ "iget-short", KIND_FIELD ] ],
0x59 : [Instruction22c, [ "iput", KIND_FIELD ] ],
0x5a : [Instruction22c, [ "iput-wide", KIND_FIELD ] ],
0x5b : [Instruction22c, [ "iput-object", KIND_FIELD ] ],
0x5c : [Instruction22c, [ "iput-boolean", KIND_FIELD ] ],
0x5d : [Instruction22c, [ "iput-byte", KIND_FIELD ] ],
0x5e : [Instruction22c, [ "iput-char", KIND_FIELD ] ],
0x5f : [Instruction22c, [ "iput-short", KIND_FIELD ] ],
0x60 : [Instruction21c, [ "sget", KIND_FIELD ] ],
0x61 : [Instruction21c, [ "sget-wide", KIND_FIELD ] ],
0x62 : [Instruction21c, [ "sget-object", KIND_FIELD ] ],
0x63 : [Instruction21c, [ "sget-boolean", KIND_FIELD ] ],
0x64 : [Instruction21c, [ "sget-byte", KIND_FIELD ] ],
0x65 : [Instruction21c, [ "sget-char", KIND_FIELD ] ],
0x66 : [Instruction21c, [ "sget-short", KIND_FIELD ] ],
0x67 : [Instruction21c, [ "sput", KIND_FIELD ] ],
0x68 : [Instruction21c, [ "sput-wide", KIND_FIELD ] ],
0x69 : [Instruction21c, [ "sput-object", KIND_FIELD ] ],
0x6a : [Instruction21c, [ "sput-boolean", KIND_FIELD ] ],
0x6b : [Instruction21c, [ "sput-byte", KIND_FIELD ] ],
0x6c : [Instruction21c, [ "sput-char", KIND_FIELD ] ],
0x6d : [Instruction21c, [ "sput-short", KIND_FIELD ] ],
0x6e : [Instruction35c, [ "invoke-virtual", KIND_METH ] ],
0x6f : [Instruction35c, [ "invoke-super", KIND_METH ] ],
0x70 : [Instruction35c, [ "invoke-direct", KIND_METH ] ],
0x71 : [Instruction35c, [ "invoke-static", KIND_METH ] ],
0x72 : [Instruction35c, [ "invoke-interface", KIND_METH ] ],
# unused
0x73 : [Instruction10x, [ "nop" ] ],
0x74 : [Instruction3rc, [ "invoke-virtual/range", KIND_METH ] ],
0x75 : [Instruction3rc, [ "invoke-super/range", KIND_METH ] ],
0x76 : [Instruction3rc, [ "invoke-direct/range", KIND_METH ] ],
0x77 : [Instruction3rc, [ "invoke-static/range", KIND_METH ] ],
0x78 : [Instruction3rc, [ "invoke-interface/range", KIND_METH ] ],
# unused
0x79 : [Instruction10x, [ "nop" ] ],
0x7a : [Instruction10x, [ "nop" ] ],
0x7b : [Instruction12x, [ "neg-int" ] ],
0x7c : [Instruction12x, [ "not-int" ] ],
0x7d : [Instruction12x, [ "neg-long" ] ],
0x7e : [Instruction12x, [ "not-long" ] ],
0x7f : [Instruction12x, [ "neg-float" ] ],
0x80 : [Instruction12x, [ "neg-double" ] ],
0x81 : [Instruction12x, [ "int-to-long" ] ],
0x82 : [Instruction12x, [ "int-to-float" ] ],
0x83 : [Instruction12x, [ "int-to-double" ] ],
0x84 : [Instruction12x, [ "long-to-int" ] ],
0x85 : [Instruction12x, [ "long-to-float" ] ],
0x86 : [Instruction12x, [ "long-to-double" ] ],
0x87 : [Instruction12x, [ "float-to-int" ] ],
0x88 : [Instruction12x, [ "float-to-long" ] ],
0x89 : [Instruction12x, [ "float-to-double" ] ],
0x8a : [Instruction12x, [ "double-to-int" ] ],
0x8b : [Instruction12x, [ "double-to-long" ] ],
0x8c : [Instruction12x, [ "double-to-float" ] ],
0x8d : [Instruction12x, [ "int-to-byte" ] ],
0x8e : [Instruction12x, [ "int-to-char" ] ],
0x8f : [Instruction12x, [ "int-to-short" ] ],
0x90 : [Instruction23x, [ "add-int" ] ],
0x91 : [Instruction23x, [ "sub-int" ] ],
0x92 : [Instruction23x, [ "mul-int" ] ],
0x93 : [Instruction23x, [ "div-int" ] ],
0x94 : [Instruction23x, [ "rem-int" ] ],
0x95 : [Instruction23x, [ "and-int" ] ],
0x96 : [Instruction23x, [ "or-int" ] ],
0x97 : [Instruction23x, [ "xor-int" ] ],
0x98 : [Instruction23x, [ "shl-int" ] ],
0x99 : [Instruction23x, [ "shr-int" ] ],
0x9a : [Instruction23x, [ "ushr-int" ] ],
0x9b : [Instruction23x, [ "add-long" ] ],
0x9c : [Instruction23x, [ "sub-long" ] ],
0x9d : [Instruction23x, [ "mul-long" ] ],
0x9e : [Instruction23x, [ "div-long" ] ],
0x9f : [Instruction23x, [ "rem-long" ] ],
0xa0 : [Instruction23x, [ "and-long" ] ],
0xa1 : [Instruction23x, [ "or-long" ] ],
0xa2 : [Instruction23x, [ "xor-long" ] ],
0xa3 : [Instruction23x, [ "shl-long" ] ],
0xa4 : [Instruction23x, [ "shr-long" ] ],
0xa5 : [Instruction23x, [ "ushr-long" ] ],
0xa6 : [Instruction23x, [ "add-float" ] ],
0xa7 : [Instruction23x, [ "sub-float" ] ],
0xa8 : [Instruction23x, [ "mul-float" ] ],
0xa9 : [Instruction23x, [ "div-float" ] ],
0xaa : [Instruction23x, [ "rem-float" ] ],
0xab : [Instruction23x, [ "add-double" ] ],
0xac : [Instruction23x, [ "sub-double" ] ],
0xad : [Instruction23x, [ "mul-double" ] ],
0xae : [Instruction23x, [ "div-double" ] ],
0xaf : [Instruction23x, [ "rem-double" ] ],
0xb0 : [Instruction12x, [ "add-int/2addr" ] ],
0xb1 : [Instruction12x, [ "sub-int/2addr" ] ],
0xb2 : [Instruction12x, [ "mul-int/2addr" ] ],
0xb3 : [Instruction12x, [ "div-int/2addr" ] ],
0xb4 : [Instruction12x, [ "rem-int/2addr" ] ],
0xb5 : [Instruction12x, [ "and-int/2addr" ] ],
0xb6 : [Instruction12x, [ "or-int/2addr" ] ],
0xb7 : [Instruction12x, [ "xor-int/2addr" ] ],
0xb8 : [Instruction12x, [ "shl-int/2addr" ] ],
0xb9 : [Instruction12x, [ "shr-int/2addr" ] ],
0xba : [Instruction12x, [ "ushr-int/2addr" ] ],
0xbb : [Instruction12x, [ "add-long/2addr" ] ],
0xbc : [Instruction12x, [ "sub-long/2addr" ] ],
0xbd : [Instruction12x, [ "mul-long/2addr" ] ],
0xbe : [Instruction12x, [ "div-long/2addr" ] ],
0xbf : [Instruction12x, [ "rem-long/2addr" ] ],
0xc0 : [Instruction12x, [ "and-long/2addr" ] ],
0xc1 : [Instruction12x, [ "or-long/2addr" ] ],
0xc2 : [Instruction12x, [ "xor-long/2addr" ] ],
0xc3 : [Instruction12x, [ "shl-long/2addr" ] ],
0xc4 : [Instruction12x, [ "shr-long/2addr" ] ],
0xc5 : [Instruction12x, [ "ushr-long/2addr" ] ],
0xc6 : [Instruction12x, [ "add-float/2addr" ] ],
0xc7 : [Instruction12x, [ "sub-float/2addr" ] ],
0xc8 : [Instruction12x, [ "mul-float/2addr" ] ],
0xc9 : [Instruction12x, [ "div-float/2addr" ] ],
0xca : [Instruction12x, [ "rem-float/2addr" ] ],
0xcb : [Instruction12x, [ "add-double/2addr" ] ],
0xcc : [Instruction12x, [ "sub-double/2addr" ] ],
0xcd : [Instruction12x, [ "mul-double/2addr" ] ],
0xce : [Instruction12x, [ "div-double/2addr" ] ],
0xcf : [Instruction12x, [ "rem-double/2addr" ] ],
0xd0 : [Instruction22s, [ "add-int/lit16" ] ],
0xd1 : [Instruction22s, [ "rsub-int" ] ],
0xd2 : [Instruction22s, [ "mul-int/lit16" ] ],
0xd3 : [Instruction22s, [ "div-int/lit16" ] ],
0xd4 : [Instruction22s, [ "rem-int/lit16" ] ],
0xd5 : [Instruction22s, [ "and-int/lit16" ] ],
0xd6 : [Instruction22s, [ "or-int/lit16" ] ],
0xd7 : [Instruction22s, [ "xor-int/lit16" ] ],
0xd8 : [Instruction22b, [ "add-int/lit8" ] ],
0xd9 : [Instruction22b, [ "rsub-int/lit8" ] ],
0xda : [Instruction22b, [ "mul-int/lit8" ] ],
0xdb : [Instruction22b, [ "div-int/lit8" ] ],
0xdc : [Instruction22b, [ "rem-int/lit8" ] ],
0xdd : [Instruction22b, [ "and-int/lit8" ] ],
0xde : [Instruction22b, [ "or-int/lit8" ] ],
0xdf : [Instruction22b, [ "xor-int/lit8" ] ],
0xe0 : [Instruction22b, [ "shl-int/lit8" ] ],
0xe1 : [Instruction22b, [ "shr-int/lit8" ] ],
0xe2 : [Instruction22b, [ "ushr-int/lit8" ] ],
# expanded opcodes
0xe3 : [Instruction22c, [ "iget-volatile", KIND_FIELD ] ],
0xe4 : [Instruction22c, [ "iput-volatile", KIND_FIELD ] ],
0xe5 : [Instruction21c, [ "sget-volatile", KIND_FIELD ] ],
0xe6 : [Instruction21c, [ "sput-volatile", KIND_FIELD ] ],
0xe7 : [Instruction22c, [ "iget-object-volatile", KIND_FIELD ] ],
0xe8 : [Instruction22c, [ "iget-wide-volatile", KIND_FIELD ] ],
0xe9 : [Instruction22c, [ "iput-wide-volatile", KIND_FIELD ] ],
0xea : [Instruction21c, [ "sget-wide-volatile", KIND_FIELD ] ],
0xeb : [Instruction21c, [ "sput-wide-volatile", KIND_FIELD ] ],
0xec : [Instruction10x, [ "breakpoint" ] ],
0xed : [Instruction20bc, [ "throw-verification-error", VARIES ] ],
0xee : [Instruction35mi, [ "execute-inline", INLINE_METHOD ] ],
0xef : [Instruction3rmi, [ "execute-inline/range", INLINE_METHOD ] ],
0xf0 : [Instruction35c, [ "invoke-object-init/range", KIND_METH ] ],
0xf1 : [Instruction10x, [ "return-void-barrier" ] ],
0xf2 : [Instruction22cs, [ "iget-quick", FIELD_OFFSET ] ],
0xf3 : [Instruction22cs, [ "iget-wide-quick", FIELD_OFFSET ] ],
0xf4 : [Instruction22cs, [ "iget-object-quick", FIELD_OFFSET ] ],
0xf5 : [Instruction22cs, [ "iput-quick", FIELD_OFFSET ] ],
0xf6 : [Instruction22cs, [ "iput-wide-quick", FIELD_OFFSET ] ],
0xf7 : [Instruction22cs, [ "iput-object-quick", FIELD_OFFSET ] ],
0xf8 : [Instruction35ms, [ "invoke-virtual-quick", VTABLE_OFFSET ] ],
0xf9 : [Instruction3rms, [ "invoke-virtual-quick/range", VTABLE_OFFSET ] ],
0xfa : [Instruction35ms, [ "invoke-super-quick", VTABLE_OFFSET ] ],
0xfb : [Instruction3rms, [ "invoke-super-quick/range", VTABLE_OFFSET ] ],
0xfc : [Instruction22c, [ "iput-object-volatile", KIND_FIELD ] ],
0xfd : [Instruction21c, [ "sget-object-volatile", KIND_FIELD ] ],
0xfe : [Instruction21c, [ "sput-object-volatile", KIND_FIELD ] ],
}
DALVIK_OPCODES_PAYLOAD = {
0x0100 : [PackedSwitch],
0x0200 : [SparseSwitch],
0x0300 : [FillArrayData],
}
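# Illustrative sketch (not androguard API): how the two tables above are meant
# to be consumed. A classic opcode is the low byte of the first 16-bit code
# unit; payload pseudo-instructions (packed-switch, sparse-switch,
# fill-array-data) are keyed by the full 16-bit value. `cm` is assumed to be a
# ClassManager and `buff` a raw string starting on an instruction.
def _example_opcode_dispatch(cm, buff) :
    op_value = ord(buff[0])
    if op_value == 0x00 :
        # may be a payload pseudo-instruction, identified by both bytes
        full = unpack("=H", buff[0:2])[0]
        if full in DALVIK_OPCODES_PAYLOAD :
            return DALVIK_OPCODES_PAYLOAD[full][0](buff)
    # fall back to the classic one-byte opcode table
    return DALVIK_OPCODES_FORMAT[op_value][0](cm, buff)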
INLINE_METHODS = [
[ "Lorg/apache/harmony/dalvik/NativeTestTarget;", "emptyInlineMethod", "()V" ],
[ "Ljava/lang/String;", "charAt", "(I)C" ],
[ "Ljava/lang/String;", "compareTo", "(Ljava/lang/String;)I" ],
[ "Ljava/lang/String;", "equals", "(Ljava/lang/Object;)Z" ],
[ "Ljava/lang/String;", "fastIndexOf", "(II)I" ],
[ "Ljava/lang/String;", "isEmpty", "()Z" ],
[ "Ljava/lang/String;", "length", "()I" ],
[ "Ljava/lang/Math;", "abs", "(I)I" ],
[ "Ljava/lang/Math;", "abs", "(J)J" ],
[ "Ljava/lang/Math;", "abs", "(F)F" ],
[ "Ljava/lang/Math;", "abs", "(D)D" ],
[ "Ljava/lang/Math;", "min", "(II)I" ],
[ "Ljava/lang/Math;", "max", "(II)I" ],
[ "Ljava/lang/Math;", "sqrt", "(D)D" ],
[ "Ljava/lang/Math;", "cos", "(D)D" ],
[ "Ljava/lang/Math;", "sin", "(D)D" ],
[ "Ljava/lang/Float;", "floatToIntBits", "(F)I" ],
[ "Ljava/lang/Float;", "floatToRawIntBits", "(F)I" ],
[ "Ljava/lang/Float;", "intBitsToFloat", "(I)F" ],
[ "Ljava/lang/Double;", "doubleToLongBits", "(D)J" ],
[ "Ljava/lang/Double;", "doubleToRawLongBits", "(D)J" ],
[ "Ljava/lang/Double;", "longBitsToDouble", "(J)D" ],
]
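# Sketch: execute-inline (0xee, above) carries a plain index into the
# INLINE_METHODS table; the human-readable target is recovered by lookup.
def _example_inline_target(inline_idx) :
    klass, name, proto = INLINE_METHODS[inline_idx]
    return "%s->%s%s" % (klass, name, proto)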
DALVIK_OPCODES_EXTENDED_WIDTH = {
0x00ff: [ Instruction41c, ["const-class/jumbo", KIND_TYPE ] ],
0x01ff: [ Instruction41c, ["check-cast/jumbo", KIND_TYPE ] ],
0x02ff: [ Instruction52c, ["instance-of/jumbo", KIND_TYPE ] ],
0x03ff: [ Instruction41c, ["new-instance/jumbo", KIND_TYPE ] ],
0x04ff: [ Instruction52c, ["new-array/jumbo", KIND_TYPE ] ],
0x05ff: [ Instruction5rc, ["filled-new-array/jumbo", KIND_TYPE ] ],
0x06ff: [ Instruction52c, ["iget/jumbo", KIND_FIELD ] ],
0x07ff: [ Instruction52c, ["iget-wide/jumbo", KIND_FIELD ] ],
0x08ff: [ Instruction52c, ["iget-object/jumbo", KIND_FIELD ] ],
0x09ff: [ Instruction52c, ["iget-boolean/jumbo", KIND_FIELD ] ],
0x0aff: [ Instruction52c, ["iget-byte/jumbo", KIND_FIELD ] ],
0x0bff: [ Instruction52c, ["iget-char/jumbo", KIND_FIELD ] ],
0x0cff: [ Instruction52c, ["iget-short/jumbo", KIND_FIELD ] ],
0x0dff: [ Instruction52c, ["iput/jumbo", KIND_FIELD ] ],
0x0eff: [ Instruction52c, ["iput-wide/jumbo", KIND_FIELD ] ],
0x0fff: [ Instruction52c, ["iput-object/jumbo", KIND_FIELD ] ],
0x10ff: [ Instruction52c, ["iput-boolean/jumbo", KIND_FIELD ] ],
0x11ff: [ Instruction52c, ["iput-byte/jumbo", KIND_FIELD ] ],
0x12ff: [ Instruction52c, ["iput-char/jumbo", KIND_FIELD ] ],
0x13ff: [ Instruction52c, ["iput-short/jumbo", KIND_FIELD ] ],
0x14ff: [ Instruction41c, ["sget/jumbo", KIND_FIELD ] ],
0x15ff: [ Instruction41c, ["sget-wide/jumbo", KIND_FIELD ] ],
0x16ff: [ Instruction41c, ["sget-object/jumbo", KIND_FIELD ] ],
0x17ff: [ Instruction41c, ["sget-boolean/jumbo", KIND_FIELD ] ],
0x18ff: [ Instruction41c, ["sget-byte/jumbo", KIND_FIELD ] ],
0x19ff: [ Instruction41c, ["sget-char/jumbo", KIND_FIELD ] ],
0x1aff: [ Instruction41c, ["sget-short/jumbo", KIND_FIELD ] ],
0x1bff: [ Instruction41c, ["sput/jumbo", KIND_FIELD ] ],
0x1cff: [ Instruction41c, ["sput-wide/jumbo", KIND_FIELD ] ],
0x1dff: [ Instruction41c, ["sput-object/jumbo", KIND_FIELD ] ],
0x1eff: [ Instruction41c, ["sput-boolean/jumbo", KIND_FIELD ] ],
0x1fff: [ Instruction41c, ["sput-byte/jumbo", KIND_FIELD ] ],
0x20ff: [ Instruction41c, ["sput-char/jumbo", KIND_FIELD ] ],
0x21ff: [ Instruction41c, ["sput-short/jumbo", KIND_FIELD ] ],
0x22ff: [ Instruction5rc, ["invoke-virtual/jumbo", KIND_METH ] ],
0x23ff: [ Instruction5rc, ["invoke-super/jumbo", KIND_METH ] ],
0x24ff: [ Instruction5rc, ["invoke-direct/jumbo", KIND_METH ] ],
0x25ff: [ Instruction5rc, ["invoke-static/jumbo", KIND_METH ] ],
0x26ff: [ Instruction5rc, ["invoke-interface/jumbo", KIND_METH ] ],
}
DALVIK_OPCODES_OPTIMIZED = {
0xf2ff : [ Instruction5rc, ["invoke-object-init/jumbo", KIND_METH ] ],
0xf3ff : [ Instruction52c, ["iget-volatile/jumbo", KIND_FIELD ] ],
0xf4ff : [ Instruction52c, ["iget-wide-volatile/jumbo", KIND_FIELD ] ],
    0xf5ff : [ Instruction52c, ["iget-object-volatile/jumbo", KIND_FIELD ] ],
0xf6ff : [ Instruction52c, ["iput-volatile/jumbo", KIND_FIELD ] ],
0xf7ff : [ Instruction52c, ["iput-wide-volatile/jumbo", KIND_FIELD ] ],
0xf8ff : [ Instruction52c, ["iput-object-volatile/jumbo", KIND_FIELD ] ],
0xf9ff : [ Instruction41c, ["sget-volatile/jumbo", KIND_FIELD ] ],
0xfaff : [ Instruction41c, ["sget-wide-volatile/jumbo", KIND_FIELD ] ],
0xfbff : [ Instruction41c, ["sget-object-volatile/jumbo", KIND_FIELD ] ],
0xfcff : [ Instruction41c, ["sput-volatile/jumbo", KIND_FIELD ] ],
0xfdff : [ Instruction41c, ["sput-wide-volatile/jumbo", KIND_FIELD ] ],
0xfeff : [ Instruction41c, ["sput-object-volatile/jumbo", KIND_FIELD ] ],
0xffff : [ Instruction40sc, ["throw-verification-error/jumbo", VARIES ] ],
}
class Unresolved(Instruction) :
def __init__(self, data) :
self.data = data
def get_name(self) :
return "unresolved"
def get_op_value(self) :
return ord(self.data[0])
def get_output(self, idx=-1) :
return repr(self.data)
def get_length(self) :
return len(self.data)
def get_raw(self) :
return self.buff
def get_instruction(cm, op_value, buff, odex=False) :
try :
if not odex and (op_value >= 0xe3 and op_value <= 0xfe) :
return InstructionInvalid( cm, buff )
try :
return DALVIK_OPCODES_FORMAT[ op_value ][0]( cm, buff )
except KeyError :
return InstructionInvalid( cm, buff )
except :
return Unresolved( buff )
def get_extented_instruction(cm, op_value, buff) :
return DALVIK_OPCODES_EXTENDED_WIDTH[ op_value ][0]( cm, buff )
def get_optimized_instruction(cm, op_value, buff) :
return DALVIK_OPCODES_OPTIMIZED[ op_value ][0]( cm, buff )
def get_instruction_payload(op_value, buff) :
return DALVIK_OPCODES_PAYLOAD[ op_value ][0]( buff )
class LinearSweepAlgorithm :
"""
This class is used to disassemble a method. The algorithm used by this class is linear sweep.
"""
def get_instructions(self, cm, size, insn, idx) :
"""
:param cm: a ClassManager object
:type cm: :class:`ClassManager` object
:param size: the total size of the buffer
:type size: int
:param insn: a raw buffer where are the instructions
:type insn: string
:param idx: a start address in the buffer
:type idx: int
:rtype: a generator of :class:`Instruction` objects
"""
self.odex = cm.get_odex_format()
max_idx = size * calcsize( '=H' )
# Get instructions
while idx < max_idx :
obj = None
classic_instruction = True
op_value = unpack( '=B', insn[idx] )[0]
#print "%x %x" % (op_value, idx)
#payload instructions or extented/optimized instructions
if (op_value == 0x00 or op_value == 0xff) and ((idx + 2) < max_idx) :
op_value = unpack( '=H', insn[idx:idx+2] )[0]
# payload instructions ?
if op_value in DALVIK_OPCODES_PAYLOAD :
obj = get_instruction_payload( op_value, insn[idx:] )
classic_instruction = False
elif op_value in DALVIK_OPCODES_EXTENDED_WIDTH :
obj = get_extented_instruction( cm, op_value, insn[idx:] )
classic_instruction = False
# optimized instructions ?
elif self.odex and (op_value in DALVIK_OPCODES_OPTIMIZED) :
obj = get_optimized_instruction( cm, op_value, insn[idx:] )
classic_instruction = False
# classical instructions
if classic_instruction :
op_value = unpack( '=B', insn[idx] )[0]
obj = get_instruction( cm, op_value, insn[idx:], self.odex)
# emit instruction
yield obj
idx = idx + obj.get_length()
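# Usage sketch for the class above (assumption: `cm` is a filled ClassManager
# and `insn` a raw instruction buffer, e.g. DCode.get_insn()). Linear sweep
# decodes instructions back to back; each decoded object reports its own
# length, which drives the cursor forward.
def _example_linear_sweep(cm, insn) :
    lsa = LinearSweepAlgorithm()
    size = len(insn) / calcsize('=H')   # the size parameter is in 16-bit code units
    for ins in lsa.get_instructions(cm, size, insn, 0) :
        print ins.get_name(), ins.get_output()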
class DCode:
"""
This class represents the instructions of a method
:param class_manager: the ClassManager
:type class_manager: :class:`ClassManager` object
:param size: the total size of the buffer
:type size: int
:param buff: a raw buffer where are the instructions
:type buff: string
"""
def __init__(self, class_manager, size, buff) :
self.CM = class_manager
self.insn = buff
self.size = size
self.notes = {}
self.cached_instructions = []
self.idx = 0
def get_insn(self) :
"""
Get the insn buffer
:rtype: string
"""
return self.insn
def set_insn(self, insn) :
"""
Set a new raw buffer to disassemble
:param insn: the buffer
:type insn: string
"""
self.insn = insn
self.size = len(self.insn)
def set_idx(self, idx) :
"""
Set the start address of the buffer
:param idx: the index
:type idx: int
"""
self.idx = idx
def set_instructions(self, instructions) :
"""
Set the instructions
:param instructions: the list of instructions
:type instructions: a list of :class:`Instruction`
"""
self.cached_instructions = instructions
def get_instructions(self) :
"""
Get the instructions
        :rtype: a generator of :class:`Instruction` objects (or the cached list, if instructions have been set with set_instructions())
"""
        # a cache of instructions may be used (avoids a new disassembly)
if self.cached_instructions != [] :
for i in self.cached_instructions :
yield i
return
lsa = LinearSweepAlgorithm()
for i in lsa.get_instructions( self.CM, self.size, self.insn, self.idx ) :
yield i
def reload(self) :
pass
def add_inote(self, msg, idx, off=None) :
"""
        Add a message to a specific instruction, identified (by default) by its index, or by its address if off is specified
:param msg: the message
:type msg: string
:param idx: index of the instruction (the position in the list of the instruction)
:type idx: int
:param off: address of the instruction
:type off: int
"""
if off != None :
idx = self.off_to_pos(off)
if idx not in self.notes :
self.notes[ idx ] = []
self.notes[ idx ].append(msg)
def get_instruction(self, idx, off=None) :
"""
        Get a particular instruction, identified (by default) by its index, or by its address if off is specified
:param idx: index of the instruction (the position in the list of the instruction)
:type idx: int
:param off: address of the instruction
:type off: int
:rtype: an :class:`Instruction` object
"""
if off != None :
idx = self.off_to_pos(off)
return [ i for i in self.get_instructions()][idx]
def off_to_pos(self, off) :
"""
Get the position of an instruction by using the address
:param off: address of the instruction
:type off: int
:rtype: int
"""
idx = 0
nb = 0
for i in self.get_instructions() :
if idx == off :
return nb
nb += 1
idx += i.get_length()
return -1
def get_ins_off(self, off):
"""
Get a particular instruction by using the address
:param off: address of the instruction
:type off: int
:rtype: an :class:`Instruction` object
"""
idx = 0
for i in self.get_instructions() :
if idx == off :
return i
idx += i.get_length()
return None
def show(self) :
"""
Display this object
"""
nb = 0
idx = 0
for i in self.get_instructions() :
print nb, "0x%x" % idx,
i.show(nb)
print
idx += i.get_length()
nb += 1
def pretty_show(self, m_a) :
"""
Display (with a pretty print) this object
:param m_a: :class:`MethodAnalysis` object
"""
bytecode.PrettyShow( m_a.basic_blocks.gets(), self.notes )
bytecode.PrettyShowEx( m_a.exceptions.gets() )
def get_raw(self) :
"""
Return the raw buffer of this object
:rtype: string
"""
return ''.join(i.get_raw() for i in self.get_instructions())
def get_length(self) :
"""
Return the length of this object
:rtype: int
"""
return len(self.get_raw())
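# Sketch: a DCode instance is addressable both by position in the decoded
# instruction list and by byte offset; off_to_pos() walks the stream summing
# get_length(). `dcode` is assumed to be a DCode object.
def _example_dcode_lookup(dcode, off) :
    pos = dcode.off_to_pos(off)   # -1 if off is not the start of an instruction
    if pos != -1 :
        return dcode.get_instruction(pos)
    return None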
class TryItem :
"""
This class represents the try_item format
:param buff: a raw buffer where are the try_item format
:type buff: string
:param cm: the ClassManager
:type cm: :class:`ClassManager` object
"""
def __init__(self, buff, cm) :
self.offset = buff.get_idx()
self.__CM = cm
self.start_addr = unpack("=I", buff.read(4))[0]
self.insn_count = unpack("=H", buff.read(2))[0]
self.handler_off = unpack("=H", buff.read(2))[0]
def set_off(self, off) :
self.offset = off
def get_off(self) :
return self.offset
def get_start_addr(self) :
"""
Get the start address of the block of code covered by this entry. The address is a count of 16-bit code units to the start of the first covered instruction.
:rtype: int
"""
return self.start_addr
def get_insn_count(self) :
"""
Get the number of 16-bit code units covered by this entry
:rtype: int
"""
return self.insn_count
def get_handler_off(self) :
"""
Get the offset in bytes from the start of the associated :class:`EncodedCatchHandlerList` to the :class:`EncodedCatchHandler` for this entry.
:rtype: int
"""
return self.handler_off
def get_raw(self) :
return pack("=I", self.start_addr) + pack("=H", self.insn_count) + pack("=H", self.handler_off)
def get_length(self) :
return len(self.get_raw())
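# Sketch: a try block covers [start_addr, start_addr + insn_count) in 16-bit
# code units; handler_off points into the EncodedCatchHandlerList. `ti` is
# assumed to be a parsed TryItem.
def _example_try_range(ti) :
    start = ti.get_start_addr()
    end = start + ti.get_insn_count()
    return start, end, ti.get_handler_off()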
class DalvikCode :
"""
    This class represents the code_item structure of a method (frame sizes, instructions, tries and handlers)
:param buff: a raw buffer where are the instructions
:type buff: string
:param cm: the ClassManager
:type cm: :class:`ClassManager` object
"""
def __init__(self, buff, cm) :
self.__CM = cm
self.padding = ""
off = buff.get_idx()
while off % 4 != 0 :
self.padding += '\00'
off += 1
buff.set_idx( off )
self.__off = buff.get_idx()
self.registers_size = unpack("=H", buff.read(2))[0]
self.ins_size = unpack("=H", buff.read(2))[0]
self.outs_size = unpack("=H", buff.read(2))[0]
self.tries_size = unpack("=H", buff.read(2))[0]
self.debug_info_off = unpack("=I", buff.read(4))[0]
self.insns_size = unpack("=I", buff.read(4))[0]
ushort = calcsize( '=H' )
self.code = DCode( self.__CM, self.insns_size, buff.read( self.insns_size * ushort ) )
if (self.insns_size % 2 == 1) :
self.__padding = unpack("=H", buff.read(2))[0]
self.tries = []
self.handlers = None
if self.tries_size > 0 :
for i in xrange(0, self.tries_size) :
self.tries.append( TryItem( buff, self.__CM ) )
self.handlers = EncodedCatchHandlerList( buff, self.__CM )
def get_registers_size(self) :
"""
Get the number of registers used by this code
:rtype: int
"""
return self.registers_size
def get_ins_size(self) :
"""
Get the number of words of incoming arguments to the method that this code is for
:rtype: int
"""
return self.ins_size
def get_outs_size(self) :
"""
Get the number of words of outgoing argument space required by this code for method invocation
:rtype: int
"""
return self.outs_size
def get_tries_size(self) :
"""
Get the number of :class:`TryItem` for this instance
:rtype: int
"""
return self.tries_size
def get_debug_info_off(self) :
"""
Get the offset from the start of the file to the debug info (line numbers + local variable info) sequence for this code, or 0 if there simply is no information
:rtype: int
"""
return self.debug_info_off
def get_insns_size(self) :
"""
Get the size of the instructions list, in 16-bit code units
:rtype: int
"""
return self.insns_size
def get_handlers(self) :
"""
Get the bytes representing a list of lists of catch types and associated handler addresses.
:rtype: :class:`EncodedCatchHandlerList`
"""
return self.handlers
def get_tries(self) :
"""
Get the array indicating where in the code exceptions are caught and how to handle them
:rtype: a list of :class:`TryItem` objects
"""
return self.tries
def get_debug(self) :
"""
Return the associated debug object
:rtype: :class:`DebugInfoItem`
"""
return self.__CM.get_debug_off( self.debug_info_off )
def get_bc(self) :
"""
Return the associated code object
:rtype: :class:`DCode`
"""
return self.code
def set_idx(self, idx) :
self.code.set_idx(idx)
def reload(self) :
self.code.reload()
def get_length(self) :
return self.insns_size
def _begin_show(self) :
debug("registers_size: %d" % self.registers_size)
debug("ins_size: %d" % self.ins_size)
debug("outs_size: %d" % self.outs_size)
debug("tries_size: %d" % self.tries_size)
debug("debug_info_off: %d" % self.debug_info_off)
debug("insns_size: %d" % self.insns_size)
bytecode._PrintBanner()
def show(self) :
self._begin_show()
self.code.show()
self._end_show()
def _end_show(self) :
bytecode._PrintBanner()
def pretty_show(self, m_a) :
self._begin_show()
self.code.pretty_show(m_a)
self._end_show()
def get_obj(self) :
return [ i for i in self.handlers ]
def get_raw(self) :
code_raw = self.code.get_raw()
self.insns_size = (len(code_raw) / 2) + (len(code_raw) % 2)
buff = self.padding
buff += pack("=H", self.registers_size) + \
pack("=H", self.ins_size) + \
pack("=H", self.outs_size) + \
pack("=H", self.tries_size) + \
pack("=I", self.debug_info_off) + \
pack("=I", self.insns_size) + \
code_raw
if (self.insns_size % 2 == 1) :
buff += pack("=H", self.__padding)
if self.tries_size > 0 :
buff += ''.join(i.get_raw() for i in self.tries)
buff += self.handlers.get_raw()
return buff
def add_inote(self, msg, idx, off=None) :
"""
        Add a message to a specific instruction, identified (by default) by its index, or by its address if off is specified
:param msg: the message
:type msg: string
:param idx: index of the instruction (the position in the list of the instruction)
:type idx: int
:param off: address of the instruction
:type off: int
"""
if self.code :
return self.code.add_inote(msg, idx, off)
def get_instruction(self, idx, off=None) :
if self.code :
return self.code.get_instruction(idx, off)
def get_size(self) :
length = len(self.padding)
length += len( pack("=H", self.registers_size) + \
pack("=H", self.ins_size) + \
pack("=H", self.outs_size) + \
pack("=H", self.tries_size) + \
pack("=I", self.debug_info_off) + \
pack("=I", self.insns_size) )
length += self.code.get_length()
if (self.insns_size % 2 == 1) :
length += len(pack("=H", self.__padding))
if self.tries_size > 0 :
for i in self.tries :
length += i.get_length()
length += self.handlers.get_length()
return length
def get_off(self) :
return self.__off
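# Sketch: the per-method frame layout comes straight from the code_item
# header parsed above. `dc` is assumed to be a DalvikCode object.
def _example_frame_info(dc) :
    print "registers:", dc.get_registers_size()
    print "ins:  ", dc.get_ins_size()    # incoming argument words
    print "outs: ", dc.get_outs_size()   # outgoing argument words
    print "insns:", dc.get_insns_size(), "16-bit code units"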
class CodeItem :
def __init__(self, size, buff, cm) :
self.__CM = cm
self.offset = buff.get_idx()
self.code = []
self.__code_off = {}
for i in xrange(0, size) :
x = DalvikCode( buff, cm )
self.code.append( x )
self.__code_off[ x.get_off() ] = x
def set_off(self, off) :
self.offset = off
def get_off(self) :
return self.offset
def get_code(self, off) :
try :
return self.__code_off[off]
except KeyError :
return None
def reload(self) :
for i in self.code :
i.reload()
def show(self) :
print "CODE_ITEM"
for i in self.code :
i.show()
def get_obj(self) :
return [ i for i in self.code ]
def get_raw(self) :
return ''.join(i.get_raw() for i in self.code)
def get_length(self) :
length = 0
for i in self.code :
length += i.get_size()
return length
class MapItem :
def __init__(self, buff, cm) :
self.__CM = cm
self.off = buff.get_idx()
self.type = unpack("=H", buff.read(2))[0]
self.unused = unpack("=H", buff.read(2))[0]
self.size = unpack("=I", buff.read(4))[0]
self.offset = unpack("=I", buff.read(4))[0]
self.item = None
buff.set_idx( self.offset )
lazy_analysis = self.__CM.get_lazy_analysis()
if lazy_analysis :
self.next_lazy(buff, cm)
else :
self.next(buff, cm)
def get_off(self) :
return self.off
def get_offset(self) :
return self.offset
def get_type(self) :
return self.type
def get_size(self) :
return self.size
def next(self, buff, cm) :
debug("%s @ 0x%x(%d) %d %x" % (TYPE_MAP_ITEM[ self.type ], buff.get_idx(), buff.get_idx(), self.size, self.offset))
if TYPE_MAP_ITEM[ self.type ] == "TYPE_STRING_ID_ITEM" :
self.item = [ StringIdItem( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_CODE_ITEM" :
self.item = CodeItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_TYPE_ID_ITEM" :
self.item = TypeHIdItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_PROTO_ID_ITEM" :
self.item = ProtoHIdItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_FIELD_ID_ITEM" :
self.item = FieldHIdItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_METHOD_ID_ITEM" :
self.item = MethodHIdItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_CLASS_DEF_ITEM" :
self.item = ClassHDefItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_HEADER_ITEM" :
self.item = HeaderItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_ANNOTATION_ITEM" :
self.item = [ AnnotationItem( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_ANNOTATION_SET_ITEM" :
self.item = [ AnnotationSetItem( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_ANNOTATIONS_DIRECTORY_ITEM" :
self.item = [ AnnotationsDirectoryItem( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_ANNOTATION_SET_REF_LIST" :
self.item = [ AnnotationSetRefList( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_TYPE_LIST" :
self.item = [ TypeList( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_STRING_DATA_ITEM" :
self.item = [ StringDataItem( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_DEBUG_INFO_ITEM" :
self.item = DebugInfoItemEmpty( buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_ENCODED_ARRAY_ITEM" :
self.item = [ EncodedArrayItem( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_CLASS_DATA_ITEM" :
self.item = [ ClassDataItem(buff, cm) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_MAP_LIST" :
            pass # the map_list entry describes this object itself
else :
bytecode.Exit( "Map item %d @ 0x%x(%d) is unknown" % (self.type, buff.get_idx(), buff.get_idx()) )
def next_lazy(self, buff, cm) :
if TYPE_MAP_ITEM[ self.type ] == "TYPE_STRING_ID_ITEM" :
self.item = [ StringIdItem( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_CODE_ITEM" :
self.item = CodeItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_TYPE_ID_ITEM" :
self.item = TypeIdItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_PROTO_ID_ITEM" :
self.item = ProtoIdItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_FIELD_ID_ITEM" :
self.item = FieldIdItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_METHOD_ID_ITEM" :
self.item = MethodIdItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_CLASS_DEF_ITEM" :
self.item = ClassDefItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_HEADER_ITEM" :
self.item = HeaderItem( self.size, buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_TYPE_LIST" :
self.item = [ TypeList( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_STRING_DATA_ITEM" :
self.item = [ StringDataItem( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_DEBUG_INFO_ITEM" :
self.item = DebugInfoItemEmpty( buff, cm )
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_ENCODED_ARRAY_ITEM" :
self.item = [ EncodedArrayItem( buff, cm ) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_CLASS_DATA_ITEM" :
self.item = [ ClassDataItem(buff, cm) for i in xrange(0, self.size) ]
elif TYPE_MAP_ITEM[ self.type ] == "TYPE_MAP_LIST" :
            pass # the map_list entry describes this object itself
def reload(self) :
if self.item != None :
if isinstance( self.item, list ):
for i in self.item :
i.reload()
else :
self.item.reload()
def show(self) :
bytecode._Print( "\tMAP_TYPE_ITEM", TYPE_MAP_ITEM[ self.type ])
if self.item != None :
if isinstance( self.item, list ):
for i in self.item :
i.show()
else :
self.item.show()
def pretty_show(self) :
bytecode._Print( "\tMAP_TYPE_ITEM", TYPE_MAP_ITEM[ self.type ])
if self.item != None :
if isinstance( self.item, list ):
for i in self.item :
if isinstance(i, ClassDataItem) :
i.pretty_show()
else :
i.show()
else :
self.item.show()
def get_obj(self) :
return self.item
def get_raw(self) :
if isinstance(self.item, list) :
self.offset = self.item[0].get_off()
else :
self.offset = self.item.get_off()
return pack("=H", self.type) + pack("=H", self.unused) + pack("=I", self.size) + pack("=I", self.offset)
def get_length(self) :
return calcsize( "=HHII" )
def get_item(self) :
return self.item
def set_item(self, item) :
self.item = item
class OffObj :
def __init__(self, o) :
self.off = o
class ClassManager :
"""
    This class is used to access all elements (strings, types, protos ...) of the dex format
"""
def __init__(self, vm) :
self.vm = vm
self.buff = vm
self.decompiler_ob = None
self.vmanalysis_ob = None
self.gvmanalysis_ob = None
self.__manage_item = {}
self.__manage_item_off = []
self.__strings_off = {}
self.__obj_offset = {}
self.__item_offset = {}
self.__cached_type_list = {}
self.__cached_proto = {}
self.recode_ascii_string = CONF["RECODE_ASCII_STRING"]
self.recode_ascii_string_meth = CONF["RECODE_ASCII_STRING_METH"]
self.lazy_analysis = CONF["LAZY_ANALYSIS"]
self.hook_strings = {}
self.engine = []
self.engine.append("python")
if self.vm != None :
self.odex_format = self.vm.get_format_type() == "ODEX"
def get_odex_format(self) :
return self.odex_format
def get_obj_by_offset(self, offset) :
return self.__obj_offset[ offset ]
def get_item_by_offset(self, offset) :
return self.__item_offset[ offset ]
def get_string_by_offset(self, offset) :
return self.__strings_off[ offset ]
def get_lazy_analysis(self) :
return self.lazy_analysis
def get_vmanalysis(self) :
return self.vmanalysis_ob
def set_vmanalysis(self, vmanalysis) :
self.vmanalysis_ob = vmanalysis
def get_gvmanalysis(self) :
return self.gvmanalysis_ob
def set_gvmanalysis(self, gvmanalysis) :
self.gvmanalysis_ob = gvmanalysis
def set_decompiler(self, decompiler) :
self.decompiler_ob = decompiler
def get_engine(self) :
return self.engine[0]
def get_all_engine(self) :
return self.engine
def add_type_item(self, type_item, c_item, item) :
self.__manage_item[ type_item ] = item
self.__obj_offset[ c_item.get_off() ] = c_item
self.__item_offset[ c_item.get_offset() ] = item
sdi = False
if type_item == "TYPE_STRING_DATA_ITEM" :
sdi = True
if item != None :
if isinstance(item, list) :
for i in item :
goff = i.offset
self.__manage_item_off.append( goff )
self.__obj_offset[ i.get_off() ] = i
if sdi == True :
self.__strings_off[ goff ] = i
else :
self.__manage_item_off.append( c_item.get_offset() )
def get_code(self, idx) :
try :
return self.__manage_item[ "TYPE_CODE_ITEM" ].get_code( idx )
except KeyError :
return None
def get_class_data_item(self, off) :
for i in self.__manage_item[ "TYPE_CLASS_DATA_ITEM" ] :
if i.get_off() == off :
return i
bytecode.Exit( "unknown class data item @ 0x%x" % off )
def get_encoded_array_item(self, off) :
for i in self.__manage_item["TYPE_ENCODED_ARRAY_ITEM" ] :
if i.get_off() == off :
return i
def get_string(self, idx) :
if idx in self.hook_strings :
return self.hook_strings[ idx ]
try :
off = self.__manage_item[ "TYPE_STRING_ID_ITEM" ][idx].get_string_data_off()
except IndexError :
bytecode.Warning( "unknown string item @ %d" % (idx) )
return "AG:IS: invalid string"
try :
if self.recode_ascii_string :
return self.recode_ascii_string_meth( self.__strings_off[off].get() )
return self.__strings_off[off].get()
except KeyError :
bytecode.Warning( "unknown string item @ 0x%x(%d)" % (off,idx) )
return "AG:IS: invalid string"
def get_raw_string(self, idx) :
try :
off = self.__manage_item[ "TYPE_STRING_ID_ITEM" ][idx].get_string_data_off()
except IndexError :
bytecode.Warning( "unknown string item @ %d" % (idx) )
return "AG:IS: invalid string"
try :
return self.__strings_off[off].get()
except KeyError :
bytecode.Warning( "unknown string item @ 0x%x(%d)" % (off,idx) )
return "AG:IS: invalid string"
def get_type_list(self, off) :
if off == 0 :
return "()"
if off in self.__cached_type_list :
return self.__cached_type_list[ off ]
for i in self.__manage_item[ "TYPE_TYPE_LIST" ] :
if i.get_type_list_off() == off :
ret = "(" + i.get_string() + ")"
self.__cached_type_list[ off ] = ret
return ret
return None
def get_type(self, idx) :
_type = self.__manage_item[ "TYPE_TYPE_ID_ITEM" ].get( idx )
if _type == -1 :
return "AG:ITI: invalid type"
return self.get_string( _type )
def get_type_ref(self, idx) :
return self.__manage_item[ "TYPE_TYPE_ID_ITEM" ].get( idx )
def get_proto(self, idx) :
try :
proto = self.__cached_proto[ idx ]
except KeyError :
proto = self.__manage_item[ "TYPE_PROTO_ID_ITEM" ].get( idx )
self.__cached_proto[ idx ] = proto
return [ proto.get_parameters_off_value(), proto.get_return_type_idx_value() ]
def get_field(self, idx) :
field = self.__manage_item[ "TYPE_FIELD_ID_ITEM" ].get( idx )
return [ field.get_class_name(), field.get_type(), field.get_name() ]
def get_field_ref(self, idx) :
return self.__manage_item[ "TYPE_FIELD_ID_ITEM" ].get( idx )
def get_method(self, idx) :
method = self.__manage_item[ "TYPE_METHOD_ID_ITEM" ].get( idx )
return method.get_list()
def get_method_ref(self, idx) :
return self.__manage_item[ "TYPE_METHOD_ID_ITEM" ].get( idx )
def set_hook_class_name(self, class_def, value) :
_type = self.__manage_item[ "TYPE_TYPE_ID_ITEM" ].get( class_def.get_class_idx() )
self.set_hook_string( _type, value )
self.vm._delete_python_export_class( class_def )
class_def.reload()
# FIXME
self.__manage_item[ "TYPE_METHOD_ID_ITEM" ].reload()
for i in class_def.get_methods() :
i.reload()
for i in class_def.get_fields() :
i.reload()
self.vm._create_python_export_class( class_def )
def set_hook_method_name(self, encoded_method, value) :
method = self.__manage_item[ "TYPE_METHOD_ID_ITEM" ].get( encoded_method.get_method_idx() )
self.set_hook_string( method.get_name_idx(), value )
class_def = self.__manage_item[ "TYPE_CLASS_DEF_ITEM" ].get_class_idx( method.get_class_idx() )
if class_def != None :
try :
name = "METHOD_" + bytecode.FormatNameToPython( encoded_method.get_name() )
delattr( class_def, name )
except AttributeError:
name += "_" + bytecode.FormatDescriptorToPython( encoded_method.get_descriptor() )
delattr( class_def, name )
name = "METHOD_" + bytecode.FormatNameToPython( value )
setattr( class_def, name, encoded_method )
method.reload()
def set_hook_field_name(self, encoded_field, value) :
field = self.__manage_item[ "TYPE_FIELD_ID_ITEM" ].get( encoded_field.get_field_idx() )
self.set_hook_string( field.get_name_idx(), value )
class_def = self.__manage_item[ "TYPE_CLASS_DEF_ITEM" ].get_class_idx( field.get_class_idx() )
if class_def != None :
try :
name = "FIELD_" + bytecode.FormatNameToPython( encoded_field.get_name() )
delattr( class_def, name )
except AttributeError:
name += "_" + bytecode.FormatDescriptorToPython( encoded_field.get_descriptor() )
delattr( class_def, name )
name = "FIELD_" + bytecode.FormatNameToPython( value )
setattr( class_def, name, encoded_field )
field.reload()
def set_hook_string(self, idx, value) :
self.hook_strings[ idx ] = value
def get_next_offset_item(self, idx) :
for i in self.__manage_item_off :
if i > idx :
return i
return idx
def get_debug_off(self, off) :
self.buff.set_idx( off )
return DebugInfoItem( self.buff, self )
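# Sketch of the lookups the disassembler performs through a ClassManager:
# instructions store bare indexes which are resolved against the string /
# type / proto / field / method pools registered by add_type_item(). `cm` is
# assumed to be a ClassManager filled by a MapList.
def _example_resolve_indexes(cm, string_idx, type_idx, method_idx) :
    s = cm.get_string(string_idx)    # e.g. a constant used by const-string
    t = cm.get_type(type_idx)        # e.g. "Ljava/lang/String;"
    m = cm.get_method(method_idx)    # [class name, proto, method name]
    return s, t, m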
class MapList :
"""
This class can parse the "map_list" of the dex format
"""
def __init__(self, cm, off, buff) :
self.CM = cm
buff.set_idx( off )
self.offset = off
self.size = unpack("=I", buff.read( 4 ) )[0]
self.map_item = []
for i in xrange(0, self.size) :
idx = buff.get_idx()
mi = MapItem( buff, self.CM )
self.map_item.append( mi )
buff.set_idx( idx + mi.get_length() )
c_item = mi.get_item()
if c_item == None :
mi.set_item( self )
c_item = mi.get_item()
self.CM.add_type_item( TYPE_MAP_ITEM[ mi.get_type() ], mi, c_item )
for i in self.map_item :
i.reload()
def reload(self) :
pass
def get_off(self) :
return self.offset
def set_off(self, off) :
self.offset = off
def get_item_type(self, ttype) :
"""
Get a particular item type
:param ttype: a string which represents the desired type
:rtype: None or the item object
"""
for i in self.map_item :
if TYPE_MAP_ITEM[ i.get_type() ] == ttype :
return i.get_item()
return None
def show(self) :
"""
Print the MapList object
"""
bytecode._Print("MAP_LIST SIZE", self.size)
for i in self.map_item :
if i.item != self :
i.show()
def pretty_show(self) :
"""
Print with a pretty display the MapList object
"""
bytecode._Print("MAP_LIST SIZE", self.size)
for i in self.map_item :
if i.item != self :
i.pretty_show()
def get_obj(self) :
return [ x.get_obj() for x in self.map_item ]
def get_raw(self) :
return pack("=I", self.size) + ''.join(x.get_raw() for x in self.map_item)
def get_class_manager(self) :
return self.CM
def get_length(self) :
return len(self.get_raw())
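# Sketch: the map_list indexes every section of the dex file; a section is
# fetched back by its TYPE_MAP_ITEM name, exactly as DalvikVMFormat._load()
# does below. `ml` is assumed to be a parsed MapList.
def _example_map_lookup(ml) :
    strings = ml.get_item_type("TYPE_STRING_DATA_ITEM")
    classes = ml.get_item_type("TYPE_CLASS_DEF_ITEM")
    return strings, classes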
class XREF :
def __init__(self) :
self.items = []
def add(self, x, y):
self.items.append((x, y))
class DREF :
def __init__(self) :
self.items = []
def add(self, x, y):
self.items.append((x, y))
class DalvikVMFormat(bytecode._Bytecode) :
"""
This class can parse a classes.dex file of an Android application (APK).
:param buff: a string which represents the classes.dex file
    :param decompiler: a decompiler object used to display the java source code
:type buff: string
:type decompiler: object
:Example:
DalvikVMFormat( open("classes.dex", "rb").read() )
"""
def __init__(self, buff, decompiler=None) :
super(DalvikVMFormat, self).__init__( buff )
self.CM = ClassManager(self)
self.CM.set_decompiler( decompiler )
self._preload(buff)
self._load(buff)
def _preload(self, buff) :
pass
def _load(self, buff) :
self.__header = HeaderItem( 0, self, ClassManager(None) )
if self.__header.map_off == 0 :
bytecode.Warning( "no map list ..." )
else :
self.map_list = MapList( self.CM, self.__header.map_off, self )
self.classes = self.map_list.get_item_type( "TYPE_CLASS_DEF_ITEM" )
self.methods = self.map_list.get_item_type( "TYPE_METHOD_ID_ITEM" )
self.fields = self.map_list.get_item_type( "TYPE_FIELD_ID_ITEM" )
self.codes = self.map_list.get_item_type( "TYPE_CODE_ITEM" )
self.strings = self.map_list.get_item_type( "TYPE_STRING_DATA_ITEM" )
self.debug = self.map_list.get_item_type( "TYPE_DEBUG_INFO_ITEM" )
self.header = self.map_list.get_item_type( "TYPE_HEADER_ITEM" )
self.classes_names = None
self.__cache_methods = None
self.__cached_methods_idx = None
def get_classes_def_item(self) :
"""
This function returns the class def item
:rtype: :class:`ClassDefItem` object
"""
return self.classes
def get_methods_id_item(self) :
"""
This function returns the method id item
:rtype: :class:`MethodIdItem` object
"""
return self.methods
def get_fields_id_item(self) :
"""
This function returns the field id item
:rtype: :class:`FieldIdItem` object
"""
return self.fields
def get_codes_item(self) :
"""
This function returns the code item
:rtype: :class:`CodeItem` object
"""
return self.codes
def get_string_data_item(self) :
"""
This function returns the string data item
:rtype: :class:`StringDataItem` object
"""
return self.strings
def get_debug_info_item(self) :
"""
This function returns the debug info item
:rtype: :class:`DebugInfoItem` object
"""
return self.debug
def get_header_item(self) :
"""
This function returns the header item
:rtype: :class:`HeaderItem` object
"""
return self.header
def get_class_manager(self) :
"""
This function returns a ClassManager object which allow you to get
access to all index references (strings, methods, fields, ....)
:rtype: :class:`ClassManager` object
"""
return self.CM
def show(self) :
"""
        Show all the information in the object
"""
self.map_list.show()
def pretty_show(self) :
"""
        Show (but pretty !) all the information in the object
"""
self.map_list.pretty_show()
def save(self) :
"""
Return the dex (with the modifications) into raw format (fix checksums)
:rtype: string
"""
l = []
h = {}
s = {}
h_r = {}
idx = 0
for i in self.map_list.get_obj() :
length = 0
if isinstance(i, list) :
for j in i :
if isinstance(j, AnnotationsDirectoryItem) :
if idx % 4 != 0 :
idx = idx + (4 - (idx % 4))
l.append( j )
c_length = j.get_length()
h[ j ] = idx + length
h_r[ idx + length ] = j
s[ idx + length ] = c_length
length += c_length
#debug("SAVE" + str(j) + " @ 0x%x" % (idx+length))
debug("SAVE " + str(i[0]) + " @ 0x%x" % idx)
else :
if isinstance(i, MapList) :
if idx % 4 != 0 :
idx = idx + (4 - (idx % 4))
l.append( i )
h[ i ] = idx
h_r[ idx ] = i
length = i.get_length()
s[ idx ] = length
debug("SAVE " + str(i) + " @ 0x%x" % idx)
idx += length
self.header.file_size = idx
last_idx = 0
for i in l :
idx = h[ i ]
i.set_off( h[ i ] )
# print i, hex(h[ i ])
last_idx = idx + s[ idx ]
last_idx = 0
buff = ""
for i in l :
idx = h[ i ]
if idx != last_idx :
debug( "Adjust alignment @%x with 00 %x" % (idx, idx - last_idx) )
buff += "\x00" * (idx - last_idx)
buff += i.get_raw()
last_idx = idx + s[ idx ]
debug( "GLOBAL SIZE %d" % len(buff))
return self.fix_checksums(buff)
def fix_checksums(self, buff) :
"""
Fix a dex format buffer by setting all checksums
:rtype: string
"""
import zlib, hashlib
signature = hashlib.sha1(buff[32:]).digest()
buff = buff[:12] + signature + buff[32:]
checksum = zlib.adler32(buff[12:])
buff = buff[:8] + pack("=i", checksum) + buff[12:]
debug( "NEW SIGNATURE %s" % repr(signature) )
debug( "NEW CHECKSUM %x" % checksum )
return buff
def get_cm_field(self, idx) :
"""
Get a specific field by using an index
:param idx: index of the field
:type idx: int
"""
return self.CM.get_field(idx)
def get_cm_method(self, idx) :
"""
Get a specific method by using an index
:param idx: index of the method
:type idx: int
"""
return self.CM.get_method(idx)
def get_cm_string(self, idx) :
"""
Get a specific string by using an index
:param idx: index of the string
:type idx: int
"""
return self.CM.get_raw_string( idx )
def get_cm_type(self, idx) :
"""
Get a specific type by using an index
:param idx: index of the type
:type idx: int
"""
return self.CM.get_type( idx )
def get_classes_names(self) :
"""
Return the names of classes
:rtype: a list of string
"""
if self.classes_names == None :
self.classes_names = [ i.get_name() for i in self.classes.class_def ]
return self.classes_names
def get_classes(self) :
"""
Return all classes
:rtype: a list of :class:`ClassDefItem` objects
"""
return self.classes.class_def
def get_method(self, name) :
"""
        Return a list of all methods whose name matches the regexp
:param name: the name of the method (a python regexp)
:rtype: a list with all :class:`EncodedMethod` objects
"""
prog = re.compile(name)
l = []
for i in self.classes.class_def :
for j in i.get_methods() :
if prog.match( j.get_name() ) :
l.append( j )
return l
def get_field(self, name) :
"""
        Return a list of all fields whose name matches the regexp
:param name: the name of the field (a python regexp)
:rtype: a list with all :class:`EncodedField` objects
"""
prog = re.compile(name)
l = []
for i in self.classes.class_def :
for j in i.get_fields() :
if prog.match( j.get_name() ) :
l.append( j )
return l
def get_all_fields(self) :
"""
Return a list of field items
:rtype: a list of :class:`FieldItem` objects
"""
try :
return self.fields.gets()
except AttributeError :
return []
def get_fields(self) :
"""
Return all field objects
:rtype: a list of :class:`EncodedField` objects
"""
l = []
for i in self.classes.class_def :
for j in i.get_fields() :
l.append( j )
return l
def get_methods(self) :
"""
Return all method objects
:rtype: a list of :class:`EncodedMethod` objects
"""
l = []
for i in self.classes.class_def :
for j in i.get_methods() :
l.append( j )
return l
def get_len_methods(self) :
"""
Return the number of methods
:rtype: int
"""
return len( self.get_methods() )
def get_method_by_idx(self, idx) :
"""
Return a specific method by using an index
:param idx: the index of the method
:type idx: int
:rtype: None or an :class:`EncodedMethod` object
"""
if self.__cached_methods_idx == None :
self.__cached_methods_idx = {}
for i in self.classes.class_def :
for j in i.get_methods() :
self.__cached_methods_idx[ j.get_method_idx() ] = j
try :
return self.__cached_methods_idx[ idx ]
except KeyError :
return None
def get_method_descriptor(self, class_name, method_name, descriptor) :
"""
Return the specific method
:param class_name: the class name of the method
:type class_name: string
:param method_name: the name of the method
:type method_name: string
:param descriptor: the descriptor of the method
:type descriptor: string
:rtype: None or a :class:`EncodedMethod` object
"""
key = class_name + method_name + descriptor
if self.__cache_methods == None :
self.__cache_methods = {}
for i in self.classes.class_def :
for j in i.get_methods() :
self.__cache_methods[ j.get_class_name() + j.get_name() + j.get_descriptor() ] = j
try :
return self.__cache_methods[ key ]
except KeyError :
return None
def get_methods_class(self, class_name) :
"""
Return all methods of a specific class
:param class_name: the class name
:type class_name: string
:rtype: a list with :class:`EncodedMethod` objects
"""
l = []
for i in self.classes.class_def :
for j in i.get_methods() :
if class_name == j.get_class_name() :
l.append( j )
return l
def get_fields_class(self, class_name) :
"""
Return all fields of a specific class
:param class_name: the class name
:type class_name: string
:rtype: a list with :class:`EncodedField` objects
"""
l = []
for i in self.classes.class_def :
for j in i.get_fields() :
if class_name == j.get_class_name() :
l.append( j )
return l
def get_field_descriptor(self, class_name, field_name, descriptor) :
"""
Return the specific field
:param class_name: the class name of the field
:type class_name: string
:param field_name: the name of the field
:type field_name: string
:param descriptor: the descriptor of the field
:type descriptor: string
:rtype: None or a :class:`EncodedField` object
"""
for i in self.classes.class_def :
if class_name == i.get_name() :
for j in i.get_fields() :
if field_name == j.get_name() and descriptor == j.get_descriptor() :
return j
return None
def get_strings(self) :
"""
Return all strings
:rtype: a list with all strings used in the format (types, names ...)
"""
return [i.get() for i in self.strings]
def get_regex_strings(self, regular_expressions) :
"""
        Return all strings matching the regex
:param regular_expressions: the python regex
:type regular_expressions: string
        :rtype: a list of strings matching the regular expression
"""
str_list = []
        if regular_expressions is None :   # guard against a missing pattern
return None
for i in self.get_strings() :
if re.match(regular_expressions, i) :
str_list.append(i)
return str_list
def get_format_type(self) :
"""
Return the type
:rtype: a string
"""
return "DEX"
def create_xref(self, python_export=True) :
"""
Create XREF for this object
:param python_export (boolean): export xref in each method
"""
gvm = self.CM.get_gvmanalysis()
for _class in self.get_classes() :
for method in _class.get_methods() :
method.XREFfrom = XREF()
method.XREFto = XREF()
key = "%s %s %s" % (method.get_class_name(), method.get_name(), method.get_descriptor())
if key in gvm.nodes :
for i in gvm.G.predecessors( gvm.nodes[ key ].id ) :
xref = gvm.nodes_id[ i ]
xref_meth = self.get_method_descriptor( xref.class_name, xref.method_name, xref.descriptor)
if xref_meth != None :
name = bytecode.FormatClassToPython( xref_meth.get_class_name() ) + "__" + \
bytecode.FormatNameToPython( xref_meth.get_name() ) + "__" + \
bytecode.FormatDescriptorToPython( xref_meth.get_descriptor() )
if python_export == True :
setattr( method.XREFfrom, name, xref_meth )
method.XREFfrom.add( xref_meth, xref.edges[ gvm.nodes[ key ] ] )
for i in gvm.G.successors( gvm.nodes[ key ].id ) :
xref = gvm.nodes_id[ i ]
xref_meth = self.get_method_descriptor( xref.class_name, xref.method_name, xref.descriptor)
if xref_meth != None :
name = bytecode.FormatClassToPython( xref_meth.get_class_name() ) + "__" + \
bytecode.FormatNameToPython( xref_meth.get_name() ) + "__" + \
bytecode.FormatDescriptorToPython( xref_meth.get_descriptor() )
if python_export == True :
setattr( method.XREFto, name, xref_meth )
method.XREFto.add( xref_meth, gvm.nodes[ key ].edges[ xref ] )
def create_dref(self, python_export=True) :
"""
Create DREF for this object
:param python_export (boolean): export dref in each field
"""
vmx = self.CM.get_vmanalysis()
for _class in self.get_classes() :
for field in _class.get_fields() :
field.DREFr = DREF()
field.DREFw = DREF()
paths = vmx.tainted_variables.get_field( field.get_class_name(), field.get_name(), field.get_descriptor() )
if paths != None :
access = {}
access["R"] = {}
access["W"] = {}
for path in paths.get_paths() :
access_val, idx = path[0]
m_idx = path[1]
if access_val == 'R' :
dref_meth = self.get_method_by_idx( m_idx )
name = bytecode.FormatClassToPython( dref_meth.get_class_name() ) + "__" + \
bytecode.FormatNameToPython( dref_meth.get_name() ) + "__" + \
bytecode.FormatDescriptorToPython( dref_meth.get_descriptor() )
if python_export == True :
setattr( field.DREFr, name, dref_meth )
try :
access["R"][ dref_meth ].append( idx )
except KeyError :
access["R"][ dref_meth ] = []
access["R"][ dref_meth ].append( idx )
else :
dref_meth = self.get_method_by_idx( m_idx )
name = bytecode.FormatClassToPython( dref_meth.get_class_name() ) + "__" + \
bytecode.FormatNameToPython( dref_meth.get_name() ) + "__" + \
bytecode.FormatDescriptorToPython( dref_meth.get_descriptor() )
if python_export == True :
setattr( field.DREFw, name, dref_meth )
try :
access["W"][ dref_meth ].append( idx )
except KeyError :
access["W"][ dref_meth ] = []
access["W"][ dref_meth ].append( idx )
for i in access["R"] :
field.DREFr.add( i, access["R"][i] )
for i in access["W"] :
field.DREFw.add( i, access["W"][i] )
def create_python_export(self) :
"""
Export classes/methods/fields' names in the python namespace
"""
for _class in self.get_classes() :
self._create_python_export_class(_class)
def _delete_python_export_class(self, _class) :
self._create_python_export_class( _class, True)
def _create_python_export_class(self, _class, delete=False) :
if _class != None :
### Class
name = "CLASS_" + bytecode.FormatClassToPython( _class.get_name() )
if delete :
delattr( self, name )
return
else :
setattr( self, name, _class )
### Methods
m = {}
for method in _class.get_methods() :
if method.get_name() not in m :
m[ method.get_name() ] = []
m[ method.get_name() ].append( method )
for i in m :
if len(m[i]) == 1 :
j = m[i][0]
name = "METHOD_" + bytecode.FormatNameToPython( j.get_name() )
setattr( _class, name, j )
else :
for j in m[i] :
name = "METHOD_" + bytecode.FormatNameToPython( j.get_name() ) + "_" + bytecode.FormatDescriptorToPython( j.get_descriptor() )
setattr( _class, name, j )
### Fields
f = {}
for field in _class.get_fields() :
if field.get_name() not in f :
f[ field.get_name() ] = []
f[ field.get_name() ].append( field )
for i in f :
if len(f[i]) == 1 :
j = f[i][0]
name = "FIELD_" + bytecode.FormatNameToPython( j.get_name() )
setattr( _class, name, j )
else :
for j in f[i] :
name = "FIELD_" + bytecode.FormatNameToPython( j.get_name() ) + "_" + bytecode.FormatDescriptorToPython( j.get_descriptor() )
setattr( _class, name, j )
def dotbuff(self, ins, idx) :
return dot_buff(ins, idx)
def get_BRANCH_DVM_OPCODES(self) :
return BRANCH_DVM_OPCODES
def get_determineNext(self) :
return determineNext
def get_determineException(self) :
return determineException
def get_DVM_TOSTRING(self) :
return DVM_TOSTRING()
def set_decompiler(self, decompiler) :
self.CM.set_decompiler( decompiler )
def set_vmanalysis(self, vmanalysis) :
self.CM.set_vmanalysis( vmanalysis )
def set_gvmanalysis(self, gvmanalysis) :
self.CM.set_gvmanalysis( gvmanalysis )
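# End-to-end usage sketch for DalvikVMFormat (assumption: a classes.dex path
# is given; the class/method names in the lookup are hypothetical placeholders).
def _example_load_dex(path) :
    d = DalvikVMFormat(open(path, "rb").read())
    for c in d.get_classes() :
        print c.get_name()
    # direct lookup, cached after the first call
    return d.get_method_descriptor("Lcom/example/Foo;", "bar", "()V")
# Verification counterpart of fix_checksums() above (sketch): the dex header
# stores adler32(buff[12:]) at offset 8 and sha1(buff[32:]) at offset 12.
def _example_verify_checksums(buff) :
    import zlib, hashlib
    stored_checksum = unpack("=I", buff[8:12])[0]
    stored_signature = buff[12:32]
    ok_checksum = (zlib.adler32(buff[12:]) & 0xffffffff) == stored_checksum
    ok_signature = hashlib.sha1(buff[32:]).digest() == stored_signature
    return ok_checksum, ok_signature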
class OdexHeaderItem :
"""
This class can parse the odex header
:param buff: a Buff object string which represents the odex dependencies
"""
def __init__(self, buff) :
buff.set_idx(8)
self.dex_offset = unpack("=I", buff.read(4))[0]
self.dex_length = unpack("=I", buff.read(4))[0]
self.deps_offset = unpack("=I", buff.read(4))[0]
self.deps_length = unpack("=I", buff.read(4))[0]
self.aux_offset = unpack("=I", buff.read(4))[0]
self.aux_length = unpack("=I", buff.read(4))[0]
self.flags = unpack("=I", buff.read(4))[0]
self.padding = unpack("=I", buff.read(4))[0]
def show(self) :
print "dex_offset:%x dex_length:%x deps_offset:%x deps_length:%x aux_offset:%x aux_length:%x flags:%x" % (self.dex_offset,
self.dex_length,
self.deps_offset,
self.deps_length,
self.aux_offset,
self.aux_length,
self.flags)
class OdexDependencies :
"""
This class can parse the odex dependencies
:param buff: a Buff object string which represents the odex dependencies
"""
def __init__(self, buff) :
self.modification_time = unpack("=I", buff.read(4))[0]
self.crc = unpack("=I", buff.read(4))[0]
self.dalvik_build = unpack("=I", buff.read(4))[0]
self.dependency_count = unpack("=I", buff.read(4))[0]
self.dependencies = []
self.dependency_checksums = []
for i in range(0, self.dependency_count) :
string_length = unpack("=I", buff.read(4))[0]
name_dependency = buff.read( string_length )[:-1]
self.dependencies.append( name_dependency )
self.dependency_checksums.append( buff.read(20) )
def get_dependencies(self) :
"""
Return the list of dependencies
:rtype: a list of strings
"""
return self.dependencies
class DalvikOdexVMFormat(DalvikVMFormat):
"""
This class can parse an odex file
:param buff: a string which represents the odex file
:param decompiler: associate a decompiler object to display the java source code
:type buff: string
:type decompiler: object
:Example:
DalvikOdexVMFormat( open("classes.odex", "rb").read() )
"""
def _preload(self, buff):
magic = buff[:8]
if magic == ODEX_FILE_MAGIC_35 or magic == ODEX_FILE_MAGIC_36:
self.odex_header = OdexHeaderItem(self)
self.set_idx(self.odex_header.deps_offset)
self.dependencies = OdexDependencies(self)
self.set_idx(self.odex_header.dex_offset)
self.set_buff(self.read(self.odex_header.dex_length))
self.set_idx(0)
def get_dependencies(self):
"""
Return the odex dependencies object
:rtype: an OdexDependencies object
"""
return self.dependencies
def get_format_type(self):
"""
Return the type
:rtype: a string
"""
return "ODEX"
|
d9w/6858-android-intents
|
analyzer/androguard/core/bytecodes/dvm.py
|
Python
|
mit
| 232,587 | 0.025272 |
from gateway import Gateway, get_gateway
from integration import Integration, get_integration
from utils.credit_card import CreditCard
|
SimpleTax/merchant
|
billing/__init__.py
|
Python
|
bsd-3-clause
| 135 | 0 |
# -*- coding: utf-8 -*-
from datetime import datetime
from django.db import models
from django.core.urlresolvers import reverse
from ..core.models import TimeStampedModel
class TipoDiagnosticos(TimeStampedModel):
nombre = models.CharField(max_length=150, blank=False, null=False, verbose_name=u'Diagnóstico')
def get_absolute_url(self):
return reverse('diagnosticos:list')
def __unicode__(self):
return self.nombre
class Diagnosticos(TimeStampedModel):
tipo_diagnostico = models.ForeignKey(TipoDiagnosticos, blank=True, null=True,
verbose_name=u'Diagnóstico')
    # Pass the callable (datetime.now, no parentheses) so the default is
    # evaluated when each record is created, not once at import time.
    fecha = models.DateField(blank=False, null=False,
                             help_text=u'Formato: dd/mm/yyyy',
                             default=datetime.now)
    hora = models.TimeField(blank=False, null=False,
                            help_text=u'Formato: hh:mm', default=datetime.now)
|
btenaglia/hpc-historias-clinicas
|
hpc-historias-clinicas/diagnosticos/models.py
|
Python
|
bsd-3-clause
| 945 | 0.003181 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-13 11:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('restaurant', '0014_remove_menuimage_menu_name'),
]
operations = [
migrations.AlterModelOptions(
name='menuimage',
options={'verbose_name': 'MenuImage', 'verbose_name_plural': 'MenuImages'},
),
migrations.AlterField(
model_name='menuimage',
name='restaurant',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='menu_image', to='restaurant.Restaurant'),
),
]
|
midhun3112/restaurant_locator
|
Restaurant_Finder_App/restaurant_finder_app/restaurant_finder_app/restaurant/migrations/0015_auto_20170213_1116.py
|
Python
|
apache-2.0
| 766 | 0.002611 |
#!/usr/bin/env python
from Auth import *
keyId = plc_api.GetKeys(auth, {'person_id': 249241}, ['key_id', 'key'])
for key in keyId:
print "A new key:"
print "Key value ->", key['key']
print "Key id ->",key['key_id']
|
onelab-eu/myslice
|
forge/script/PlcApi/showKeys.py
|
Python
|
gpl-3.0
| 224 | 0.026786 |
###
# Copyright (c) 2012, Valentin Lorentz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
"""
Add a description of the plugin (to be presented to the user inside the wizard)
here. This should describe *what* the plugin does.
"""
import supybot
import supybot.world as world
# Use this for the version of this plugin. You may wish to put a CVS keyword
# in here if you're keeping the plugin in CVS or some similar system.
__version__ = ""
# XXX Replace this with an appropriate author or supybot.Author instance.
__author__ = supybot.authors.unknown
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
# This is a url where the most recent plugin package can be downloaded.
__url__ = '' # 'http://supybot.com/Members/yourname/TwitterStream/download'
from . import config
from . import plugin
from imp import reload
reload(plugin) # In case we're being reloaded.
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
from . import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
kg-bot/SupyBot
|
plugins/TwitterStream/__init__.py
|
Python
|
gpl-3.0
| 2,725 | 0.000734 |
# -*- coding: utf-8 -*-
"""
Read Tweetworks API users from XML responses.
Nicolas Ward
@ultranurd
ultranurd@yahoo.com
http://www.ultranurd.net/code/tweetworks/
2009.06.19
"""
"""
This file is part of the Tweetworks Python API.
Copyright © 2009 Nicolas Ward
Tweetworks Python API is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Tweetworks Python API is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with the Tweetworks Python API. If not, see http://www.gnu.org/licenses/
The term "Tweetworks" is Copyright © 2009 Tweetworks, LLC and is used
under license. See http://www.tweetworks.com/pages/terms
The use of this software requires a unique Tweetworks API key. You must be a
registered Tweetworks user, and have received an API key after requesting one
via http://www.tweetworks.com/pages/contact.
The term "Twitter" is Copyright © 2009 Twitter, Inc.
"""
# System includes
import lxml.etree
from lxml.builder import E
class User:
"""
Represents the data fields of a single Tweetworks user.
"""
def __init__(self, xml = None):
"""
        Reads user fields from the XML, or creates an empty user.
        id - int - Tweetworks numeric user ID
        username - string - Tweetworks/Twitter username
        avatar_url - string - Twitter avatar URL
        name - string - User's "real" name
        twitter_id - int - Twitter numeric user ID
        """
        # Initialize an empty user if no XML was provided
        if xml == None:
            self.id = None
            self.username = ""
            self.avatar_url = ""
            self.name = ""
            self.twitter_id = None
            return
# User ID
self.id = int(xml.xpath("/user/id/text()")[0])
# User's Twitter username
self.username = unicode(xml.xpath("/user/username/text()")[0])
# User avatar URL (loaded from Amazon S3, obtained from Twitter)
self.avatar_url = unicode(xml.xpath("/user/avatar_url/text()")[0])
# User's "real" name
self.name = unicode(xml.xpath("/user/name/text()")[0])
# Twitter ID of the user; this should always be present but isn't always
twitter_id = xml.xpath("/user/twitter_id/text()")
if len(twitter_id) == 1:
self.twitter_id = int(twitter_id[0])
else:
self.twitter_id = None
def __str__(self):
"""
Returns this User as an XML string.
"""
# Get the XML tree and stringify
return lxml.etree.tostring(self.xml())
def __repr__(self):
"""
Returns an eval-ready string for this User's constructor.
"""
return "tweetworks.User(lxml.etree.parsestring(%s))" % repr(str(self))
def xml(self):
"""
Generates an XML element tree for this User.
"""
# Construct the XML tree representing this User
xml = E("user",
E("id", str(self.id)),
E("username", self.username),
E("avatar_url", self.avatar_url),
E("name", self.name),
E("twitter_id",
("", str(self.twitter_id))[self.twitter_id != None]),
)
# Return the XML tree (NOT a string)
return xml
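# --- Illustrative usage (added sketch; not part of the original module) ---
# Round-trips a User through its XML form; all field values are made up.
#
# import lxml.etree
# u = User()
# u.id = 42
# u.username = "someuser"
# u.avatar_url = "http://example.com/avatar.png"
# u.name = "Some User"
# u.twitter_id = 12345
# copy = User(lxml.etree.fromstring(str(u)))
# assert copy.username == u.username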
|
UltraNurd/tweetworks-py
|
tweetworks/User.py
|
Python
|
gpl-3.0
| 3,623 | 0.002762 |
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import re
import os.path
from io import open
from setuptools import find_packages, setup
# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-mgmt-storagepool"
PACKAGE_PPRINT_NAME = "Storage Pool Management"
# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')
# Version extraction inspired from 'requests'
with open(os.path.join(package_folder_path, 'version.py')
if os.path.exists(os.path.join(package_folder_path, 'version.py'))
else os.path.join(package_folder_path, '_version.py'), 'r') as fd:
version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with open('README.md', encoding='utf-8') as f:
readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
changelog = f.read()
setup(
name=PACKAGE_NAME,
version=version,
description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
long_description=readme + '\n\n' + changelog,
long_description_content_type='text/markdown',
license='MIT License',
author='Microsoft Corporation',
author_email='azpysdkhelp@microsoft.com',
url='https://github.com/Azure/azure-sdk-for-python',
keywords="azure, azure sdk", # update with search keywords relevant to the azure service / product
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'License :: OSI Approved :: MIT License',
],
zip_safe=False,
packages=find_packages(exclude=[
'tests',
# Exclude packages that will be covered by PEP420 or nspkg
'azure',
'azure.mgmt',
]),
install_requires=[
'msrest>=0.6.21',
'azure-common~=1.1',
'azure-mgmt-core>=1.3.0,<2.0.0',
],
python_requires=">=3.6"
)
|
Azure/azure-sdk-for-python
|
sdk/storagepool/azure-mgmt-storagepool/setup.py
|
Python
|
mit
| 2,679 | 0.001493 |
'''
Created on 5/02/2010
@author: henry@henryjenkins.name
'''
import datetime
class user(object):
    '''
    Tracks per-date upload/download traffic (bytes and packets) for a single
    host identified by its MAC address.
    '''
dataUp = None
dataDown = None
macAddress = ""
name = ""
def __init__(self, mac="", name=""):
'''
Constructor
'''
self.name = name
self.dataUp = {}
self.dataDown = {}
self.macAddress = mac
def getData(self, type, date=None, peak='other'):
'''
Method to retrieve data for either a set date, or the total data used by user
Return int, data used by this user
'''
data = 0
if date == None:
data = user.__getTotalData(self, type, peak)
else:
data = self.getDownData(type = type, date = date, peak = peak)
date += self.getUpData(type = type, date = date, peak = peak)
return data
def __getTotalData(self, type, peak='other'):
totalData = self.__getTotalUpData(type, peak)
totalData = totalData + self.__getTotalDownData(peak = peak, type = type)
return totalData
def getUpData(self, type, date=None, peak='other'):
data = 0
if date == None:
data = self.__getTotalUpData(type = type, peak = peak)
elif date in self.dataUp:
if type == 'on' or type == 'off':
data = self.dataUp[date][peak][type]
else:
data = self.dataUp[date]['on'][type] + self.dataUp[date]['off'][type]
return data
def __getTotalUpData(self, type, peak='other'):
dataTotal = 0
for date, data in self.dataUp.items():
if peak == 'on' or peak == 'off':
dataTotal += data[peak][type]
else:
dataTotal += data['on'][type]
dataTotal += data['off'][type]
return dataTotal
def getDownData(self, type, date=None, peak='other'):
data = 0
if date == None:
data = self.__getTotalDownData(type = type, peak = peak)
elif date in self.dataDown:
if type == 'on' or type == 'off':
data = self.dataDown[date][peak][type]
else:
data = self.dataDown[date]['on'][type] + self.dataDown[date]['off'][type]
return data
def __getTotalDownData(self, type, peak='other'):
dataTotal = 0
for date, data in self.dataDown.items():
if peak == 'on' or peak == 'off':
dataTotal += data[peak][type]
else:
dataTotal += data['on'][type]
dataTotal += data['off'][type]
return dataTotal
def addData(self, date=None, data=0, pkts=0, peak='on', direction='up'):
if direction == 'up':
self.addUpData(date, data, pkts, peak)
elif direction == 'down':
self.addDownData(date, data, pkts, peak)
def addUpData(self, date=None, data=0, pkts=0, peak='on'): #TODO store packets
date = self.__checkDate(date)
if date not in self.dataUp:# Check if data for date already
self.dataUp[date] = {
'on': {'data': 0, 'pkts': 0},
'off': {'data': 0, 'pkts': 0}
}
self.dataUp[date][peak]['data'] += int(data)
self.dataUp[date][peak]['pkts'] += int(pkts)
def addDownData(self, date=None, data=0, pkts=0, peak='on'): #TODO store packets
date = self.__checkDate(date)
if date not in self.dataDown:# Check if data for date already
self.dataDown[date] = {
'on': {'data': 0, 'pkts': 0},
'off': {'data': 0, 'pkts': 0}
}
self.dataDown[date][peak]['data'] += int(data)
self.dataDown[date][peak]['pkts'] += int(pkts)
    # helper method
def __checkDate(self, localDate=None):
if localDate == None:
localDate = datetime.date.today()
return localDate
def setMac(self, mac=None):
self.macAddress = mac
def setName(self, name=None):
self.name = name
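# --- Illustrative usage (added sketch; not part of the original module) ---
# The MAC address and byte/packet counts below are made up.
#
# import datetime
# u = user(mac="00:11:22:33:44:55", name="laptop")
# today = datetime.date.today()
# u.addUpData(date=today, data=1024, pkts=8, peak='on')
# u.addDownData(date=today, data=4096, pkts=12, peak='off')
# print(u.getData(type='data'))                # 5120: all dates, both peaks
# print(u.getUpData(type='data', peak='on'))   # 1024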
|
steakunderscore/Bandwidth-Monitoring
|
src/user.py
|
Python
|
gpl-3.0
| 4,271 | 0.012175 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 Zadara Storage Inc.
# Copyright (c) 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Ken Pepple
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Built-in volume type properties."""
from oslo.config import cfg
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common.db import exception as db_exc
from cinder.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def create(context, name, extra_specs={}):
"""Creates volume types."""
try:
type_ref = db.volume_type_create(context,
dict(name=name,
extra_specs=extra_specs))
except db_exc.DBError as e:
LOG.exception(_('DB error: %s') % e)
raise exception.VolumeTypeCreateFailed(name=name,
extra_specs=extra_specs)
return type_ref
def destroy(context, id):
"""Marks volume types as deleted."""
if id is None:
msg = _("id cannot be None")
raise exception.InvalidVolumeType(reason=msg)
else:
db.volume_type_destroy(context, id)
def get_all_types(context, inactive=0, search_opts={}):
"""Get all non-deleted volume_types.
Pass true as argument if you want deleted volume types returned also.
"""
vol_types = db.volume_type_get_all(context, inactive)
if search_opts:
LOG.debug(_("Searching by: %s") % str(search_opts))
def _check_extra_specs_match(vol_type, searchdict):
for k, v in searchdict.iteritems():
if (k not in vol_type['extra_specs'].keys()
or vol_type['extra_specs'][k] != v):
return False
return True
# search_option to filter_name mapping.
filter_mapping = {'extra_specs': _check_extra_specs_match}
result = {}
for type_name, type_args in vol_types.iteritems():
# go over all filters in the list
for opt, values in search_opts.iteritems():
try:
filter_func = filter_mapping[opt]
except KeyError:
# no such filter - ignore it, go to next filter
continue
else:
if filter_func(type_args, values):
result[type_name] = type_args
break
vol_types = result
return vol_types
def get_volume_type(ctxt, id):
"""Retrieves single volume type by id."""
if id is None:
msg = _("id cannot be None")
raise exception.InvalidVolumeType(reason=msg)
if ctxt is None:
ctxt = context.get_admin_context()
return db.volume_type_get(ctxt, id)
def get_volume_type_by_name(context, name):
"""Retrieves single volume type by name."""
if name is None:
msg = _("name cannot be None")
raise exception.InvalidVolumeType(reason=msg)
return db.volume_type_get_by_name(context, name)
def get_default_volume_type():
"""Get the default volume type."""
name = CONF.default_volume_type
vol_type = {}
if name is not None:
ctxt = context.get_admin_context()
try:
vol_type = get_volume_type_by_name(ctxt, name)
except exception.VolumeTypeNotFoundByName as e:
# Couldn't find volume type with the name in default_volume_type
# flag, record this issue and move on
#TODO(zhiteng) consider add notification to warn admin
LOG.exception(_('Default volume type is not found, '
'please check default_volume_type config: %s'), e)
return vol_type
def is_key_value_present(volume_type_id, key, value, volume_type=None):
if volume_type_id is None:
return False
if volume_type is None:
volume_type = get_volume_type(context.get_admin_context(),
volume_type_id)
if (volume_type.get('extra_specs') is None or
volume_type['extra_specs'].get(key) != value):
return False
else:
return True
def get_volume_type_extra_specs(volume_type_id, key=False):
volume_type = get_volume_type(context.get_admin_context(),
volume_type_id)
extra_specs = volume_type['extra_specs']
if key:
if extra_specs.get(key):
return extra_specs.get(key)
else:
return False
else:
return extra_specs
def is_encrypted(context, volume_type_id):
if volume_type_id is None:
return False
encryption = db.volume_type_encryption_get(context, volume_type_id)
return encryption is not None
def get_volume_type_qos_specs(volume_type_id):
ctxt = context.get_admin_context()
res = db.volume_type_qos_specs_get(ctxt,
volume_type_id)
return res
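# --- Illustrative sketch (added; not part of the original module) ---
# The extra_specs filter in get_all_types() keeps a volume type only when
# every searched key/value pair is present verbatim. This standalone helper
# reproduces that rule on plain dicts; the spec names are made up.
def _demo_extra_specs_match(extra_specs, searchdict):
    # every searched key must exist and carry exactly the searched value
    return all(extra_specs.get(k) == v for k, v in searchdict.items())
# _demo_extra_specs_match({'backend': 'lvm', 'qos': 'high'}, {'qos': 'high'})
#     -> True
# _demo_extra_specs_match({'backend': 'lvm'}, {'qos': 'high'})
#     -> False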
|
ntt-sic/cinder
|
cinder/volume/volume_types.py
|
Python
|
apache-2.0
| 5,726 | 0.000175 |
import unittest
import pystache
from pystache import Renderer
from examples.nested_context import NestedContext
from examples.complex import Complex
from examples.lambdas import Lambdas
from examples.template_partial import TemplatePartial
from examples.simple import Simple
from pystache.tests.common import EXAMPLES_DIR
from pystache.tests.common import AssertStringMixin
class TestSimple(unittest.TestCase, AssertStringMixin):
def test_nested_context(self):
renderer = Renderer()
view = NestedContext(renderer)
view.template = '{{#foo}}{{thing1}} and {{thing2}} and {{outer_thing}}{{/foo}}{{^foo}}Not foo!{{/foo}}'
actual = renderer.render(view)
self.assertString(actual, u"one and foo and two")
def test_looping_and_negation_context(self):
template = '{{#item}}{{header}}: {{name}} {{/item}}{{^item}} Shouldnt see me{{/item}}'
context = Complex()
renderer = Renderer()
actual = renderer.render(template, context)
self.assertEqual(actual, "Colors: red Colors: green Colors: blue ")
def test_empty_context(self):
template = '{{#empty_list}}Shouldnt see me {{/empty_list}}{{^empty_list}}Should see me{{/empty_list}}'
self.assertEqual(pystache.Renderer().render(template), "Should see me")
def test_callables(self):
view = Lambdas()
view.template = '{{#replace_foo_with_bar}}foo != bar. oh, it does!{{/replace_foo_with_bar}}'
renderer = Renderer()
actual = renderer.render(view)
self.assertString(actual, u'bar != bar. oh, it does!')
def test_rendering_partial(self):
renderer = Renderer(search_dirs=EXAMPLES_DIR)
view = TemplatePartial(renderer=renderer)
view.template = '{{>inner_partial}}'
actual = renderer.render(view)
self.assertString(actual, u'Again, Welcome!')
view.template = '{{#looping}}{{>inner_partial}} {{/looping}}'
actual = renderer.render(view)
self.assertString(actual, u"Again, Welcome! Again, Welcome! Again, Welcome! ")
def test_non_existent_value_renders_blank(self):
view = Simple()
template = '{{not_set}} {{blank}}'
self.assertEqual(pystache.Renderer().render(template), ' ')
def test_template_partial_extension(self):
"""
Side note:
From the spec--
Partial tags SHOULD be treated as standalone when appropriate.
In particular, this means that trailing newlines should be removed.
"""
renderer = Renderer(search_dirs=EXAMPLES_DIR, file_extension='txt')
view = TemplatePartial(renderer=renderer)
actual = renderer.render(view)
self.assertString(actual, u"""Welcome
-------
## Again, Welcome! ##""")
|
zzeleznick/zDjango
|
venv/lib/python2.7/site-packages/pystache/tests/test_simple.py
|
Python
|
mit
| 2,785 | 0.002154 |
"""
Read graphs in LEDA format.
See http://www.algorithmic-solutions.info/leda_guide/graphs/leda_native_graph_fileformat.html
"""
# Original author: D. Eppstein, UC Irvine, August 12, 2003.
# The original code at http://www.ics.uci.edu/~eppstein/PADS/ is public domain.
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2004-2009 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__all__ = ['read_leda', 'parse_leda']
import networkx
from networkx.exception import NetworkXException, NetworkXError
from networkx.utils import _get_fh, is_string_like
def read_leda(path):
"""Read graph in GraphML format from path.
Returns an XGraph or XDiGraph."""
fh=_get_fh(path,mode='r')
G=parse_leda(fh)
return G
def parse_leda(lines):
"""Parse LEDA.GRAPH format from string or iterable.
Returns an Graph or DiGraph."""
if is_string_like(lines): lines=iter(lines.split('\n'))
lines = iter([line.rstrip('\n') for line in lines \
if not (line.startswith('#') or line.startswith('\n') or line=='')])
for i in range(3):
lines.next()
# Graph
du = int(lines.next()) # -1 directed, -2 undirected
if du==-1:
G = networkx.DiGraph()
else:
G = networkx.Graph()
# Nodes
n =int(lines.next()) # number of vertices
node={}
for i in range(1,n+1): # LEDA counts from 1 to n
symbol=lines.next().rstrip().strip('|{}| ')
if symbol=="": symbol=str(i) # use int if no label - could be trouble
node[i]=symbol
G.add_nodes_from([s for i,s in node.items()])
# Edges
m = int(lines.next()) # number of edges
for i in range(m):
try:
s,t,reversal,label=lines.next().split()
except:
raise NetworkXError,\
'Too few fields in LEDA.GRAPH edge %d' % (i+1)
# BEWARE: no handling of reversal edges
G.add_edge(node[int(s)],node[int(t)],label=label[2:-2])
return G
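# --- Illustrative usage (added sketch; not part of the original module) ---
# A minimal LEDA.GRAPH document: magic line, node/edge type lines (skipped by
# the parser), -1 for directed, node count, |{label}| lines, edge count, and
# "source target reversal |{label}|" edge lines.
#
# data = """LEDA.GRAPH
# string
# short
# -1
# 2
# |{A}|
# |{B}|
# 1
# 1 2 0 |{x}|
# """
# G = parse_leda(data)          # a DiGraph, since the type line is -1
# print G.edges(data=True)      # [('A', 'B', {'label': 'x'})]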
|
JaneliaSciComp/Neuroptikon
|
Source/lib/CrossPlatform/networkx/readwrite/leda.py
|
Python
|
bsd-3-clause
| 2,097 | 0.023367 |
from model.contact import Contact
from model.group import Group
from fixture.orm import ORMFixture
import random
def test_del_contact_from_group(app):
orm = ORMFixture(host="127.0.0.1", name="addressbook", user="root", password="")
    # make sure at least one group exists
if len(orm.get_group_list()) == 0:
app.group.create(Group(name="test"))
group = random.choice(orm.get_group_list()) # choose random group from list
if len(orm.get_contacts_in_group(Group(id=group.id))) == 0:
if len(orm.get_contacts_not_in_group(Group(id=group.id))) == 0:
app.contact.create(Contact(firstname="Ivan"))
contact_not_in_group = random.choice(orm.get_contacts_not_in_group(Group(id=group.id)))
app.contact.add_contact_to_group_by_id(contact_not_in_group.id, group.id)
old_contacts_in_group = orm.get_contacts_in_group(Group(id=group.id))
contact_in_group = random.choice(old_contacts_in_group) # choose random contact from list
app.contact.delete_contact_from_group_by_id(contact_in_group.id, group.id)
new_contacts_in_group = orm.get_contacts_in_group(Group(id=group.id))
old_contacts_in_group.remove(contact_in_group)
assert sorted(old_contacts_in_group, key=Contact.id_or_max) == sorted(new_contacts_in_group, key=Contact.id_or_max)
|
Lana-Pa/Python-training
|
test/test_delete_contact_from_group.py
|
Python
|
apache-2.0
| 1,311 | 0.006865 |
# -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------
# jsonrpc - jsonrpc interface for XBMC-compatible remotes
# -----------------------------------------------------------------------
# $Id$
#
# JSONRPC and XBMC eventserver to be used for XBMC-compatible
# remotes. Only tested with Yatse so far. If something is not working,
# do not blame the remote, blame this plugin.
#
# Not all API calls are implemented yet.
#
# -----------------------------------------------------------------------
# Freevo - A Home Theater PC framework
# Copyright (C) 2014 Dirk Meyer, et al.
#
# First Edition: Dirk Meyer <https://github.com/Dischi>
# Maintainer: Dirk Meyer <https://github.com/Dischi>
#
# Please see the file AUTHORS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# ----------------------------------------------------------------------- */
# python imports
import os
import logging
import socket
import urllib
# kaa imports
import kaa
import kaa.beacon
# freevo imports
from ... import core as freevo
# get logging object
log = logging.getLogger('freevo')
# generic functions
import utils
import eventserver
# jsonrpc callbacks
import videolibrary as VideoLibrary
import player as Player
import playlist as Playlist
class PluginInterface( freevo.Plugin ):
"""
JSONRPC and XBMC eventserver to be used for XBMC-compatible remotes
"""
@kaa.coroutine()
def plugin_activate(self, level):
"""
Activate the plugin
"""
super(PluginInterface, self).plugin_activate(level)
self.httpserver = freevo.get_plugin('httpserver')
if not self.httpserver:
raise RuntimeError('httpserver plugin not running')
self.httpserver.server.add_json_handler('/jsonrpc', self.jsonrpc)
self.httpserver.server.add_handler('/image/', self.provide_image)
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._sock.bind(('', freevo.config.plugin.jsonrpc.eventserver))
udp = kaa.Socket()
udp.wrap(self._sock, kaa.IO_READ | kaa.IO_WRITE)
udp.signals['read'].connect(eventserver.handle)
utils.imagedir = (yield kaa.beacon.get_db_info())['directory']
utils.cachedir = os.path.join(os.environ['HOME'], '.thumbnails')
self.api = {}
for module in ('VideoLibrary', 'Player', 'Playlist'):
for name in dir(eval(module)):
method = getattr(eval(module), name)
if callable(method) and not name.startswith('_'):
self.api[module + '.' + name] = method
@kaa.coroutine()
def provide_image(self, path, **attributes):
"""
HTTP callback for images
"""
filename = ''
path = urllib.unquote(path)
if path.startswith('beacon'):
filename = os.path.join(utils.imagedir, path[7:])
if path.startswith('cache'):
filename = os.path.join(utils.cachedir, path[6:])
if path.startswith('thumbnail'):
item = yield kaa.beacon.query(id=int(path.split('/')[2]), type=path.split('/')[1])
if len(item) != 1:
log.error('beacon returned wrong results')
yield None
thumbnail = item[0].get('thumbnail')
            if thumbnail.needs_update or 1: # 'or 1' makes this always true, so thumbnails are regenerated on every request
yield kaa.inprogress(thumbnail.create(priority=kaa.beacon.Thumbnail.PRIORITY_HIGH))
filename = thumbnail.large
if filename:
if os.path.isfile(filename):
yield open(filename).read(), None, None
log.error('no file: %s' % filename)
yield None
else:
yield None
def Application_GetProperties(self, properties):
"""
JsonRPC Callback Application.GetProperties
"""
result = {}
for prop in properties:
if prop == 'version':
result[prop] = {"major": 16,"minor": 0,"revision": "a5f3a99", "tag": "stable"}
elif prop == 'volume':
result[prop] = 100
elif prop == 'muted':
result[prop] = eventserver.muted
else:
raise AttributeError('unsupported property: %s' % prop)
return result
def Settings_GetSettingValue(self, setting):
"""
JsonRPC Settings.GetSettingValue (MISSING)
"""
return {}
def XBMC_GetInfoBooleans(self, booleans):
"""
JsonRPC Callback XBMC.GetInfoBooleans
"""
result = {}
for b in booleans:
if b == 'System.Platform.Linux':
result[b] = True
else:
result[b] = False
return result
def XBMC_GetInfoLabels(self, labels):
"""
JsonRPC Callback XBMC.GetInfoLabels
"""
result = {}
for l in labels:
# FIXME: use correct values for all these labels
if l == 'System.BuildVersion':
result[l] = "13.1"
elif l == 'System.KernelVersion':
result[l] = "Linux 3.11.0"
elif l == 'MusicPlayer.Codec':
result[l] = ""
elif l == 'MusicPlayer.SampleRate':
result[l] = ""
elif l == 'MusicPlayer.BitRate':
result[l] = ""
else:
raise AttributeError('unsupported label: %s' % l)
return result
def XBMC_Ping(self):
"""
JsonRPC Ping
"""
return ''
def JSONRPC_Ping(self):
"""
JsonRPC Ping
"""
return ''
def GUI_ActivateWindow(self, window, parameters=None):
"""
Switch Menu Type
"""
window = window.lower()
if window == 'pictures':
freevo.Event(freevo.MENU_GOTO_MEDIA).post('image', event_source='user')
elif window == 'musiclibrary':
freevo.Event(freevo.MENU_GOTO_MEDIA).post('audio', event_source='user')
elif window == 'videos':
if parameters and parameters[0] == 'MovieTitles':
freevo.Event(freevo.MENU_GOTO_MEDIA).post('video', 'movie', event_source='user')
if parameters and parameters[0] == 'TvShowTitles':
freevo.Event(freevo.MENU_GOTO_MEDIA).post('video', 'tv', event_source='user')
elif window == 'home':
freevo.Event(freevo.MENU_GOTO_MAINMENU).post(event_source='user')
else:
log.error('ActivateWindow: unsupported window: %s' % window)
@kaa.coroutine()
def jsonrpc(self, path, **attributes):
"""
HTTP callback for /jsonrpc
"""
if not attributes:
# supported XBMC API version
yield {"major": 6,"minor": 14,"patch": 3}
method = attributes.get('method')
params = attributes.get('params')
result = None
if method.startswith('Input'):
callback = eventserver.input(method[6:].lower(), params)
yield {'jsonrpc': '2.0', 'result': 'OK', 'id': attributes.get('id')}
callback = self.api.get(method, None) or getattr(self, method.replace('.', '_'), None)
if callback:
# log.info('%s(%s)' % (method, params))
if params is None:
result = callback()
else:
result = callback(**params)
if isinstance(result, kaa.InProgress):
result = yield result
else:
raise AttributeError('unsupported method: %s' % method)
yield {'jsonrpc': '2.0', 'result': result, 'id': attributes.get('id')}
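# --- Illustrative request (added sketch; not part of the original plugin) ---
# A remote such as Yatse would POST a JSON body like this to /jsonrpc; the
# dispatcher above routes it to Application_GetProperties().
#
# {"jsonrpc": "2.0",
#  "method": "Application.GetProperties",
#  "params": {"properties": ["version", "muted"]},
#  "id": 1}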
|
freevo/freevo2
|
src/plugins/jsonrpc/__init__.py
|
Python
|
gpl-2.0
| 8,340 | 0.003357 |
#!/usr/bin/env python
#
# Copyright (C) 2011 Andy Aschwanden
#
# This file is part of PISM.
#
# PISM is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# PISM is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License
# along with PISM; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sys
import time
import numpy as np
from pyproj import Proj
from sys import stderr
write = stderr.write
# try different netCDF modules
try:
from netCDF4 import Dataset as CDF
except:
from netCDF3 import Dataset as CDF
from optparse import OptionParser
__author__ = "Andy Aschwanden"
# Create PISM-readable input file from Storglaciaren DEM
parser = OptionParser()
parser.usage = "usage: %prog [options]"
parser.description = "Preprocess Storglaciaren files."
(options, args) = parser.parse_args()
# Create PISM-readable input file from Storglaciaren DEM
write('------------------------------\n')
write('PISM-Storglaciaren example\n')
write('------------------------------\n')
# data dir
data_dir = './'
# Bed and Surface DEMs for Storglaciaren
XFile = data_dir + 'X.txt.gz'
YFile = data_dir + 'Y.txt.gz'
zBaseFile = data_dir + 'zBase.txt.gz'
zSurfFile = data_dir + 'zSurf.txt.gz'
# load coordinate information. Note: Swedish grid (RT90) uses inverse notation
# X -> northing, Y -> easting
try:
write('Reading northing coordinate infos from %s: ' % XFile)
X = np.loadtxt(XFile)
write('Done.\n')
write('Reading easting coordinate infos from %s: ' % YFile)
Y = np.loadtxt(YFile)
write('Done.\n')
except IOError:
    write('ERROR: File %s or %s could not be found.\n' % (XFile, YFile))
exit(2)
# load Bed DEM
try:
write('Reading DEM from %s: ' % zBaseFile)
zBase = np.loadtxt(zBaseFile)
write('Done.\n')
except IOError:
write('ERROR: File %s could not be found.\n' % zBaseFile)
exit(2)
# load Surface DEM
try:
write('Reading DEM from %s: ' % zSurfFile)
zSurf = np.loadtxt(zSurfFile)
write('Done.\n')
except IOError:
write('ERROR: File %s could not be found.\n' % zSurfFile)
exit(2)
# Grid size. DEM has 10m spacing.
N = zBase.shape[1]
M = zBase.shape[0]
e0 = Y.min()
n0 = X.min()
de = 10 # m
dn = 10 # m
e1 = e0 + (N-1)*de
n1 = n0 + (M-1)*dn
easting = np.linspace(e0, e1, N)
northing = np.linspace(n0, n1, M)
# convert to lat/lon
# From http://lists.maptools.org/pipermail/proj/2008-December/004165.html:
#
# However, a simpler method, now recommended by the Swedish Land Survey
# instead of a 7-parameter shift, is to start from the WGS84 datum, and than
# tweak the projection parameters a little: just use a Transverse Mercator
# with
# central meridian: 15" 48' 22.624306" E
# scale factor: 1.00000561024
# false easting: 1500064.274 m
# false northing: -667.711 m
# ( http://www.lantmateriet.se/templates/LMV_Page.aspx?id=5197&lang=EN )
projRT90 = "+proj=tmerc +datum=WGS84 +lon_0=-15.806284 +x_0=1500064.274 +y_0=-667.711 +k=1.00000561024 +units=m"
ee, nn = np.meshgrid(easting, northing)
projection = Proj(projRT90)
longitude, latitude = projection(ee, nn, inverse=True)
write("Coordinates of the lower-left grid corner:\n"
" easting = %.0f\n"
" northing = %.0f\n"
"Grid size:\n"
" rows = %d\n"
" columns = %d\n" % (e0, n0, N, M))
# Fill value
fill_value = -9999
bed_valid_min = -5000.0
thk_valid_min = 0.0
bed = np.flipud(zBase)
dem = np.flipud(zSurf) # ignored by bootstrapping
thk = np.flipud(zSurf-zBase) # used for bootstrapping
# Replace NaNs with zeros
thk = np.nan_to_num(thk)
# There are some negative thickness values
# Quick and dirty: set to zero
# some inconsistencies in the original data still needs to be sorted out
# (filtering)
thk[thk<0] = 0
# Output filename
ncfile = 'pism_storglaciaren_3d.nc'
# Write the data:
nc = CDF(ncfile, "w",format='NETCDF3_CLASSIC') # for netCDF4 module
# Create dimensions x and y
nc.createDimension("x", size=easting.shape[0])
nc.createDimension("y", size=northing.shape[0])
x = nc.createVariable("x", 'f4', dimensions=("x",))
x.units = "m";
x.long_name = "easting"
x.standard_name = "projection_x_coordinate"
y = nc.createVariable("y", 'f4', dimensions=("y",))
y.units = "m";
y.long_name = "northing"
y.standard_name = "projection_y_coordinate"
x[:] = easting
y[:] = northing
def def_var(nc, name, units, fillvalue):
var = nc.createVariable(name, 'f', dimensions=("y", "x"),fill_value=fillvalue)
var.units = units
return var
lon_var = def_var(nc, "lon", "degrees_east", None)
lon_var.standard_name = "longitude"
lon_var[:] = longitude
lat_var = def_var(nc, "lat", "degrees_north", None)
lat_var.standard_name = "latitude"
lat_var[:] = latitude
bed_var = def_var(nc, "topg", "m", fill_value)
bed_var.valid_min = bed_valid_min
bed_var.standard_name = "bedrock_altitude"
bed_var.coordinates = "lat lon"
bed_var[:] = bed
thk_var = def_var(nc, "thk", "m", fill_value)
thk_var.valid_min = thk_valid_min
thk_var.standard_name = "land_ice_thickness"
thk_var.coordinates = "lat lon"
thk_var[:] = thk
dem_var = def_var(nc, "usurf_from_dem", "m", fill_value)
dem_var.standard_name = "surface_altitude"
dem_var.coordinates = "lat lon"
dem_var[:] = dem
# generate (somewhat) reasonable acab
acab_max = 2.5 # m/a
acab_min = -3.0 # m/a
acab_up = easting.min() + 200 # m; location of upstream end of linear acab
acab_down = easting.max() - 600 # m;location of downstream end of linear acab
acab = np.ones_like(dem)
acab[:] = acab_max - (acab_max-acab_min) * (easting - acab_up) / (acab_down - acab_up)
acab[thk<1] = acab_min
acab_var = def_var(nc, "climatic_mass_balance", "m year-1", fill_value)
acab_var.standard_name = "land_ice_surface_specific_mass_balance"
acab_var[:] = acab
# Set boundary conditions for Scandinavian-type polythermal glacier
# ------------------------------------------------------------------------------
#
# (A) Surface temperature for temperature equation bc
T0 = 273.15 # K
Tma = -6.0 # degC, mean annual air temperature at Tarfala
zcts = 1300 # m a.s.l.; altitude where CTS is at the surface, projected to topg
slope = 100 # m; range around which surface temp transition happens
# old abrupt jump:
#artm = np.zeros((M,N),float) + T0
#artm[bed<zcts] = T0 + Tma # Scandinavian-type polythermal glacier
# smoothed version; FIXME: can't we at least have it depend on initial DEM?
# additional lapse rate?
artm = T0 + Tma * (zcts + slope - bed) / (2.0 * slope)
artm[bed<zcts-slope] = T0 + Tma
artm[bed>zcts+slope] = T0
artm_var = def_var(nc, "ice_surface_temp", "K", fill_value)
artm_var[:] = artm
# set global attributes
nc.Conventions = "CF-1.4"
historysep = ' '
historystr = time.asctime() + ': ' + historysep.join(sys.argv) + '\n'
setattr(nc, 'history', historystr)
nc.projection = projRT90
nc.close()
write('Done writing NetCDF file %s!\n' % ncfile)
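# Optional sanity check (added sketch; not part of the original script):
# re-open the file just written and confirm the bootstrapping variables exist.
#
# nc = CDF(ncfile, 'r')
# for name in ('topg', 'thk', 'climatic_mass_balance', 'ice_surface_temp'):
#     assert name in nc.variables, name
# nc.close()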
|
JohannesFeldmann/pism
|
examples/storglaciaren/sg_create_3d.py
|
Python
|
gpl-2.0
| 7,325 | 0.006416 |
#!/usr/bin/python
"""This script lists classes and optionally attributes from UML model created
with Gaphor."""
import optparse
import sys
from gaphor import UML
from gaphor.application import Session
# Setup command line options.
usage = "usage: %prog [options] file.gaphor"
def main():
parser = optparse.OptionParser(usage=usage)
parser.add_option(
"-a",
"--attributes",
dest="attrs",
action="store_true",
help="Print class attributes",
)
(options, args) = parser.parse_args()
if len(args) != 1:
parser.print_help()
sys.exit(1)
# The model file to load.
model = args[0]
# Create the Gaphor application object.
session = Session()
# Get services we need.
element_factory = session.get_service("element_factory")
file_manager = session.get_service("file_manager")
# Load model from file.
file_manager.load(model)
# Find all classes using factory select.
for cls in element_factory.select(UML.Class):
print(f"Found class {cls.name}")
if options.attrs:
for attr in cls.ownedAttribute:
print(f" Attribute: {attr.name}")
if __name__ == "__main__":
main()
|
amolenaar/gaphor
|
examples/list_classes.py
|
Python
|
lgpl-2.1
| 1,239 | 0 |
from .rest import RestClient
class Rules(object):
"""Rules endpoint implementation.
Args:
domain (str): Your Auth0 domain, e.g: 'username.auth0.com'
token (str): Management API v2 Token
telemetry (bool, optional): Enable or disable Telemetry
(defaults to True)
timeout (float or tuple, optional): Change the requests
connect and read timeout. Pass a tuple to specify
both values separately or a float to set both to it.
(defaults to 5.0 for both)
rest_options (RestClientOptions): Pass an instance of
RestClientOptions to configure additional RestClient
options, such as rate-limit retries.
(defaults to None)
"""
def __init__(self, domain, token, telemetry=True, timeout=5.0, protocol="https", rest_options=None):
self.domain = domain
self.protocol = protocol
self.client = RestClient(jwt=token, telemetry=telemetry, timeout=timeout, options=rest_options)
def _url(self, id=None):
url = '{}://{}/api/v2/rules'.format(self.protocol, self.domain)
if id is not None:
return '{}/{}'.format(url, id)
return url
def all(self, stage='login_success', enabled=True, fields=None,
include_fields=True, page=None, per_page=None, include_totals=False):
"""Retrieves a list of all rules.
Args:
stage (str, optional): Retrieves rules that match the execution stage.
Defaults to login_success.
enabled (bool, optional): If provided, retrieves rules that match
the value, otherwise all rules are retrieved.
fields (list, optional): A list of fields to include or exclude
(depending on include_fields) from the result. Leave empty to
retrieve all fields.
include_fields (bool, optional): True if the fields specified are
to be included in the result, False otherwise. Defaults to True.
page (int, optional): The result's page number (zero based). When not set,
the default value is up to the server.
per_page (int, optional): The amount of entries per page. When not set,
the default value is up to the server.
include_totals (bool, optional): True if the query summary is
to be included in the result, False otherwise. Defaults to False.
See: https://auth0.com/docs/api/management/v2#!/Rules/get_rules
"""
params = {
'stage': stage,
'fields': fields and ','.join(fields) or None,
'include_fields': str(include_fields).lower(),
'page': page,
'per_page': per_page,
'include_totals': str(include_totals).lower()
}
# since the default is True, this is here to disable the filter
if enabled is not None:
params['enabled'] = str(enabled).lower()
return self.client.get(self._url(), params=params)
def create(self, body):
"""Creates a new rule.
Args:
body (dict): Attributes for the newly created rule.
See: https://auth0.com/docs/api/v2#!/Rules/post_rules
"""
return self.client.post(self._url(), data=body)
def get(self, id, fields=None, include_fields=True):
"""Retrieves a rule by its ID.
Args:
id (str): The id of the rule to retrieve.
fields (list, optional): A list of fields to include or exclude
(depending on include_fields) from the result. Leave empty to
retrieve all fields.
include_fields (bool, optional): True if the fields specified are
to be included in the result, False otherwise. Defaults to True.
See: https://auth0.com/docs/api/management/v2#!/Rules/get_rules_by_id
"""
params = {'fields': fields and ','.join(fields) or None,
'include_fields': str(include_fields).lower()}
return self.client.get(self._url(id), params=params)
def delete(self, id):
"""Delete a rule.
Args:
id (str): The id of the rule to delete.
See: https://auth0.com/docs/api/management/v2#!/Rules/delete_rules_by_id
"""
return self.client.delete(self._url(id))
def update(self, id, body):
"""Update an existing rule
Args:
id (str): The id of the rule to modify.
body (dict): Attributes to modify.
See: https://auth0.com/docs/api/v2#!/Rules/patch_rules_by_id
"""
return self.client.patch(self._url(id), data=body)
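# --- Illustrative usage (added sketch; not part of the original module) ---
# The domain and token are placeholders; see the class docstring for details.
#
# rules = Rules('username.auth0.com', 'MGMT_API_V2_TOKEN')
# for rule in rules.all(enabled=True, fields=['id', 'name']):
#     print(rule['id'], rule['name'])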
|
auth0/auth0-python
|
auth0/v3/management/rules.py
|
Python
|
mit
| 4,742 | 0.002109 |
import unittest
import os
from katello.tests.core.action_test_utils import CLIOptionTestCase, CLIActionTestCase
from katello.tests.core.organization import organization_data
from katello.tests.core.template import template_data
import katello.client.core.template
from katello.client.core.template import Delete
from katello.client.api.utils import ApiDataError
class RequiredCLIOptionsTests(CLIOptionTestCase):
#requires: organization, name
#optional: environment (defaults to Library)
action = Delete()
disallowed_options = [
('--environment=dev', '--name=template_1'),
('--environment=dev', '--org=ACME'),
]
allowed_options = [
('--org=ACME', '--name=template_1'),
('--org=ACME', '--environment=dev', '--name=template_1'),
]
class TemplateInfoTest(CLIActionTestCase):
ORG = organization_data.ORGS[0]
ENV = organization_data.ENVS[0]
TPL = template_data.TEMPLATES[0]
OPTIONS = {
'org': ORG['name'],
'environment': ENV['name'],
'name': TPL['name'],
}
def setUp(self):
self.set_action(Delete())
self.set_module(katello.client.core.template)
self.mock_printer()
self.mock_options(self.OPTIONS)
self.mock(self.module, 'get_template', self.TPL)
self.mock(self.action.api, 'delete')
def test_it_finds_the_template(self):
self.run_action()
self.module.get_template.assert_called_once_with(self.ORG['name'], self.ENV['name'], self.TPL['name'])
def test_it_returns_error_when_template_not_found(self):
self.mock(self.module, 'get_template').side_effect = ApiDataError
self.run_action(os.EX_DATAERR)
def test_it_returns_success_when_template_found(self):
self.run_action(os.EX_OK)
def test_it_calls_delete_api(self):
self.run_action()
self.action.api.delete.assert_called_once_with(self.TPL['id'])
|
iNecas/katello
|
cli/test/katello/tests/core/template/template_delete_test.py
|
Python
|
gpl-2.0
| 1,936 | 0.002583 |
from datetime import datetime
from flask import Blueprint, jsonify, request
from app.dao.fact_notification_status_dao import (
get_total_notifications_for_date_range,
)
from app.dao.fact_processing_time_dao import (
get_processing_time_percentage_for_date_range,
)
from app.dao.services_dao import get_live_services_with_organisation
from app.errors import register_errors
from app.performance_dashboard.performance_dashboard_schema import (
performance_dashboard_request,
)
from app.schema_validation import validate
performance_dashboard_blueprint = Blueprint('performance_dashboard', __name__, url_prefix='/performance-dashboard')
register_errors(performance_dashboard_blueprint)
@performance_dashboard_blueprint.route('')
def get_performance_dashboard():
    # All statistics are as of last night; this matches the existing
    # performance platform and avoids the need to query notifications.
if request.args:
# Is it ok to reuse this? - should probably create a new one
validate(request.args, performance_dashboard_request)
# If start and end date are not set, we are expecting today's stats.
today = str(datetime.utcnow().date())
start_date = datetime.strptime(request.args.get('start_date', today), '%Y-%m-%d').date()
end_date = datetime.strptime(request.args.get('end_date', today), '%Y-%m-%d').date()
total_for_all_time = get_total_notifications_for_date_range(start_date=None, end_date=None)
total_notifications, emails, sms, letters = transform_results_into_totals(total_for_all_time)
totals_for_date_range = get_total_notifications_for_date_range(start_date=start_date, end_date=end_date)
processing_time_results = get_processing_time_percentage_for_date_range(start_date=start_date, end_date=end_date)
services = get_live_services_with_organisation()
stats = {
"total_notifications": total_notifications,
"email_notifications": emails,
"sms_notifications": sms,
"letter_notifications": letters,
"notifications_by_type": transform_into_notification_by_type_json(totals_for_date_range),
"processing_time": transform_processing_time_results_to_json(processing_time_results),
"live_service_count": len(services),
"services_using_notify": transform_services_to_json(services)
}
return jsonify(stats)
def transform_results_into_totals(total_notifications_results):
total_notifications = 0
emails = 0
sms = 0
letters = 0
for x in total_notifications_results:
total_notifications += x.emails
total_notifications += x.sms
total_notifications += x.letters
emails += x.emails
sms += x.sms
letters += x.letters
return total_notifications, emails, sms, letters
def transform_into_notification_by_type_json(total_notifications):
j = []
for x in total_notifications:
j.append({"date": x.bst_date, "emails": x.emails, "sms": x.sms, "letters": x.letters})
return j
def transform_processing_time_results_to_json(processing_time_results):
j = []
for x in processing_time_results:
j.append({"date": x.date, "percentage_under_10_seconds": x.percentage})
return j
def transform_services_to_json(services_results):
j = []
for x in services_results:
j.append({"service_id": x.service_id, "service_name": x.service_name,
"organisation_id": x.organisation_id, "organisation_name": x.organisation_name}
)
return j
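# --- Illustrative sketch (added; not part of the original module) ---
# The transform helpers only need rows exposing .bst_date/.emails/.sms/
# .letters, so a namedtuple with made-up figures is enough to exercise them.
#
# from collections import namedtuple
# Row = namedtuple('Row', 'bst_date emails sms letters')
# rows = [Row('2020-01-01', 10, 5, 1), Row('2020-01-02', 20, 0, 2)]
# transform_results_into_totals(rows)             # -> (38, 30, 5, 3)
# transform_into_notification_by_type_json(rows)  # -> list of per-day dicts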
|
alphagov/notifications-api
|
app/performance_dashboard/rest.py
|
Python
|
mit
| 3,531 | 0.003398 |
# This file is part of Korman.
#
# Korman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Korman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Korman. If not, see <http://www.gnu.org/licenses/>.
import bpy
from bpy.props import *
from PyHSPlasma import *
from .base import PlasmaModifierProperties
from ..prop_world import game_versions
from ...exporter import ExportError
from ... import idprops
class PlasmaVersionedNodeTree(idprops.IDPropMixin, bpy.types.PropertyGroup):
name = StringProperty(name="Name")
version = EnumProperty(name="Version",
description="Plasma versions this node tree exports under",
items=game_versions,
options={"ENUM_FLAG"},
default=set(list(zip(*game_versions))[0]))
node_tree = PointerProperty(name="Node Tree",
description="Node Tree to export",
type=bpy.types.NodeTree)
node_name = StringProperty(name="Node Ref",
description="Attach a reference to this node")
@classmethod
def _idprop_mapping(cls):
return {"node_tree": "node_tree_name"}
def _idprop_sources(self):
return {"node_tree_name": bpy.data.node_groups}
class PlasmaAdvancedLogic(PlasmaModifierProperties):
pl_id = "advanced_logic"
bl_category = "Logic"
bl_label = "Advanced"
bl_description = "Plasma Logic Nodes"
bl_icon = "NODETREE"
logic_groups = CollectionProperty(type=PlasmaVersionedNodeTree)
active_group_index = IntProperty(options={"HIDDEN"})
def export(self, exporter, bo, so):
version = exporter.mgr.getVer()
for i in self.logic_groups:
our_versions = [globals()[j] for j in i.version]
if version in our_versions:
if i.node_tree is None:
raise ExportError("'{}': Advanced Logic is missing a node tree for '{}'".format(bo.name, i.version))
# If node_name is defined, then we're only adding a reference. We will make sure that
# the entire node tree is exported once before the post_export step, however.
if i.node_name:
exporter.want_node_trees[i.node_tree.name] = (bo, so)
node = i.node_tree.nodes.get(i.node_name, None)
if node is None:
raise ExportError("Node '{}' does not exist in '{}'".format(i.node_name, i.node_tree.name))
# We are going to assume get_key will do the adding correctly. Single modifiers
# should fetch the appropriate SceneObject before doing anything, so this will
# be a no-op in that case. Multi modifiers should accept any SceneObject, however
node.get_key(exporter, so)
else:
exporter.node_trees_exported.add(i.node_tree.name)
i.node_tree.export(exporter, bo, so)
def harvest_actors(self):
actors = set()
for i in self.logic_groups:
actors.update(i.node_tree.harvest_actors())
return actors
class PlasmaSpawnPoint(PlasmaModifierProperties):
pl_id = "spawnpoint"
bl_category = "Logic"
bl_label = "Spawn Point"
bl_description = "Point at which avatars link into the Age"
def export(self, exporter, bo, so):
# Not much to this modifier... It's basically a flag that tells the engine, "hey, this is a
# place the avatar can show up." Nice to have a simple one to get started with.
spawn = exporter.mgr.add_object(pl=plSpawnModifier, so=so, name=self.key_name)
@property
def requires_actor(self):
return True
class PlasmaMaintainersMarker(PlasmaModifierProperties):
pl_id = "maintainersmarker"
bl_category = "Logic"
bl_label = "Maintainer's Marker"
bl_description = "Designates an object as the D'ni coordinate origin point of the Age."
bl_icon = "OUTLINER_DATA_EMPTY"
calibration = EnumProperty(name="Calibration",
description="State of repair for the Marker",
items=[
("kBroken", "Broken",
"A marker which reports scrambled coordinates to the KI."),
("kRepaired", "Repaired",
"A marker which reports blank coordinates to the KI."),
("kCalibrated", "Calibrated",
"A marker which reports accurate coordinates to the KI.")
])
def export(self, exporter, bo, so):
maintmark = exporter.mgr.add_object(pl=plMaintainersMarkerModifier, so=so, name=self.key_name)
maintmark.calibration = getattr(plMaintainersMarkerModifier, self.calibration)
@property
def requires_actor(self):
return True
|
dpogue/korman
|
korman/properties/modifiers/logic.py
|
Python
|
gpl-3.0
| 5,501 | 0.003272 |
__author__ = 'ganeshchand'
import re
def regex_search(pattern_string, string_source):
if re.search(pattern_string,string_source):
print("%s matched %s" % (pattern_string, string_source))
else:
print("%s did not match %s" % (pattern_string, string_source))
# matching a pattern in one string
mystring_anchors = 'aaaaa!@#$!@#$aaaaaadefg'
pattern_withoutanchors = r'@#\$!' # $ must be escaped when it should match a literal dollar sign
# rather than act as an anchor. As an anchor, $ marks the end of the string:
# aab$ matches only strings that end with aab - nothing may follow aab.
regex_search(pattern_withoutanchors, mystring_anchors)
pattern_withanchors = r'defg$'
regex_search(pattern_withanchors, mystring_anchors)
# patterns to be matched
patterns = ["defg$", "^d", "^a", "^a*!"]
# defg$ : string must end with defg
# ^d: must begin with d
# ^a: must begin with a
# ^a*!: must begin with any number of a's (possibly none) followed by !
for patterntobematched in patterns:
regex_search(patterntobematched, mystring_anchors)
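# Extra illustration (added): anchoring both ends at once. ^aaaaa.*defg$ only
# matches strings that start with aaaaa AND end with defg.
regex_search(r'^aaaaa.*defg$', mystring_anchors)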
# matching a pattern in an array of string
|
ganeshchand/python3
|
advanced/regular_expression/regular_expresion_anchors.py
|
Python
|
apache-2.0
| 1,306 | 0.011485 |
#!/usr/bin/env python
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2012-2015, Michigan State University.
# Copyright (C) 2015, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: khmer-project@idyll.org
"""
Use a set of query reads to sweep out overlapping reads from another file.
% python scripts/sweep-reads2.py <query reads> <search reads>
Results end up in <search reads>.sweep2.
Use '-h' for parameter help.
"""
import sys
import khmer
import os.path
import screed
from khmer import khmer_args
from khmer.khmer_args import (build_nodegraph_args, DEFAULT_MAX_TABLESIZE)
from khmer.utils import broken_paired_reader, write_record, write_record_pair
def main():
parser = build_nodegraph_args()
parser.add_argument('-o', '--outfile',
help='output file; default is "infile".sweep2')
parser.add_argument('-q', '--quiet')
parser.add_argument('input_filename')
parser.add_argument('read_filename')
args = parser.parse_args()
inp = args.input_filename
readsfile = args.read_filename
outfile = os.path.basename(readsfile) + '.sweep2'
if args.outfile:
outfile = args.outfile
outfp = open(outfile, 'w')
# create a nodegraph data structure
ht = khmer_args.create_countgraph(args)
# load contigs, connect into N partitions
print('loading input reads from', inp)
ht.consume_seqfile(inp)
print('starting sweep.')
m = 0
K = ht.ksize()
instream = screed.open(readsfile)
for n, is_pair, read1, read2 in broken_paired_reader(instream):
if n % 10000 == 0:
print('...', n, m)
if is_pair:
count1 = ht.get_median_count(read1.sequence)[0]
count2 = ht.get_median_count(read2.sequence)[0]
if count1 or count2:
m += 1
write_record_pair(read1, read2, outfp)
else:
count = ht.get_median_count(read1.sequence)[0]
if count:
m += 1
write_record(read1, outfp)
if __name__ == '__main__':
main()
# vim: set filetype=python tabstop=4 softtabstop=4 shiftwidth=4 expandtab:
# vim: set textwidth=79:
|
souravsingh/khmer
|
sandbox/sweep-reads2.py
|
Python
|
bsd-3-clause
| 3,734 | 0.000268 |
#!/usr/bin/env python
# encoding: utf-8
# -------------------------------------------------------------------------------
# version: ??
# author: fernando
# license: MIT License
# contact: iw518@163.com
# purpose: views
# date: 2016-12-14
# copyright: copyright 2016 Xu, Aiwu
# -------------------------------------------------------------------------------
from flask import redirect, url_for, render_template
from app.model.models import Team
from . import hr
from .forms import RegisterForm
@hr.route('/team_manage', methods=['POST', 'GET'])
def team_manage():
form = RegisterForm()
if form.validate_on_submit():
Team(job_id=form.job_selections.data, user_id=form.user_selections.data)
return redirect(url_for('order.employee'))
return render_template('hr/team_manage.html', form=form)
|
iw518/fernando
|
app/main/hr/views.py
|
Python
|
gpl-3.0
| 859 | 0.001164 |
#!/usr/bin/env python
# encoding: utf-8
'A simple client for accessing api.ly.g0v.tw.'
import json
import unittest
try:
import urllib.request as request
import urllib.parse as urlparse
except:
import urllib2 as request
import urllib as urlparse
def assert_args(func, *args):
def inner(*args):
required_arg = args[1]
assert(len(required_arg) > 0)
return func(*args)
return inner
class LY_G0V_Client:
BASE_URL = 'http://api-beta.ly.g0v.tw/v0/'
# BASE_URL = 'http://api.ly.g0v.tw/v0/'
def _fetch_data(self, url_path):
URL = LY_G0V_Client.BASE_URL + url_path
try:
f = request.urlopen(URL)
r = f.read()
r = r.decode('utf-8')
return json.loads(r)
except Exception as e:
print("Failed to call " + URL)
raise e
def fetch_all_bills(self):
'Fetch all bills.'
return self._fetch_data('collections/bills')
def fetch_all_motions(self):
'Fetch all motions.'
return self._fetch_data('collections/motions')
def fetch_all_sittings(self):
'Fetch all sittings.'
return self._fetch_data('collections/sittings')
@assert_args
def fetch_bill(self, bill_id):
'Fetch metadata of a specific bill.'
return self._fetch_data('collections/bills/' + str(bill_id))
@assert_args
def fetch_bill_data(self, bill_id):
'Fetch data of a specific bill.'
assert(len(bill_id) > 0)
return self._fetch_data('collections/bills/' + str(bill_id) + '/data')
@assert_args
def fetch_motions_related_with_bill(self, bill_id):
'Fetch motions related with a specific bill.'
query = json.dumps({'bill_ref': bill_id})
query = urlparse.quote(query)
return self._fetch_data('collections/motions/?q='+query)
@assert_args
    def fetch_sitting(self, sitting_id):
        'Fetch metadata of a specific sitting.'
        return self._fetch_data('collections/sittings/' + str(sitting_id))
class TestClient(unittest.TestCase):
def setUp(self):
import time
time.sleep(1)
self.client = LY_G0V_Client()
def _test_bill(self, bill):
self.assertTrue(isinstance(bill, dict), str(type(bill)))
keys = ('proposed_by', 'doc', 'abstract', 'sponsors',
'summary', 'bill_ref', 'motions', 'cosponsors',
'bill_id');
for key in keys:
self.assertTrue(key in bill)
if isinstance(bill['doc'], dict):
self.assertTrue('pdf' in bill['doc'])
self.assertTrue('doc' in bill['doc'])
def _test_bills(self, bills):
for key in ('entries', 'paging'):
self.assertTrue(key in bills)
for key in ('l', 'sk', 'count'):
self.assertTrue(key in bills['paging'])
for bill in bills['entries']:
self._test_bill(bill)
def _test_motion(self, motion):
self.assertTrue(isinstance(motion, dict), str(type(motion)))
keys = ('result', 'resolution', 'motion_class', 'bill_id',
'agenda_item', 'bill_ref', 'tts_id',
'subitem', 'status', 'sitting_id', 'item',
'summary', 'tts_seq', 'proposed_by', 'doc')
for key in keys:
self.assertTrue(key in motion, key)
if isinstance(motion['doc'], dict):
self.assertTrue('pdf' in motion['doc'])
self.assertTrue('doc' in motion['doc'])
def _test_motions(self, motions):
self.assertTrue(isinstance(motions, dict), str(type(motions)))
for key in ('entries', 'paging'):
self.assertTrue(key in motions)
for key in ('l', 'sk', 'count'):
self.assertTrue(key in motions['paging'])
for motion in motions['entries']:
self._test_motion(motion)
def _test_data(self, data):
for key in ('related', 'content'):
self.assertTrue(key in data)
self.assertTrue(isinstance(data['related'], list))
self.assertTrue(isinstance(data['content'], list))
for item in data['content']:
content_keys = ('name', 'type', 'content', 'header')
for content_key in content_keys:
self.assertTrue(content_key in item)
self.assertTrue(len(item['name']) > 0)
self.assertTrue(isinstance(item['name'], str) or \
isinstance(item['name'], unicode))
self.assertTrue(len(item['type']) > 0)
self.assertTrue(isinstance(item['type'], str) or \
isinstance(item['type'], unicode))
self.assertTrue(len(item['content']) > 0)
self.assertTrue(isinstance(item['content'], list))
for content in item['content']:
self.assertTrue(isinstance(content, list))
for line in content:
self.assertTrue(isinstance(line, str))
self.assertTrue(len(item['header']) > 0)
self.assertTrue(isinstance(item['header'], list))
for header in item['header']:
self.assertTrue(isinstance(header, str) or \
isinstance(header, unicode))
def _test_sitting(self, sitting):
self.assertTrue(isinstance(sitting, dict), str(type(sitting)))
keys = ('dates', 'ad', 'videos', 'extra', 'motions',
'sitting', 'summary', 'session', 'committee', 'id',
'name')
for key in keys:
self.assertTrue(key in sitting, key)
def _test_sittings(self, sittings):
self.assertTrue(isinstance(sittings, dict), str(type(sittings)))
for key in ('entries', 'paging'):
self.assertTrue(key in sittings)
for key in ('l', 'sk', 'count'):
self.assertTrue(key in sittings['paging'])
for sitting in sittings['entries']:
self._test_sitting(sitting)
def test_all_bills(self):
bills = self.client.fetch_all_bills()
self._test_bills(bills)
def test_all_motions(self):
motions = self.client.fetch_all_motions()
self._test_motions(motions)
def test_all_sittings(self):
sittings = self.client.fetch_all_sittings()
self._test_sittings(sittings)
def test_fetch_bill(self):
bill = self.client.fetch_bill('1021021071000400')
self._test_bill(bill)
def test_fetch_bill_data(self):
data = self.client.fetch_bill_data('1021021071000400')
self._test_data(data)
def test_fetch_motions_related_with_bill(self):
motions = self.client.fetch_motions_related_with_bill('1021021071000400')
self._test_motions(motions)
if __name__ == '__main__':
unittest.main()
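# Hedged usage sketch (requires network access; the bill id below is the one
# already used by the tests above):
#   client = LY_G0V_Client()
#   bill = client.fetch_bill('1021021071000400')
#   print bill['bill_id']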
|
zonble/lyg0vtw_client.py
|
lyg0vtw_client/lyg0vtw_client.py
|
Python
|
gpl-2.0
| 5,779 | 0.029071 |
import redis
class BetaRedis(redis.StrictRedis):
def georadius(self, name, *values):
return self.execute_command('GEORADIUS', name, *values)
def geoadd(self, name, *values):
return self.execute_command('GEOADD', name, *values)
def geopos(self, name, *values):
return self.execute_command('GEOPOS', name, *values)
class RedisHeatMap:
REDIS_KEY = 'heatmap'
REDIS_KEY_GEO = REDIS_KEY + '_GEO'
REDIS_KEY_HASH = REDIS_KEY + '_HASH'
def __init__(self, host='localhost', port=6379, db=0):
self.r = BetaRedis(host=host, port=port, db=db)
self.r.flushdb()
def gen(self, data, distance=200000, min_sum=1):
for point in data:
try:
res = self.r.georadius(self.REDIS_KEY_GEO, point['lng'], point['lat'], distance, 'm')
if not res:
self.r.geoadd(self.REDIS_KEY_GEO, point['lng'], point['lat'], point['key'])
self.r.hset(self.REDIS_KEY_HASH, point['key'], 1)
else:
self.r.hincrby(self.REDIS_KEY_HASH, res[0])
except redis.exceptions.ResponseError as e:
pass
for key in self.r.hscan_iter(self.REDIS_KEY_HASH):
lng, lat = map(lambda x: x.decode(), self.r.geopos(self.REDIS_KEY_GEO, key[0].decode())[0])
if int(key[1]) >= min_sum:
yield {'key': key[0].decode(), 'lat': lat, 'lng': lng, 'sum': int(key[1])}
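# Hedged usage sketch: the sample points and a Redis server on localhost:6379
# are assumptions; note that the constructor calls flushdb() on the chosen db.
if __name__ == '__main__':
    sample_points = [
        {'key': 'a', 'lat': 48.8566, 'lng': 2.3522},
        {'key': 'b', 'lat': 48.8570, 'lng': 2.3530},
    ]
    heatmap = RedisHeatMap()
    for cell in heatmap.gen(sample_points, distance=5000, min_sum=1):
        print(cell)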
|
gdelt-analysis/worker
|
src/HeatMap.py
|
Python
|
gpl-3.0
| 1,473 | 0.002716 |
import copy
from corehq.pillows.case import CasePillow
from corehq.pillows.mappings.reportcase_mapping import REPORT_CASE_MAPPING, REPORT_CASE_INDEX
from django.conf import settings
from .base import convert_property_dict
class ReportCasePillow(CasePillow):
"""
Simple/Common Case properties Indexer
an extension to CasePillow that provides for indexing of custom case properties
"""
es_alias = "report_cases"
es_type = "report_case"
es_index = REPORT_CASE_INDEX
default_mapping = REPORT_CASE_MAPPING
def get_unique_id(self):
return self.calc_meta()
def change_transform(self, doc_dict):
if self.get_domain(doc_dict) not in getattr(settings, 'ES_CASE_FULL_INDEX_DOMAINS', []):
#full indexing is only enabled for select domains on an opt-in basis
return None
doc_ret = copy.deepcopy(doc_dict)
convert_property_dict(doc_ret, self.default_mapping, override_root_keys=['_id', 'doc_type', '_rev', '#export_tag'])
return doc_ret
|
puttarajubr/commcare-hq
|
corehq/pillows/reportcase.py
|
Python
|
bsd-3-clause
| 1,031 | 0.00582 |
from django.urls import include, path
from django.contrib import admin
urlpatterns = [
path('', include('orcamentos.core.urls', namespace='core')),
path('crm/', include('orcamentos.crm.urls', namespace='crm')),
path('proposal/', include('orcamentos.proposal.urls', namespace='proposal')),
path('admin/', admin.site.urls),
]
|
rg3915/orcamentos
|
orcamentos/urls.py
|
Python
|
mit
| 341 | 0.002933 |
import imghdr
from wsgiref.util import FileWrapper
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.http import (
HttpResponse,
HttpResponsePermanentRedirect,
StreamingHttpResponse,
)
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.decorators import classonlymethod
from django.views.generic import View
from wagtail.images import get_image_model
from wagtail.images.exceptions import InvalidFilterSpecError
from wagtail.images.models import SourceImageIOError
from wagtail.images.utils import generate_signature, verify_signature
from wagtail.utils.sendfile import sendfile
def generate_image_url(image, filter_spec, viewname="wagtailimages_serve", key=None):
signature = generate_signature(image.id, filter_spec, key)
url = reverse(viewname, args=(signature, image.id, filter_spec))
url += image.file.name[len("original_images/") :]
return url
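# Hedged usage sketch: "image" is an assumed wagtail Image instance and
# 'fill-300x200' an example filter spec.
#   signed_url = generate_image_url(image, 'fill-300x200')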
class ServeView(View):
model = get_image_model()
action = "serve"
key = None
@classonlymethod
def as_view(cls, **initkwargs):
if "action" in initkwargs:
if initkwargs["action"] not in ["serve", "redirect"]:
raise ImproperlyConfigured(
"ServeView action must be either 'serve' or 'redirect'"
)
return super(ServeView, cls).as_view(**initkwargs)
def get(self, request, signature, image_id, filter_spec, filename=None):
if not verify_signature(
signature.encode(), image_id, filter_spec, key=self.key
):
raise PermissionDenied
image = get_object_or_404(self.model, id=image_id)
# Get/generate the rendition
try:
rendition = image.get_rendition(filter_spec)
except SourceImageIOError:
return HttpResponse(
"Source image file not found", content_type="text/plain", status=410
)
except InvalidFilterSpecError:
return HttpResponse(
"Invalid filter spec: " + filter_spec,
content_type="text/plain",
status=400,
)
return getattr(self, self.action)(rendition)
def serve(self, rendition):
# Open and serve the file
rendition.file.open("rb")
image_format = imghdr.what(rendition.file)
return StreamingHttpResponse(
FileWrapper(rendition.file), content_type="image/" + image_format
)
def redirect(self, rendition):
# Redirect to the file's public location
return HttpResponsePermanentRedirect(rendition.url)
serve = ServeView.as_view()
class SendFileView(ServeView):
backend = None
def serve(self, rendition):
return sendfile(self.request, rendition.file.path, backend=self.backend)
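# Hedged wiring sketch (the URL pattern shape is inferred from the view's
# get() signature, not taken from this file):
#   from django.urls import re_path
#   urlpatterns = [
#       re_path(r'^images/([^/]*)/(\d*)/([^/]*)/[^/]*$',
#               ServeView.as_view(), name='wagtailimages_serve'),
#   ]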
|
wagtail/wagtail
|
wagtail/images/views/serve.py
|
Python
|
bsd-3-clause
| 2,857 | 0.0014 |
#
# SVGdatashapes_dt 0.3.6 SVGdatashapes.com github.com/pepprseed/svgdatashapes
# Copyright 2016-8 Stephen C. Grubb stevegrubb@gmail.com MIT License
#
# This module provides date / time support for svgdatashapes
#
import svgdatashapes
from svgdatashapes import p_dtformat
import collections
import datetime as d
import time
import calendar
class AppDt_Error(Exception): pass
def dateformat( format=None ):
# set the format string to be used for parsing datetimes found in the input data
# format codes explained here: https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior
# Note that when they say zero-padded this refers to output only; parsing can handle eg. 3/4/2015
global p_dtformat
if format == None: raise AppDt_Error( "dateformat() expecting 'format' arg" )
p_dtformat = format
return True
def toint( dateval=None ):
# for the given date/time string in whatever format, return the int utime value
# toint( "1970-01-01.00:00" ) == 0
if dateval == None: return None
try:
tt = d.datetime.strptime( dateval, p_dtformat ).timetuple() # parse out the components
utime = calendar.timegm( tt )
except: raise AppDt_Error( "toint() got bad datetime value: " + str(dateval) + " (expecting format of " + p_dtformat + ")" )
return utime
def make( utime, fmt=None ):
# format the given dt value as per fmt...
if utime == None: return None
if fmt == None: fmt = p_dtformat
try:
# tt = time.gmtime( utime )
outstr = d.datetime.utcfromtimestamp(utime).strftime( fmt )
except: raise AppDt_Error( "nicedt error on utime: " + str(utime) + " and format: " + p_dtformat )
return outstr
def datediff( val1, val2, result="days" ):
# return integer number of days difference (dt1 - dt2)
try: dt1 = d.datetime.strptime( val1, p_dtformat )
except: raise AppDt_Error( "datediff() invalid val1 arg: " + str(val1) )
try: dt2 = d.datetime.strptime( val2, p_dtformat )
except: raise AppDt_Error( "datediff() invalid val2 arg: " + str(val2) )
if result != "seconds":
dt1 = dt1.replace( second=0, microsecond=0 )
dt2 = dt2.replace( second=0, microsecond=0 )
if result == "days":
dt1 = dt1.replace( hour=0, minute=0 )
dt2 = dt2.replace( hour=0, minute=0 )
div = 86400
elif result == "hours":
dt1 = dt1.replace( minute=0 )
dt2 = dt2.replace( minute=0 )
div = 3600
elif result == "minutes": div = 60
elif result == "seconds": div = 1
return int(calendar.timegm( dt1.timetuple() ) - calendar.timegm( dt2.timetuple() ) ) / div
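# Hedged usage sketch of the helpers above (the format string is an assumption):
#   dateformat('%Y-%m-%d')
#   u = toint('2016-03-01')                              # seconds since 1970-01-01 UTC
#   make(u, '%Y/%m/%d')                                  # -> '2016/03/01'
#   datediff('2016-03-03', '2016-03-01', result='days')  # -> 2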
def daterange( column=None, datarows=None, nearest=None, inc=None, stubformat=None,
inc2=None, stub2format=None, stub2place="append", stub2first=True ):
dfindex = svgdatashapes._getdfindex( column, datarows )
    if nearest == None: raise AppDt_Error( "daterange() requires a nearest= arg" )
if inc == None: inc = nearest
# if inc != nearest:
# if nearest == "year" and inc == "month": pass
# elif nearest == "month" and inc == "day": pass
# elif nearest == "day" and inc == "hour": pass
# else: raise AppDt_Error( "findrange() invalid nearest= and inc= combination" )
if stubformat == None: stubformat = p_dtformat
# find raw min and max
dmin = 999999999999999999999999999; dmax = -999999999999999999999999999;
for row in datarows:
if dfindex == -1: strval = row[column] # dict rows
else: strval = row[dfindex]
utime = toint( strval )
if utime < dmin: dmin = utime
if utime > dmax: dmax = utime
dtmin = d.datetime.utcfromtimestamp( dmin ).replace( second=0, microsecond=0 ) # always zero out seconds and ms
dtmax = d.datetime.utcfromtimestamp( dmax ).replace( second=0, microsecond=0 )
if nearest[-6:] != "minute": dtmin.replace( minute=0 ); dtmax.replace( minute=0 ) # usually zero out minutes
if nearest == "year":
dtmin = dtmin.replace( month=1, day=1, hour=0 )
yr = dtmax.year;
dtmax = dtmax.replace( year=yr+1, month=1, day=1, hour=0 )
elif nearest == "3month":
newmon = ((dtmin.month / 4) * 3) + 1
dtmin = dtmin.replace( month=newmon, day=1, hour=0 )
newmon = (((dtmax.month / 4)+1) * 3) + 1
yr = dtmax.year
if newmon >= 12: newmon = 1; yr += 1;
dtmax = dtmax.replace( year=yr, month=newmon, day=1, hour=0 )
elif nearest == "month":
dtmin = dtmin.replace( day=1, hour=0 )
mon = dtmax.month; yr = dtmax.year;
if mon == 12: dtmax = dtmax.replace( year=yr+1, month=1, day=1, hour=0 )
else: dtmax = dtmax.replace( month=mon+1, day=1, hour=0 )
elif nearest == "week" or nearest[:8] == "week_day": # week = Monday-based week; or week_dayN where N=1 for Tues; N=6 for Sun, etc
wday = time.gmtime( dmin ).tm_wday # struct_time tm_wday convention is that 0 = monday
dmin -= (wday*86400) # move timestamp back by necessary no. of days to reach opening week boundary (86400 sec per day)
if nearest[:8] == "week_day": dmin -= ((7 - int(nearest[-1:])) * 86400)
dtmin = d.datetime.utcfromtimestamp( dmin ).replace( hour=0 )
wday = 7 - time.gmtime( dmax ).tm_wday
dmax += (wday*86400) # move timestamp fwd by necessary no. of days to reach the next week boundary
if nearest[:8] == "week_day": dmax += ((7 - int(nearest[-1:])) * 86400)
dtmax = d.datetime.utcfromtimestamp( dmax ).replace( hour=0 )
elif nearest == "day":
dtmin = dtmin.replace( hour=0 )
dmax += 86400 # jump forward one day
dtmax = d.datetime.utcfromtimestamp( dmax ).replace( hour=0 )
elif nearest in ["12hour", "6hour", "4hour", "3hour"]:
nhr = int(nearest[:-4])
newhr = (dtmin.hour / nhr) * nhr
dtmin = dtmin.replace( hour=newhr )
newhr = ((dtmax.hour / nhr)+1) * nhr
day = dtmax.day
if newhr >= 24: newhr = 0; day += 1
dtmax = dtmax.replace( day=day, hour=newhr )
elif nearest == "hour":
dtmin = dtmin.replace( minute=0 )
hr = dtmax.hour
if hr == 23:
dmax += 3600 # jump forward one hour (there are 3600 sec per hour)
dtmax = d.datetime.utcfromtimestamp( dmax ) # no replace necessary
else: dtmax = dtmax.replace( hour=hr+1, minute=0 )
elif nearest in [ "30minute", "10minute" ]:
nmin = int(nearest[:-6])
newmin = (dtmin.minute / nmin ) * nmin
dtmin = dtmin.replace( minute=newmin )
newmin = ((dtmax.minute / nmin)+1) * nmin
hr = dtmax.hour
        if newmin >= 60: newmin = 0; hr += 1   # date rollover not implemented
dtmax = dtmax.replace( hour=hr, minute=newmin )
elif nearest == "minute":
# dtmin is all set, just compute dtmax...
newmin = dtmax.minute + 1
hr = dtmax.hour
if newmin >= 60: newmin = 0; hr += 1
dtmax = dtmax.replace( hour=hr, minute=newmin )
else: raise AppDt_Error( "findrange got unrecognized nearest= arg: " + str(nearest) )
axmin = calendar.timegm( dtmin.timetuple() )
axmax = calendar.timegm( dtmax.timetuple() )
# at this point, dtmin and dtmax are the axis min and max as datetime type
# and axmin and axmax are the axis min and max as int timestamps
# now build a list of ready-to-render stubs with int positions...
# will eventually add options for month rollover, year rollover, day rollover, etc.
stublist = []
iloop = 0
dtcur = dtmin
utime = axmin
stub = dtcur.strftime( stubformat ) # do the first stub
if inc2 != None and stub2first == True:
stub2 = dtcur.strftime( stub2format )
if stub2place == "prepend": stub = stub2 + stub
elif stub2place == "replace": stub = stub2
else: stub = stub + stub2
stublist.append( [utime, stub] )
while iloop < 500: # sanity backstop
yr = dtcur.year
mon = dtcur.month
day = dtcur.day
if inc == "month":
if mon == 12: dtcur = dtcur.replace( year=yr+1, month=1 )
else: dtcur = dtcur.replace( month=mon+1 )
elif inc == "3month":
if mon >= 10: dtcur = dtcur.replace( year=yr+1, month=1 )
else: dtcur = dtcur.replace( month=mon+3 )
elif inc == "week" or inc[:8] == "week_day": utime += 604800 # number of seconds in a 7 day week
elif inc == "day": utime += 86400
elif inc == "12hour": utime += 43200
elif inc == "6hour": utime += 21600
elif inc == "4hour": utime += 14400
elif inc == "3hour": utime += 10800
elif inc == "hour": utime += 3600
elif inc == "30minute": utime += 1800
elif inc == "10minute": utime += 600
elif inc == "minute": utime += 60
else: raise AppDt_Error( "findrange() does not recognize inc=" + str(inc) )
if inc not in ["month", "3month"]: dtcur = d.datetime.utcfromtimestamp( utime )
if inc != "day": utime = calendar.timegm( dtcur.timetuple() )
if utime > axmax: break
# create the formatted stub
stub = dtcur.strftime( stubformat )
# stub2: check for rollover to new year (etc)
if (inc2 == "year" and dtcur.year != yr) or \
(inc2 in ["month","3month"] and dtcur.month != mon) or \
(inc2 == "day" and dtcur.day != day):
stub2 = dtcur.strftime( stub2format )
if stub2place == "prepend": stub = stub2 + stub
elif stub2place == "replace": stub = stub2
else: stub = stub + stub2
stublist.append( [ utime, stub ] )
iloop += 1
Dtrange = collections.namedtuple( "Findrange", ["axmin", "axmax", "stublist"] )
return Dtrange( axmin, axmax, stublist )
|
grubbcode/minplot
|
svgdatashapes_dt.py
|
Python
|
mit
| 9,956 | 0.028726 |
"""tictactoe URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from tictactoe import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('tictactoe/', include('tictactoe.game.urls'), name='game'),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# serving static files like this should not be done in production
|
jsonbrazeal/tictactoe
|
tictactoe/urls.py
|
Python
|
mit
| 1,037 | 0 |
# testing/schema.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import exclusions
from .. import schema, event
from . import config
__all__ = 'Table', 'Column',
table_options = {}
def Table(*args, **kw):
"""A schema.Table wrapper/hook for dialect-specific tweaks."""
test_opts = dict([(k, kw.pop(k)) for k in list(kw)
if k.startswith('test_')])
kw.update(table_options)
if exclusions.against(config._current, 'mysql'):
if 'mysql_engine' not in kw and 'mysql_type' not in kw:
if 'test_needs_fk' in test_opts or 'test_needs_acid' in test_opts:
kw['mysql_engine'] = 'InnoDB'
else:
kw['mysql_engine'] = 'MyISAM'
# Apply some default cascading rules for self-referential foreign keys.
    # MySQL InnoDB has some issues around selecting self-refs too.
if exclusions.against(config._current, 'firebird'):
table_name = args[0]
unpack = (config.db.dialect.
identifier_preparer.unformat_identifiers)
# Only going after ForeignKeys in Columns. May need to
# expand to ForeignKeyConstraint too.
fks = [fk
for col in args if isinstance(col, schema.Column)
for fk in col.foreign_keys]
for fk in fks:
# root around in raw spec
ref = fk._colspec
if isinstance(ref, schema.Column):
name = ref.table.name
else:
# take just the table name: on FB there cannot be
# a schema, so the first element is always the
# table name, possibly followed by the field name
name = unpack(ref)[0]
if name == table_name:
if fk.ondelete is None:
fk.ondelete = 'CASCADE'
if fk.onupdate is None:
fk.onupdate = 'CASCADE'
return schema.Table(*args, **kw)
def Column(*args, **kw):
"""A schema.Column wrapper/hook for dialect-specific tweaks."""
test_opts = dict([(k, kw.pop(k)) for k in list(kw)
if k.startswith('test_')])
if not config.requirements.foreign_key_ddl.enabled_for_config(config):
args = [arg for arg in args if not isinstance(arg, schema.ForeignKey)]
col = schema.Column(*args, **kw)
if test_opts.get('test_needs_autoincrement', False) and \
kw.get('primary_key', False):
if col.default is None and col.server_default is None:
col.autoincrement = True
# allow any test suite to pick up on this
col.info['test_needs_autoincrement'] = True
# hardcoded rule for firebird, oracle; this should
# be moved out
if exclusions.against(config._current, 'firebird', 'oracle'):
def add_seq(c, tbl):
c._init_items(
schema.Sequence(_truncate_name(
config.db.dialect, tbl.name + '_' + c.name + '_seq'),
optional=True)
)
event.listen(col, 'after_parent_attach', add_seq, propagate=True)
return col
def _truncate_name(dialect, name):
if len(name) > dialect.max_identifier_length:
return name[0:max(dialect.max_identifier_length - 6, 0)] + \
"_" + hex(hash(name) % 64)[2:]
else:
return name
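# Hedged usage sketch inside a dialect-agnostic test suite ("metadata" and the
# Integer import are assumptions from typical SQLAlchemy test code):
#   users = Table('users', metadata,
#                 Column('id', Integer, primary_key=True,
#                        test_needs_autoincrement=True),
#                 test_needs_acid=True)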
|
wildchildyn/autism-website
|
yanni_env/lib/python3.6/site-packages/sqlalchemy/testing/schema.py
|
Python
|
gpl-3.0
| 3,556 | 0 |
# -*- encoding: utf-8 -*-
import argparse
import sys
import traceback
from hashlib import md5
import mailchimp_marketing as MailchimpMarketing
from mailchimp_marketing.api_client import ApiClientError
import requests
from consolemsg import step, error, success
from erppeek import Client
import time
import configdb
ERP_CLIENT = Client(**configdb.erppeek)
MAILCHIMP_CLIENT = MailchimpMarketing.Client(
dict(api_key=configdb.MAILCHIMP_APIKEY, server=configdb.MAILCHIMP_SERVER_PREFIX)
)
def get_member_category_id():
module = 'som_partner_account'
semantic_id = 'res_partner_category_soci'
IrModelData = ERP_CLIENT.model('ir.model.data')
member_category_relation = IrModelData.get_object_reference(
module, semantic_id
)
if member_category_relation:
return member_category_relation[-1]
def get_not_members_email_list():
Soci = ERP_CLIENT.model('somenergia.soci')
ResPartnerAddress = ERP_CLIENT.model('res.partner.address')
category_id = get_member_category_id()
not_members = Soci.search([
('category_id', 'not in', [category_id]),
('ref', 'like', 'S%')
])
not_members_partner_ids = [
soci['partner_id'][0] for soci in Soci.read(not_members, ['partner_id'])
]
address_list = ResPartnerAddress.search(
[('partner_id', 'in', not_members_partner_ids)]
)
emails_list = [
address.get('email', 'not found')
for address in ResPartnerAddress.read(address_list, ['email'])
]
return emails_list
def get_mailchimp_list_id(list_name):
all_lists = MAILCHIMP_CLIENT.lists.get_all_lists(
fields=['lists.id,lists.name'],
count=100
)['lists']
for l in all_lists:
if l['name'] == list_name:
return l['id']
raise Exception("List: <{}> not found".format(list_name))
def get_subscriber_hash(email):
    # md5 needs bytes; .encode() keeps this working on Python 3 str inputs
    subscriber_hash = md5(email.lower().encode('utf-8')).hexdigest()
return subscriber_hash
def archive_members_from_list(list_name, email_list):
list_id = get_mailchimp_list_id(list_name)
operations = []
for email in email_list:
operation = {
"method": "DELETE",
"path": "/lists/{list_id}/members/{subscriber_hash}".format(
list_id=list_id,
subscriber_hash=get_subscriber_hash(email)
),
"operation_id": email,
}
operations.append(operation)
payload = {
"operations": operations
}
try:
response = MAILCHIMP_CLIENT.batches.start(payload)
    except ApiClientError as exc:
        msg = "An error occurred during the archiving batch request, reason: {}"
        error(msg.format(exc.text))
else:
batch_id = response['id']
while response['status'] != 'finished':
time.sleep(2)
response = MAILCHIMP_CLIENT.batches.status(batch_id)
step("Archived operation finished!!")
step("Total operations: {}, finished operations: {}, errored operations: {}".format(
response['total_operations'],
response['finished_operations'],
response['errored_operations']
))
result_summary = requests.get(response['response_body_url'])
result_summary.raise_for_status()
return result_summary.content
def archieve_members_in_list(list_name):
email_list = get_not_members_email_list()
result = archive_members_from_list(list_name, email_list)
return result
def main(list_name, output):
result = archieve_members_in_list(list_name.strip())
with open(output, 'w') as f:
f.write(result)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
        description='Archive e-mails in bulk'
)
parser.add_argument(
'--list',
dest='list_name',
required=True,
help="nom de la llista de mailchimp"
)
parser.add_argument(
'--output',
dest='output',
required=True,
help="Fitxer de sortida amb els resultats"
)
args = parser.parse_args()
try:
main(args.list_name, args.output)
except Exception as e:
traceback.print_exc(file=sys.stdout)
error("El proceso no ha finalizado correctamente: {}", str(e))
else:
success("Script finalizado")
|
Som-Energia/invoice-janitor
|
admin/Baixa_Socis/unsubscribe_members.py
|
Python
|
agpl-3.0
| 4,279 | 0.002104 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
EditScriptDialog.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from processing.modeler.ModelerUtils import ModelerUtils
__author__ = 'Alexander Bruy'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import codecs
import sys
import json
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.Qsci import *
from qgis.core import *
from qgis.utils import iface
from processing.gui.ParametersDialog import ParametersDialog
from processing.gui.HelpEditionDialog import HelpEditionDialog
from processing.algs.r.RAlgorithm import RAlgorithm
from processing.algs.r.RUtils import RUtils
from processing.script.ScriptAlgorithm import ScriptAlgorithm
from processing.script.ScriptUtils import ScriptUtils
from processing.ui.ui_DlgScriptEditor import Ui_DlgScriptEditor
import processing.resources_rc
class ScriptEditorDialog(QDialog, Ui_DlgScriptEditor):
SCRIPT_PYTHON = 0
SCRIPT_R = 1
hasChanged = False
def __init__(self, algType, alg):
QDialog.__init__(self)
self.setupUi(self)
self.setWindowFlags(Qt.WindowMinimizeButtonHint |
Qt.WindowMaximizeButtonHint |
Qt.WindowCloseButtonHint)
# Set icons
self.btnSave.setIcon(
QgsApplication.getThemeIcon('/mActionFileSave.svg'))
self.btnSaveAs.setIcon(
QgsApplication.getThemeIcon('/mActionFileSaveAs.svg'))
self.btnEditHelp.setIcon(QIcon(':/processing/images/edithelp.png'))
self.btnRun.setIcon(QIcon(':/processing/images/runalgorithm.png'))
self.btnCut.setIcon(QgsApplication.getThemeIcon('/mActionEditCut.png'))
self.btnCopy.setIcon(
QgsApplication.getThemeIcon('/mActionEditCopy.png'))
self.btnPaste.setIcon(
QgsApplication.getThemeIcon('/mActionEditPaste.png'))
self.btnUndo.setIcon(QgsApplication.getThemeIcon('/mActionUndo.png'))
self.btnRedo.setIcon(QgsApplication.getThemeIcon('/mActionRedo.png'))
# Connect signals and slots
self.btnSave.clicked.connect(self.save)
self.btnSaveAs.clicked.connect(self.saveAs)
self.btnEditHelp.clicked.connect(self.editHelp)
self.btnRun.clicked.connect(self.runAlgorithm)
self.btnCut.clicked.connect(self.editor.cut)
self.btnCopy.clicked.connect(self.editor.copy)
self.btnPaste.clicked.connect(self.editor.paste)
self.btnUndo.clicked.connect(self.editor.undo)
self.btnRedo.clicked.connect(self.editor.redo)
self.editor.textChanged.connect(lambda: self.setHasChanged(True))
self.alg = alg
self.algType = algType
if self.alg is not None:
self.filename = self.alg.descriptionFile
self.editor.setText(self.alg.script)
else:
self.filename = None
self.update = False
self.help = None
self.setHasChanged(False)
self.editor.setLexerType(self.algType)
def editHelp(self):
if self.alg is None:
if self.algType == self.SCRIPT_PYTHON:
alg = ScriptAlgorithm(None, unicode(self.editor.text()))
elif self.algType == self.SCRIPT_R:
alg = RAlgorithm(None, unicode(self.editor.text()))
else:
alg = self.alg
dlg = HelpEditionDialog(alg)
dlg.exec_()
# We store the description string in case there were not saved
# because there was no filename defined yet
if self.alg is None and dlg.descriptions:
self.help = dlg.descriptions
def save(self):
self.saveScript(False)
def saveAs(self):
self.saveScript(True)
def saveScript(self, saveAs):
if self.filename is None or saveAs:
if self.algType == self.SCRIPT_PYTHON:
scriptDir = ScriptUtils.scriptsFolder()
filterName = self.tr('Python scripts (*.py)')
elif self.algType == self.SCRIPT_R:
scriptDir = RUtils.RScriptsFolder()
filterName = self.tr('Processing R script (*.rsx)')
self.filename = unicode(QFileDialog.getSaveFileName(self,
self.tr('Save script'), scriptDir,
filterName))
if self.filename:
if self.algType == self.SCRIPT_PYTHON \
and not self.filename.lower().endswith('.py'):
self.filename += '.py'
if self.algType == self.SCRIPT_R \
and not self.filename.lower().endswith('.rsx'):
self.filename += '.rsx'
text = unicode(self.editor.text())
if self.alg is not None:
self.alg.script = text
try:
with codecs.open(self.filename, 'w', encoding='utf-8') as fout:
fout.write(text)
except IOError:
QMessageBox.warning(self, self.tr('I/O error'),
self.tr('Unable to save edits. Reason:\n %s')
% unicode(sys.exc_info()[1]))
return
self.update = True
# If help strings were defined before saving the script for
# the first time, we do it here
if self.help:
with open(self.filename + '.help', 'w') as f:
json.dump(self.help, f)
self.help = None
self.setHasChanged(False)
else:
self.filename = None
def setHasChanged(self, hasChanged):
self.hasChanged = hasChanged
self.btnSave.setEnabled(hasChanged)
def runAlgorithm(self):
if self.algType == self.SCRIPT_PYTHON:
alg = ScriptAlgorithm(None, unicode(self.editor.text()))
alg.provider = ModelerUtils.providers['script']
if self.algType == self.SCRIPT_R:
alg = RAlgorithm(None, unicode(self.editor.text()))
alg.provider = ModelerUtils.providers['r']
dlg = alg.getCustomParametersDialog()
if not dlg:
dlg = ParametersDialog(alg)
canvas = iface.mapCanvas()
prevMapTool = canvas.mapTool()
dlg.show()
dlg.exec_()
if canvas.mapTool() != prevMapTool:
try:
canvas.mapTool().reset()
except:
pass
canvas.setMapTool(prevMapTool)
|
yordan-desta/QgisIns
|
python/plugins/processing/gui/ScriptEditorDialog.py
|
Python
|
gpl-2.0
| 7,417 | 0.000404 |
#!/usr/bin/python
"""
sieveshell - remotely manipulate sieve scripts
SYNOPSIS
sieveshell [--user=user] [--authname=authname] [--realm=realm]
[--exec=script] [--auth-mech=mechanism] server
sieveshell --help
sieveshell allows users to manipulate their scripts on a remote server.
It works via MANAGESIEVE, a work in progress protocol.
Use --help to get a list of the currently supported authentication
mechanisms.
The following commands are recognized:
list - list scripts on server
put <filename> [<target name>]
- upload script to server
get <name> [<filename>]
- get script. if no filename display to stdout
    edit <name> - edit a script; if it does not exist, it is created on save
delete <name> - delete script.
activate <name> - set a script as the active script
deactivate - deactivate all scripts
quit - quit
"""
__version__ = "0.4"
__author__ = "Hartmut Goebel <h.goebel@crazy-compilers.com>"
__copyright__ = "Copyright (C) 2003-2011 by Hartmut Goebel <h.goebel@crazy-compilers.com>"
__license__ = "GPL"
import sys
import getpass
import inspect
import managesieve
import os
from .utils import read_config_defaults, exec_command
sieve = None
SUPPRESS = '--suppress--' # token for suppressing 'OK' after cmd execution
### the order of functions determines the order for 'help' ###
def cmd_help(cmd=None):
"""help - this screen (shortcut '?')
help <command> - help on command"""
## output order is the same as the sourcecode order
if cmd:
if __command_map.has_key(cmd):
cmd = __command_map[cmd]
if __commands.has_key('cmd_%s' % cmd):
print __commands['cmd_%s' % cmd].__doc__
else:
print 'Unknown command', repr(cmd)
print "Type 'help' for list of commands"
else:
cmds = __commands.values()
cmds.sort(lambda a,b: cmp(a.func_code.co_firstlineno,
b.func_code.co_firstlineno))
for c in cmds:
print c.__doc__
return SUPPRESS
def cmd_list():
"""list - list scripts on server"""
res, scripts = sieve.listscripts()
if res == 'OK':
for scriptname, active in scripts:
if active: print scriptname, '\t<<-- active'
else: print scriptname
res = SUPPRESS
return res
def cmd_put(filename, scriptname=None):
"""put <filename> [<target name>]
- upload script to server"""
if not scriptname: scriptname = filename
try:
scriptdata = open(filename).read()
except IOError, e:
print "Can't read local file %s:" % filename, e.args[1]
return SUPPRESS
return sieve.putscript(scriptname, scriptdata)
def cmd_get(scriptname, filename=None):
"""get <name> [<filename>]
- get script. if no filename display to stdout"""
res, scriptdata = sieve.getscript(scriptname)
if res == 'OK':
if filename:
try:
open(filename, 'w').write(scriptdata)
except IOError, e:
print "Can't write local file %s:" % filename, e.args[1]
return SUPPRESS
else:
print scriptdata
res = SUPPRESS
return res
def cmd_edit(scriptname):
"""edit <name> - edit a script, not existant, create on save"""
def Choice(msg, choices):
while 1:
sys.stdout.writelines((msg, ' '))
answer = sys.stdin.readline().strip()[:1].lower()
i = choices.find(answer)
if i >= 0:
# valid answer
return i
# else: continue loop
def YesNoQuestion(msg):
# Order 'ny' will return boolen values (y=1)
return Choice(msg + ' (y/n)', 'ny')
def SaveToFile(msg, scriptname, tmpname):
if not YesNoQuestion('%s Save script to file?' % msg):
return
scriptname = os.path.join(os.getcwd(), scriptname)
sys.stdout.write('Enter filename (default %s):' % scriptname)
filename = sys.stdin.readline().strip()
if filename == '':
filename = scriptname
scriptdata = open(tmpname).read()
open(filename, 'w').write(scriptdata)
res, scripts = sieve.listscripts()
if res != 'OK': return res
for name, active in scripts:
if name == scriptname:
res, scriptdata = sieve.getscript(scriptname)
if res != 'OK': return res
break
else:
if not YesNoQuestion('Script not on server. Create new?'):
return 'OK'
# else: script will be created when saving
scriptdata = ''
import tempfile
filename = tempfile.mktemp('.siv')
open(filename, 'w').write(scriptdata)
editor = os.environ.get('EDITOR', 'vi')
while 1:
res = os.system('%s %s' % (editor, filename))
if res: # error editing
            if not YesNoQuestion('Editor returned failure. Continue?'):
os.remove(filename)
return SUPPRESS
else:
continue # re-edit
# else: editing okay
while 1:
scriptdata = open(filename).read()
res = sieve.putscript(scriptname, scriptdata)
if res == 'OK':
return res
# res is NO, BYE
print res, sieve.response_text or sieve.response_code
if res == 'NO':
res = Choice('Upload failed. (E)dit/(R)etry/(A)bort?', 'era')
if res == 0: break # finish inner loop, return to 'edit'
elif res == 1: # retry upload
continue
SaveToFile('', scriptname, filename)
else: # BYE
SaveToFile('Server closed connection.', scriptname, filename)
print 'Deleting tempfile.'
os.remove(filename)
return SUPPRESS
raise "Should not come here."
if os.name != 'posix':
del cmd_edit
def cmd_delete(scriptname):
"""delete <name> - delete script."""
return sieve.deletescript(scriptname)
def cmd_activate(scriptname):
"""activate <name> - set a script as the active script"""
return sieve.setactive(scriptname)
def cmd_deactivate():
"""deactivate - deactivate all scripts"""
return sieve.setactive('')
def cmd_quit(*args):
"""quit - quit"""
print 'quitting.'
if sieve:
try:
# this mysteriously fails at times
sieve.logout()
except:
pass
raise SystemExit()
# find all commands (using introspection)
# NB: edit os only available when running on a posix system
__commands = dict([c
for c in inspect.getmembers(sys.modules[__name__],
inspect.isfunction)
if c[0].startswith('cmd_')
])
# command aliases/shortcuts
__command_map = {
'?': 'help',
'h': 'help',
'q': 'quit',
'l': 'list',
'del': 'delete',
}
def shell(auth, user=None, passwd=None, realm=None,
authmech='', server='', use_tls=0, port=managesieve.SIEVE_PORT):
"""Main part"""
def cmd_loop():
"""Command loop: read and execute lines from stdin."""
global sieve
while 1:
sys.stdout.write('> ')
line = sys.stdin.readline()
if not line:
# EOF/control-d
cmd_quit()
break
line = line.strip()
if not line: continue
# todo: parse command line correctly
line = line.split()
cmd = __command_map.get(line[0], line[0])
cmdfunc = __commands.get('cmd_%s' % cmd)
if not cmdfunc:
print 'Unknown command', repr(cmd)
else:
if __debug__: result = None
try:
result = cmdfunc(*line[1:])
except TypeError, e:
if str(e).startswith('%s() takes' % cmdfunc.__name__):
print 'Wrong number of arguments:'
print '\t', cmdfunc.__doc__
continue
else:
raise
assert result != None
if result == 'OK':
print result
elif result == SUPPRESS:
# suppress 'OK' for some commands (list, get)
pass
else:
print result, sieve.response_text or sieve.response_code
if result == "BYE":
# quit when server send BYE
cmd_quit()
global sieve
try:
print 'connecting to', server
try:
if not auth: auth = getpass.getuser()
if not user: user = auth
if not passwd: passwd = getpass.getpass()
except EOFError:
# Ctrl-D pressed
print # clear line
return
sieve = managesieve.MANAGESIEVE(server, port=port, use_tls=use_tls)
print 'Server capabilities:',
for c in sieve.capabilities: print c,
print
try:
if not authmech:
# auto-select best method available
res = sieve.login(authmech, user, passwd)
elif authmech.upper() == 'LOGIN':
# LOGIN does not support authenticator
res = sieve.authenticate(authmech, user, passwd)
else:
res = sieve.authenticate(authmech, auth, user, passwd)
except sieve.error, e:
print "Authenticate error: %s" % e
cmd_quit()
if res != 'OK':
print res, sieve.response_text or sieve.response_code
cmd_quit()
cmd_loop()
except KeyboardInterrupt:
print
cmd_quit()
def main():
"""Parse options and call interactive shell."""
try:
from optparse import OptionParser
except ImportError:
from optik import OptionParser
parser = OptionParser('Usage: %prog [options] server')
parser.add_option('--authname',
help= "The user to use for authentication "
"(defaults to current user).")
parser.add_option('--user', dest='username',
help = "The authorization name to request; "
"by default, derived from the "
"authentication credentials.")
parser.add_option('--passwd', help = "The password to use.")
parser.add_option('--realm',
help= "The realm to attempt authentication in.")
parser.add_option('--auth-mech', default="",
help= "The SASL authentication mechanism to use "
"(default: auto select; available: %s)." % ', '.join(managesieve.AUTHMECHS))
parser.add_option('--script', '--script-file',
help= "Instead of working interactively, run "
"commands from SCRIPT, and exit when done.")
parser.add_option('--use-tls', '--tls', action="store_true",
help="Switch to TLS if server supports it.")
parser.add_option('--port', type="int", default=managesieve.SIEVE_PORT,
help="port number to connect to (default: %default)")
parser.add_option('-v', '--verbose', action='count', default=0,
help='Be verbose. May be given several times to increase verbosity')
parser.add_option('-x', '--password-command', dest='password_command',
help="Shell command to execute to get the password")
config_file = os.environ.get("MANAGESIEVE_CONFIG")
if config_file:
read_config_defaults(config_file, parser)
options, args = parser.parse_args()
# handle password-command
if options.password_command:
options.passwd = exec_command(options.password_command)
if options.auth_mech and not options.auth_mech.upper() in managesieve.AUTHMECHS:
parser.error("Authentication mechanism %s is not supported. Choose one of %s" % (options.auth_mech.upper(), ', '.join(managesieve.AUTHMECHS)))
if len(args) != 1:
parser.error("Argument 'server' missing.")
server = args[0]
if options.verbose:
level = managesieve.INFO
if options.verbose > 1:
level = managesieve.DEBUG0 - (options.verbose-2)
import logging
logging.basicConfig(level=level, format="%(message)s")
shell(options.authname, options.username, options.passwd,
options.realm, options.auth_mech, server, options.use_tls,
options.port)
return 0
if __name__ == "__main__":
if __doc__ is None:
raise SystemExit('Must not be run with Python option -OO (removed doc-strings)')
raise SystemExit(main())
|
piger/managesieve-cli
|
managesieve/sieveshell.py
|
Python
|
gpl-3.0
| 13,053 | 0.003601 |
"""
Module defining the Spline class, something easy to wrap around SciPy splines.
Includes BOK algorithms (Mollinari et al)
Some rules of splrep (k = 3)
- do not put more then 2 knots between data points.
- splrep wants inner knots only, do not give extremal knots, even only "once".
"""
import numpy as np
import sys
import pycs.gen.util
import copy as pythoncopy
import matplotlib.pyplot as plt
import scipy.optimize as spopt
import scipy.interpolate as si
class DataPoints():
"""
An ultralight version of a lightcurve, made for fast computations.
Can be "merged" from a list of lightcurves, see factory function below.
A Spline object has such a DataPoints object as attribute.
ATTENTION
Datapoints are expected to be ALWAYS SORTED BY JDS, and no two datapoints have the same jd !
See the splitup option of the constructor.
Note that this is not the case for lightcurves ! Hence the existence of datapoints.
Should be enforced in every function that builds datapoints.
ABOUT STAB POINTS
With scipy splines, we always get the last knots at the extrema of data points.
So to get knots "outside" of the real datapoints, we have to insert fake points.
And while we are at it, these fake points can also be used to stabilize the spline in
gaps.
The mask is used to differentiate between actual data points and "stabilization points"
that are inserted to make the spline behave well at the extrema and in season gaps.
It is modified by the two addgappts and addextpts.
The info about stabpoints is written into the object,
so that they can be reconstrucuted from any new jds and mags.
"""
def __init__(self, jds, mags, magerrs, splitup=True, deltat=0.000001, sort=True, stab=False,
stabext=300.0, stabgap = 30.0, stabstep = 5.0, stabmagerr = -2.0, stabrampsize = 0, stabrampfact = 1.0):
"""
Constructor
Always leave splitup and sort on True ! Only if you know that you are already
sorted you can skip them.
You cannot specify a mask, I do this myself. (could be done in principle).
stab : do you want stabilization points ?
Don't forget to run splitup, sort, and addstab again if you change the data !
"""
self.jds = jds
self.mags = mags
self.magerrs = magerrs
self.stab = stab
self.stabext = stabext
self.stabgap = stabgap
self.stabstep = stabstep
self.stabmagerr = stabmagerr
self.stabrampsize = stabrampsize
self.stabrampfact = stabrampfact
self.mask = np.ones(len(self.jds), dtype=np.bool) # an array of True
self.deltat = deltat
if splitup:
self.splitup()
elif sort: # If we do the splitup, we sort anyway.
self.sort()
self.putstab()
# def update(self, jds, mags, magerrs):
# """
# NOT NEEDED ANYMORE, JUST CALL MERGE AND GIVE AN OLDDP. SAFER.
#
# Give me some new datapoints (no stabs) (already splitup and sorted, by definition), I'll update myself.
# In fact everything might move !
# """
# if newdatapoints.stab = True:
# raise RuntimeError("Give me points without stab !")
# self.jds = newdatapoints.jds
# self.mags = newdatapoints.mags
# self.magerrs = newdatapoints.magerrs
# self.mask = np.ones(len(self.jds), dtype=np.bool)
# self.addstab() # runs only if stab = True
def splitup(self):
"""
        Adds a tiny random offset (of scale deltat) to each jd so that no two
        data points share exactly the same jd, then re-sorts.
Note that this might change the order of the jds,
but only of very close ones, so one day it would be ok to leave the mags as they are.
"""
self.jds += self.deltat * np.random.randn(len(self.jds))
self.sort()
def sort(self):
"""
Absolutely mandatory, called in the constructor.
"""
sortedindices = np.argsort(self.jds)
self.jds = self.jds[sortedindices]
self.mags = self.mags[sortedindices]
self.magerrs = self.magerrs[sortedindices]
self.mask = self.mask[sortedindices]
self.validate()
def validate(self):
"""
We check that the datapoint jds are increasing strictly :
"""
first = self.jds[:-1]
second = self.jds[1:]
if not np.alltrue(np.less(first,second)): # Not less_equal ! Strictly increasing !
            raise RuntimeError, "These datapoints don't have strictly increasing jds !"
def rmstab(self):
"""
Deletes all stabilization points
"""
self.jds = self.jds[self.mask]
self.mags = self.mags[self.mask]
self.magerrs = self.magerrs[self.mask]
self.mask = np.ones(len(self.jds), dtype=np.bool)
def putstab(self):
"""
Runs only if stab is True.
I will :
add datapoints (new jds, new mags, new magerrs)
modify the mask = False for all those new datapoints.
"""
if self.stab == True:
# We start by deleting any previous stab stuff :
self.rmstab()
self.addgappts()
self.addextpts()
else:
pass
def calcstabmagerr(self):
"""
Computes the mag err of the stabilisation points.
"""
if self.stabmagerr >= 0.0:
return self.stabmagerr
else:
return - self.stabmagerr * np.median(self.magerrs)
def addgappts(self):
"""
We add stabilization points with low weights into the season gaps
to avoid those big excursions of the splines.
This is done by a linear interpolation across the gaps.
"""
absstabmagerr = self.calcstabmagerr()
gaps = self.jds[1:] - self.jds[:-1] # has a length of len(self.jds) - 1
gapindices = np.arange(len(self.jds) - 1)[gaps > self.stabgap] # indices of those gaps that are larger than stabgap
for n in range(len(gapindices)):
i = gapindices[n]
a = self.jds[i]
b = self.jds[i+1]
newgapjds = np.linspace(a, b, float(b-a)/float(self.stabstep))[1:-1]
newgapindices = i + 1 + np.zeros(len(newgapjds))
newgapmags = np.interp(newgapjds, [a, b], [self.mags[i], self.mags[i+1]])
newgapmagerrs = absstabmagerr * np.ones(newgapmags.shape)
newgapmask = np.zeros(len(newgapjds), dtype=np.bool)
self.jds = np.insert(self.jds, newgapindices, newgapjds)
self.mags = np.insert(self.mags, newgapindices, newgapmags)
self.magerrs = np.insert(self.magerrs, newgapindices, newgapmagerrs)
self.mask = np.insert(self.mask, newgapindices, newgapmask)
gapindices += newgapjds.size # yes, as we inserted some points the indices change.
# If you change this structure, be sure to check SplineML.settargetmags as well !
self.validate()
def addextpts(self):
"""
We add stabilization points at both extrema of the lightcurves
This is done by "repeating" the extremal points, and a ramp in the magerrs
"""
absstabmagerr = self.calcstabmagerr()
extjds = np.arange(self.jds[0], self.jds[0] - self.stabext, -1*self.stabstep)[::-1][:-1]
extmags = self.mags[0] * np.ones(extjds.shape)
extmagerrs = absstabmagerr * np.ones(extjds.shape)
for i in range(1, self.stabrampsize+1):
extmagerrs[-i] += (self.stabrampsize +1 -i) * absstabmagerr * self.stabrampfact
extindices = np.zeros(extjds.shape)
mask = np.zeros(len(extjds), dtype=np.bool)
self.jds = np.insert(self.jds, extindices, extjds)
self.mags = np.insert(self.mags, extindices, extmags)
self.magerrs = np.insert(self.magerrs, extindices, extmagerrs)
self.mask = np.insert(self.mask, extindices, mask)
# And the same at the other end :
extjds = np.arange(self.jds[-1], self.jds[-1] + self.stabext, self.stabstep)[1:]
extmags = self.mags[-1] * np.ones(extjds.shape)
extmagerrs = absstabmagerr * np.ones(extjds.shape)
for i in range(0, self.stabrampsize):
extmagerrs[i] += (self.stabrampsize -i) * absstabmagerr * self.stabrampfact
extindices = len(self.jds) + np.zeros(extjds.shape)
mask = np.zeros(len(extjds), dtype=np.bool)
self.jds = np.insert(self.jds, extindices, extjds)
self.mags = np.insert(self.mags, extindices, extmags)
self.magerrs = np.insert(self.magerrs, extindices, extmagerrs)
self.mask = np.insert(self.mask, extindices, mask)
self.validate()
def getmaskbounds(self):
"""
Returns the upper and lower bounds of the regions containing stabilization points.
This is used when placing knots, so to put fewer knots in these regions.
Crazy stuff...
"""
maskindices = np.where(self.mask == False)[0]
#print maskindices
if len(maskindices) < 3:
print "Hmm, not much masked here ..."
return (np.array([]), np.array([]))
else:
lcuts = maskindices[np.where(maskindices[1:] - maskindices[:-1] > 1)[0] + 1]
lcuts = np.insert(lcuts, 0, maskindices[0])
ucuts = maskindices[np.where(maskindices[1:] - maskindices[:-1] > 1)[0]]
ucuts = np.insert(ucuts, len(ucuts), maskindices[-1])
return (lcuts, ucuts)
def ntrue(self):
"""
Returns the number of real datapoints (skipping stabilization points)
"""
return np.sum(self.mask)
def merge(lcs, olddp=None, splitup=True, deltat=0.000001, sort=True, stab=False,
stabext=300.0, stabgap = 30.0, stabstep = 5.0, stabmagerr = 2.0, stabrampsize = 0, stabrampfact = 1.0):
"""
Factory function for DataPoints objects, starting from lightcurves.
Takes a list of lightcurves and quickly concatenate the jds, mags, and magerrs.
Instead of specifying all the stab point parameters, you can give me an old datapoints object,
and I will reuse its settings... This is useful if you want to "update" the data points.
If overlap is True, I will keep only points that are "covered" by all four lightcurves !
This is useful when you want to build a first source spline, and your microlensing is messy at the borders.
NOT YET IMPLEMENTED ...
"""
jds = np.concatenate([l.getjds() for l in lcs])
mags = np.concatenate([l.getmags() for l in lcs])
magerrs = np.concatenate([l.getmagerrs() for l in lcs])
if olddp == None:
return DataPoints(jds, mags, magerrs, splitup=splitup, deltat=deltat, sort=sort,
stab=stab, stabext=stabext, stabgap=stabgap, stabstep=stabstep, stabmagerr=stabmagerr,
stabrampsize=stabrampsize, stabrampfact=stabrampfact)
else:
return DataPoints(jds, mags, magerrs, splitup=splitup, sort=sort,
deltat=olddp.deltat,
stab=olddp.stab, stabext=olddp.stabext, stabgap=olddp.stabgap, stabstep=olddp.stabstep, stabmagerr=olddp.stabmagerr,
stabrampsize=olddp.stabrampsize, stabrampfact=olddp.stabrampfact)
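# Hedged usage sketch with synthetic arrays (real code would pass pycs
# lightcurve objects to merge()):
#   jds = np.linspace(0.0, 100.0, 50)
#   mags = np.sin(jds / 10.0)
#   magerrs = 0.1 * np.ones(50)
#   dp = DataPoints(jds, mags, magerrs, stab=True, stabgap=30.0, stabstep=5.0)
#   print dp.ntrue()   # number of real (non-stab) data points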
class Spline():
"""
A class to represent a spline, that is essentially a set of knots and coefficients.
As finding knots and coefficients requires access to some data points, these are included
in the form of a DataPoints object.
Abount knots :
Spline.t are all the knots, including extremas with multiplicity.
But splrep wants internal knots only ! By internal we mean : not even the data extremas !
Spline.getintt() returns only these internal knots.
"""
def __init__(self, datapoints, t = None, c = None, k = 3, bokeps = 2.0, boktests = 5, bokwindow = None, plotcolour="black"):
"""
t : all the knots (not only internal ones !)
c : corresponding coeffs
k : degree : default = cubic splines k=3 -> "order = 4" ???
whatever ... 3 means that you can differentiate twice at the knots.
"""
#self.origdatapoints = datapoints
self.datapoints = datapoints
# At this point we know that your datapoint jds are monotonously increasing. This is tested
# by validate() of datapoints.
self.t = t # the array of knots
self.c = c # the coeffs
self.k = k
self.bokeps = bokeps
self.boktests = boktests
self.bokwindow = bokwindow
self.knottype = "none"
self.plotcolour = plotcolour
self.showknots = True
# Bounds, for BOK
self.lims = None
self.l = None
self.u = None
# We want to keep trace of the r2 of a spline.
self.lastr2nostab = 0.0 # without stab points (the real thing)
self.lastr2stab = 0.0 # with stab points (usually not so interesting)
# If you did not give me a t&c, I'll make some default ones for you :
try:
if (self.t is None):
self.uniknots(2) # This also puts self.c to 0s
except:
if (len(self.t) == 0):
self.uniknots(2) # This also puts self.c to 0s
def __str__(self):
"""
Returns a string with:
* degree
* knot placement
* number of intervals
"""
#return "Spline of degree %i, %i knots (%i inner knots), and %i intervals." % (self.k, len(self.t), len(self.getintt()), self.getnint())
if len(self.knottype) > 6: # That's a string
knottext = "%il%ib" % (self.knottype.count("l"), self.knottype.count("b"))
else:
knottext = self.knottype
return "~%i/%s/%i~" % (self.k, knottext, self.getnint())
def copy(self):
"""
Returns a "deep copy" of the spline.
"""
return pythoncopy.deepcopy(self)
def shifttime(self, timeshift):
"""
Hard-shifts your spline along the time axis.
By "hard-shift", I mean that unlike for a lightcurve, the spline will not know that it was shifted !
It's up to you to be sure that you want to move it.
We shift both the datapoints and the knots.
"""
self.t += timeshift
self.datapoints.jds += timeshift
def shiftmag(self, magshift):
"""
Hard-shifts your spline along the mag axis.
By "hard-shift", I mean that unlike for a lightcurve, the spline will not know that it was shifted !
It's up to you to be sure that you want to move it.
We shift both the datapoints and the knots.
"""
self.c += magshift
self.datapoints.mags += magshift
def updatedp(self, newdatapoints, dpmethod="stretch"):
"""
Replaces the datapoints of the spline, and makes sure that the knots
stay compatible.
If you tweaked your datapoints, I will have to tweak my knots to make sure
that my external knots fit. Hence this method !
Due to the splitup, this is needed even if you just tweaked the mags !
And anyway in this case I have to rebuild the stab points.
.. warning :: IT'S UP TO YOU TO CHECK THAT YOU DON'T REPLACE DATAPOINTS WITH DIFFERENT STAB SETTINGS
Anyway it would work, just look ugly !
Replaces the datapoints (jds, mags, and magerrs) touching the knots and coeffs as little as possible.
Note that we also have to deal with stab points here !
This is designed, for instance, for time shifts that only very slightly change the datapoints, when you don't
want to re-optimize the knots from scratch every time.
The current knots are "stretched" (keeping their relative spacings) across the new datapoints.
Options for "dpmethod" :
- "stretch" : changes all the knots
- "extadj" : does not touch the internal knots, but adjusts the external ones only, to
fit the new datapoints. Probably the method to use when optimizing time shifts.
- "leave" : does not touch the knots -> ok to evaluate the spline,
but you will not be able to fit it anymore, as the external knots don't correspond to datapoints.
.. todo:: In principle, why don't we just update the real datapoints here, and leave the stab as
they are ?
"""
if dpmethod == "stretch":
oldmin = self.datapoints.jds[0] # This includes potential stab points
oldmax = self.datapoints.jds[-1]
newmin = newdatapoints.jds[0] # Idem
newmax = newdatapoints.jds[-1]
oldknots = self.getinttex()
#print oldknots
# we will stretch the oldknots by a factor a :
a = (newmax - newmin)/(oldmax - oldmin)
newknots = newmin + a*(oldknots-oldmin)
# We set the new datapoints:
self.datapoints = newdatapoints
self.setinttex(newknots)
elif dpmethod == "extadj" :
intknots = self.getintt()
self.datapoints = newdatapoints
# Ok, now the newdatapoints might be narrower or wider than the knots, we have to deal with this.
# If they are wider, it's easy : setintt will move the external knots onto the external datapoints.
# If they are narrower, it's trickier : we have to remove some extra knots, so as to keep only the
# "internal" ones to feed into setintt.
#if True: # works as well, but maybe faster to test first :
if (self.datapoints.jds[0] >= intknots[0]) or (self.datapoints.jds[-1] <= intknots[-1]):
keepmask = np.ones(intknots.shape, dtype=np.bool)
for i in range(len(intknots)): # Starting from the left ...
if intknots[i] <= self.datapoints.jds[0]:
keepmask[i] = False
else:
break
for i in range(len(intknots))[::-1]: # And now the right ...
if intknots[i] >= self.datapoints.jds[-1]:
keepmask[i] = False
else:
break
#nkick = np.sum(keepmask == False)
#if nkick != 0:
# print "I'll kick %i knots !" % (nkick)
# And finally, we apply the mask .
intknots = intknots[keepmask]
self.setintt(intknots) # This automatically adjusts the extremal knots.
elif dpmethod == "leave" :
knots = self.getinttex()
self.datapoints = newdatapoints
# We quickly check the boundaries
if ( knots[0] >= self.datapoints.jds[0] ) or ( knots[-1] <= self.datapoints.jds[-1] ):
raise RuntimeError("Your newdatapoints are to wide for the current knots !")
else:
raise RuntimeError("Don't know this updatedp method !")
# We reset any bounds just to be sure.
self.lims = None
self.l = None
self.u = None
def uniknots(self, nint, n=True):
"""
Uniform distribution of internal knots across the datapoints (including any stab points).
We don't make a difference between stab and real points.
:param nint: The number of intervals, or the step
:param n:
If True, nint is the number of intervals (== piecewise polynomials) you want.
If False : nint is a step in days you want between the knots (approximately).
:type n: boolean
.. note:: I also put all coeffs back to 0.0 !
"""
#intt = np.linspace(self.datapoints.jds[0], self.datapoints.jds[-1], step+1)[1:-1] # we remove the extremas
a = self.datapoints.jds[0]
b = self.datapoints.jds[-1]
if n:
intt = np.linspace(a, b, nint + 1)[1:-1]
else:
intt = np.linspace(a, b, int(float(b-a)/float(nint)))[1:-1] # np.linspace needs an integer number of samples
if len(intt) == 0:
raise RuntimeError("I am uniknots, and I have only 0 (zero) internal knots ! Increase this number !")
self.setintt(intt)
self.knottype = "u"
# Important : we put some 0 coeffs to go with the new knots
self.resetc()
def resetc(self):
"""
Sets all coeffs to 0.0 -- if you want to start again your fit, keeping the knot positions.
"""
self.c = np.zeros(len(self.t))
def reset(self):
"""
Calls uniknots, i.e. resets both coeffs and knot positions, keeping the same number of knots.
"""
self.uniknots(self.getnint() ,n=True)
def buildbounds(self, verbose = True):
"""
Build bounds for bok.
By default I will make those bounds as wide as possible, still respecting epsilon.
The parameter epsilon is the minimum distance two knots can have.
If you give me a window size, I will not make the bounds as wide as possible, but only put them
0.5*window days around the current knots (still respecting all this epsilon stuff of course).
I look where your current knots are, and for each knots I build the bounds so that
epsilon distance is respected between adjacent upper and lower bounds.
But, there might already be knots only epsilon apart.
So I'm a bit tricky, not so straightforward as my predecessors.
Knots at the extrema are not allowed to move.
Requires existing knots, puts lims in between them, and builds the bounds.
@todo: Optimize me using numpy ! This is experimental code for now.
"""
if verbose:
print "Building BOK bounds (bokeps = %.3f, bokwindow = %s) ..." % (self.bokeps, self.bokwindow)
knots = self.getinttex() # Including extremal knots (once).
n = len(knots)
# We start by checking the knot spacing
knotspacings = knots[1:] - knots[:-1]
if not np.alltrue(knotspacings > 0.0):
raise RuntimeError("Ouch, your knots are not sorted !")
minspace = np.min(knotspacings)
if verbose :
print "Minimal knot spacing : %.3f" % (minspace)
if minspace < self.bokeps - 0.00001: # Rounding errors, we decrease epsilon a bit...
# If this does still happens, then it was not just a rounding error ...
# Yes it still happens, due to updatedp stretch ...
raise RuntimeError("Knot spacing min = %f, epsilon = %f" % (minspace, self.bokeps))
# Loop through the knots.
lowers = [knots[0]] # First knot is not allowed to move
uppers = [knots[0]]
for i in range(1, n-1): # Internal knots
tk = knots[i] # this knot
pk = knots[i-1] # previous knot
nk = knots[i+1] # next knot
# First we build the wide bounds :
guessl = 0.5*(pk + tk) + 0.5*self.bokeps
if guessl >= tk:
guessl = tk
guessu = 0.5*(nk + tk) - 0.5*self.bokeps
if guessu <= tk:
guessu = tk
# Now we see if the user wants a narrower window within those bounds :
if self.bokwindow != None:
if tk - 0.5*self.bokwindow >= guessl:
guessl = tk - 0.5*self.bokwindow
if tk + 0.5*self.bokwindow <= guessu:
guessu = tk + 0.5*self.bokwindow
lowers.append(guessl)
uppers.append(guessu)
# And now this last knot, doesn't move, like the first one:
lowers.append(knots[-1])
uppers.append(knots[-1])
self.l = np.array(lowers)
self.u = np.array(uppers)
self.knottype += "l"
if verbose:
print "Buildbounds done."
def bok(self, bokmethod="BF", verbose=True, trace=False):
"""
We optimize the positions of knots by some various techniques.
We use fixed bounds for the exploration, run buildbounds (with low epsilon) first.
This means that I will not move my bounds.
For each knot, I will try boktests linearly spaced positions within its bounds.
In this version, the bounds are included : I might put a knot on a bound !
The way the bounds are placed by buildbounds ensures that in any case the minimal
distance of epsilon is respected.
Using this scheme, it is now possible to iteratively call bok and buildbounds in a loop
and still respect epsilon at any time.
bokmethods :
- MCBF : Monte Carlo brute force, with boktests trial positions for each knot
- BF : brute force, deterministic. Call me twice
- fminind : fminbound on one knot after the other.
- fmin : global fminbound
Exit is automatic, if the result does not improve anymore...
"""
intknots = self.getintt() # only internal, the ones we will move
nintknots = len(intknots)
weights = 1.0/self.datapoints.magerrs
def score(intknots, index, value):
modifknots = intknots.copy()
modifknots[index] = value
return si.splrep(self.datapoints.jds, self.datapoints.mags, w=weights, xb=None, xe=None, k=self.k, task=-1, s=None, t=modifknots, full_output=1, per=0, quiet=1)[1]
iniscore = score(intknots, 0, intknots[0])
lastchange = 1
lastscore = iniscore
iterations = 0
if verbose:
print "Starting BOK-%s on %i intknots (boktests = %i)" % (bokmethod, nintknots, self.boktests)
if bokmethod == "MCBF":
while True:
if lastchange >= 2*nintknots: # somewhat arbitrary, but why not.
break
i = np.random.randint(0, nintknots) # (inclusive, exclusive)
testknots = np.linspace(self.l[i+1], self.u[i+1], self.boktests)
# +1, as u and l include extremal knots...
# So we include the extremas in our range to test.
testscores = np.array([score(intknots, i, testknot) for testknot in testknots])
bestone = np.argmin(testscores)
bestscore = testscores[bestone]
if bestscore < lastscore:
lastchange = 0
intknots[i] = testknots[bestone] # WE UPDATE the intknots array !
lastscore = bestscore
lastchange += 1
iterations += 1
if trace:
self.optc()
pycs.gen.util.trace([], [self])
if bokmethod == "BF":
intknotindices = range(nintknots) # We could potentially change the order, just to see if that makes sense.
# No, it doesn't really help
#mid = int(len(intknotindices)/2.0)
#intknotindices = np.concatenate([intknotindices[mid:], intknotindices[:mid][::-1]])
for i in intknotindices:
testknots = np.linspace(self.l[i+1], self.u[i+1], self.boktests)
# +1, as u and l include extremal knots...
# So we include the extremas in our range to test.
testscores = np.array([score(intknots, i, testknot) for testknot in testknots])
bestone = np.argmin(testscores)
bestscore = testscores[bestone]
intknots[i] = testknots[bestone] # WE UPDATE the intknots array !
iterations += 1
if trace:
self.optc()
pycs.gen.util.trace([], [self])
if bokmethod == "fminind":
intknotindices = range(nintknots)
for i in intknotindices:
def target(value):
return score(intknots, i, value)
#inival = intknots[i]
#bounds = (self.l[i+1], self.u[i+1])
out = spopt.fminbound(target, self.l[i+1], self.u[i+1], xtol=0.01, maxfun=100, full_output=1, disp=1)
#print out
optval = out[0]
bestscore = out[1]
intknots[i] = optval # WE UPDATE the intknots array !
iterations += 1
if trace:
self.optc()
pycs.gen.util.trace([], [self])
if bokmethod == "fmin":
def target(modifknots):
#iterations += 1
#if trace:
# self.optc()
# pycs.gen.util.trace([], [self])
return si.splrep(self.datapoints.jds, self.datapoints.mags, w=weights, xb=None, xe=None, k=self.k, task=-1, s=None, t=modifknots, full_output=1, per=0, quiet=1)[1]
bounds = [(a, b) for (a, b) in zip(self.l[1:-1], self.u[1:-1])]
out = spopt.fmin_l_bfgs_b(target, intknots, approx_grad=True, bounds=bounds, m=10, factr=1e7, pgtol=1.e-05, epsilon=1e-04, iprint=-1, maxfun=15000)
#out = spopt.fminbound(target, self.l[1:-1], self.u[1:-1], xtol=0.01, maxfun=1000, full_output=1, disp=3)
#print out
intknots = out[0]
bestscore = out[1]
# relative improvement :
relimp = (iniscore - bestscore)/iniscore
self.knottype += "b"
self.setintt(intknots)
#pycs.gen.lc.display([],[self])
#self.display()
self.optc() # Yes, not yet done !
finalr2 = self.r2(nostab=True)
if verbose:
print "r2 = %f (without stab poins)" % finalr2
print "Done in %i iterations, relative improvement = %f" % (iterations, relimp)
# We count all datapoints here, as score returns the full chi2 including stab pts.
return finalr2
# Some stuff about knots :
def getintt(self):
"""
Returns the internal knots (i.e., not even the datapoints extrema)
This is what you need to feed into splrep !
There are nint - 1 such knots
"""
return self.t[(self.k+1):-(self.k+1)].copy() # We cut the outer knots.
def getinttex(self):
"""
Same as above, but we include the extremal points "once".
"""
return self.t[(self.k):-(self.k)].copy()
def knotstats(self):
"""
Returns a string describing the knot spacing
"""
knots = self.getinttex()
spacings = knots[1:] - knots[:-1]
return " ".join(["%.1f" % (spacing) for spacing in sorted(spacings)])
def setintt(self, intt):
"""
Give me some internal knots (not even containing the datapoints extrema),
and I build the correct total knot vector t for you.
I add the extremas, with appropriate multiplicity.
@TODO: check consistency of intt with datapoints !
"""
# Ok a quick test for consistency :
if len(intt) == 0:
raise RuntimeError("Your list of internal knots is empty !")
if not self.datapoints.jds[0] < intt[0]:
raise RuntimeError("Ouch.")
if not self.datapoints.jds[-1] > intt[-1]:
raise RuntimeError("Ouch.")
#assert self.datapoints.jds[0] < intt[0] # should we put <= here ?
#assert self.datapoints.jds[-1] > intt[-1]
pro = self.datapoints.jds[0] * np.ones(self.k+1)
post = self.datapoints.jds[-1] * np.ones(self.k+1)
self.t = np.concatenate((pro, intt, post))
def setinttex(self, inttex):
"""
Including extremal knots
"""
#pro = self.datapoints.jds[0] * np.ones(self.k)
#post = self.datapoints.jds[-1] * np.ones(self.k)
pro = inttex[0] * np.ones(self.k)
post = inttex[-1] * np.ones(self.k)
self.t = np.concatenate((pro, inttex, post))
def getnint(self):
"""
Returns the number of intervals
"""
return(len(self.t) - 2* (self.k + 1) + 1)
# Similar stuff about coeffs :
def getc(self, m=0):
"""
Returns all active coefficients of the spline, the ones it makes sense to play with.
The length of this guy is number of intervals - 2 !
"""
return self.c[m:-(self.k + 1 + m)].copy()
def setc(self, c, m=0):
"""
Puts the coeffs from getc back into place.
"""
self.c[m:-(self.k + 1 + m)] = c
def getco(self, m=0):
"""
Same as getc, but reorders the coeffs in a way more suited for nonlinear optimization
"""
c = self.getc(m=m)
mid = int(len(c)/2.0)
return np.concatenate([c[mid:], c[:mid][::-1]])
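# Illustrative example (added): for c = [c0, c1, c2, c3, c4], mid = 2, so getco
# returns [c2, c3, c4, c1, c0] -- the right half first, then the left half
# reversed. setco below is the exact inverse of this reordering.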
def setco(self, c, m=0):
"""
The inverse of getco.
"""
mid = int(len(c)/2.0)
self.setc(np.concatenate([c[mid+1:][::-1], c[:mid+1]]), m=m)
def setcflat(self, c):
"""
Give me coeffs like those from getc(m=1), I will set the coeffs so that the spline extremas
are flat (i.e. slope = 0).
"""
self.setc(c, m=1)
self.c[0] = self.c[1]
self.c[-(self.k + 2)] = self.c[-(self.k + 3)]
def setcoflat(self, c):
"""
idem, but for reordered coeffs.
"""
mid = int(len(c)/2.0)
self.setcflat(np.concatenate([c[mid:][::-1], c[:mid]]))
def r2(self, nostab=True, nosquare=False):
"""
Evaluates the spline, compares it with the data points and returns a weighted sum of residuals r2.
If nostab = False, stab points are included
This is precisely the same r2 as is used by splrep for the fit, and thus the same value as
returned by optc !
This method can set lastr2nostab, so be sure to end any optimization with it.
If nostab = True, we don't count the stab points
"""
if nostab == True :
splinemags = self.eval(nostab = True, jds = None)
errs = self.datapoints.mags[self.datapoints.mask] - splinemags
werrs = errs/self.datapoints.magerrs[self.datapoints.mask]
if nosquare:
r2 = np.sum(np.fabs(werrs))
else:
r2 = np.sum(werrs * werrs)
self.lastr2nostab = r2
else :
splinemags = self.eval(nostab = False, jds = None)
errs = self.datapoints.mags - splinemags
werrs = errs/self.datapoints.magerrs
if nosquare:
r2 = np.sum(np.fabs(werrs))
else:
r2 = np.sum(werrs * werrs)
self.lastr2stab = r2
return r2
#if red:
# return chi2/len(self.datapoints.jds)
def tv(self):
"""
Returns the total variation of the spline. Simple !
http://en.wikipedia.org/wiki/Total_variation
"""
# Method 1 : linear approximation
ptd = 5 # point density in days ... this is enough !
a = self.t[0]
b = self.t[-1]
x = np.linspace(a, b, int((b-a) * ptd))
y = self.eval(jds = x)
tv1 = np.sum(np.fabs(y[1:] - y[:-1]))
#print "TV1 : %f" % (tv1)
return tv1
# Method 2 : integrating the absolute value of the derivative ... hmm, splint does not integrate derivatives ..
#si.splev(jds, (self.t, self.c, self.k))
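# Sanity check (added for illustration): for a straight line of slope s over n
# days, the sum of |y[i+1] - y[i]| is |s|*n, which matches the analytic total
# variation of a monotonic function, so the linear approximation is exact there.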
def optc(self):
"""
Optimize the coeffs, don't touch the knots
This is the fast guy, one reason to use splines :-)
Returns the chi2 in case you want it (including stabilization points) !
Sets lastr2stab, but not lastr2nostab !
"""
out = si.splrep(self.datapoints.jds, self.datapoints.mags, w=1.0/self.datapoints.magerrs, xb=None, xe=None, k=self.k, task=-1, s=None, t=self.getintt(), full_output=1, per=0, quiet=1)
# We check if it worked :
if not out[2] <= 0:
raise RuntimeError("Problem with spline representation, message = %s" % (out[3]))
self.c = out[0][1] # save the coeffs
#import matplotlib.pyplot as plt
#plt.plot(self.datapoints.jds, self.datapoints.magerrs)
#plt.show()
self.lastr2stab = out[1]
return out[1]
def optcflat(self, verbose = False):
"""
Optimizes only the "border coeffs" so as to get zero slope at the extrema
Run optc() first ...
This has to be done with an iterative optimizer
"""
full = self.getc(m=1)
inip = self.getc(m=1)[[0, 1, -2, -1]] # 4 coeffs
def setp(p):
full[[0, 1, -2, -1]] = p
self.setcflat(full)
if verbose:
print "Starting flat coeff optimization ..."
print "Initial pars : ", inip
def errorfct(p):
setp(p)
return self.r2(nostab=False) # To get the same as optc would return !
minout = spopt.fmin_powell(errorfct, inip, full_output=1, disp=verbose)
popt = minout[0]
if popt.shape == ():
popt = np.array([popt])
if verbose:
print "Optimal pars : ", popt
setp(popt)
return self.r2(nostab=False) # We include the stab points, like optc does.
# This last line also updates self.lastr2 ...
def eval(self, jds = None, nostab = True):
"""
Evaluates the spline at jds, and returns the corresponding mags-like vector.
By default, we exclude the stabilization points !
If jds is not None, we use them instead of our own jds (in this case nostab makes no sense)
"""
if jds is None:
if nostab:
jds = self.datapoints.jds[self.datapoints.mask]
else:
jds = self.datapoints.jds
else:
# A minimal check for non-extrapolation condition should go here !
pass
fitmags = si.splev(jds, (self.t, self.c, self.k))
# By default ext=0 : we do return extrapolated values
return fitmags
def display(self, showbounds = True, showdatapoints = True, showerrorbars=True, figsize=(16,8)):
"""
A display of the spline object, with knots, jds, stab points, etc.
For debugging and checks.
"""
fig = plt.figure(figsize=figsize)
if showdatapoints:
if showerrorbars:
mask = self.datapoints.mask
plt.errorbar(self.datapoints.jds[mask], self.datapoints.mags[mask], yerr=self.datapoints.magerrs[mask], linestyle="None", color="blue")
if not np.alltrue(mask):
mask = mask == False
plt.errorbar(self.datapoints.jds[mask], self.datapoints.mags[mask], yerr=self.datapoints.magerrs[mask], linestyle="None", color="gray")
else:
plt.plot(self.datapoints.jds, self.datapoints.mags, "b,")
if self.t is not None:
if getattr(self, "showknots", True) == True:
for knot in self.t:
plt.axvline(knot, color="gray")
# We draw the spline :
xs = np.linspace(self.datapoints.jds[0], self.datapoints.jds[-1], 1000)
ys = self.eval(jds = xs)
plt.plot(xs, ys, "b-")
if showbounds :
if (self.l is not None) and (self.u is not None):
for l in self.l:
plt.axvline(l, color="blue", dashes=(4, 4))
for u in self.u:
plt.axvline(u, color="red", dashes=(5, 5))
axes = plt.gca()
axes.set_ylim(axes.get_ylim()[::-1])
plt.show()
# Some functions to interact directly with lightcurves :
def fit(lcs, knotstep=20.0, n=None, knots=None, stab=True,
stabext=300.0, stabgap=20.0, stabstep=5.0, stabmagerr=-2.0, stabrampsize=0, stabrampfact=1.0,
bokit=1, bokeps=2.0, boktests=5, bokwindow=None, k=3, verbose=True):
"""
The highlevel function to make a spline fit.
lcs : a list of lightcurves (I will fit the spline through the merged curves)
Specify either
knotstep : spacing of knots
or
n : how many knots to place
or
knots : give me actual initial knot locations, for instance prepared by seasonknots.
stab : do you want to insert stabilization points ?
stabext : number of days to the left and right to fill with stabilization points
stabgap : interval of days considered as a gap to fill with stab points.
stabstep : step of stab points
stabmagerr : if negative, absolute mag err of stab points. If positive, the error bar will be stabmagerr times the median error bar of the data points.
bokit : number of BOK iterations (put to 0 to not move knots)
bokeps : epsilon of BOK
boktests : number of test positions for each knot
"""
dp = merge(lcs, stab=stab, stabext=stabext, stabgap=stabgap, stabstep=stabstep, stabmagerr=stabmagerr, stabrampsize=stabrampsize, stabrampfact=stabrampfact)
s = Spline(dp, k=k, bokeps=bokeps, boktests=boktests, bokwindow=bokwindow)
if knots==None:
if n == None:
s.uniknots(nint = knotstep, n = False)
else :
s.uniknots(nint = n, n = True)
else:
s.setintt(knots)
#if stab:
# s.unistabknots(stabknotn,n=True)
for n in range(bokit):
s.buildbounds(verbose=verbose)
s.bok(bokmethod="BF", verbose=verbose)
s.optc()
s.r2(nostab=True) # This is to set s.lastr2nostab
return s
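# Illustrative usage sketch (an addition; all parameter values are assumptions):
#
# spline = fit(lcs, knotstep=20.0, stab=True, bokit=2, verbose=False)
# print spline # something like "~3/ulblb/12~"
# chi2 = spline.r2(nostab=True)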
def seasonknots(lcs, knotstep, ingap, seasongap=60.0):
"""
A little helper to get some knot locations inside of seasons only
knotstep is for inside seasons
ingap is the number of knots inside gaps.
"""
knots = []
#knotstep = 10
dp = merge(lcs, splitup=True, deltat=0.000001, sort=True, stab=False)
gaps = dp.jds[1:] - dp.jds[:-1]
gapindices = list(np.arange(len(dp.jds)-1)[gaps > seasongap])
# knots inside of seasons :
a = dp.jds[0]
for gapi in gapindices:
b = dp.jds[gapi]
#print (a, b)
knots.append(np.linspace(a, b, int(float(b - a)/float(knotstep))))
a = dp.jds[gapi+1]
b = dp.jds[-1]
knots.append(np.linspace(a, b, int(float(b - a)/float(knotstep))))
# knots inside of gaps
for gapi in gapindices:
a = dp.jds[gapi]
b = dp.jds[gapi+1]
knots.append(np.linspace(a, b, ingap+2)[1:-1])
knots = np.concatenate(knots)
knots.sort()
return knots
#print gapindices
"""
for n in range(len(gapindices)):
i = gapindices[n]
a = self.jds[i]
b = self.jds[i+1]
newgapjds = np.linspace(a, b, float(b-a)/float(self.stabstep))[1:-1]
newgapindices = i + 1 + np.zeros(len(newgapjds))
newgapmags = np.interp(newgapjds, [a, b], [self.mags[i], self.mags[i+1]])
newgapmagerrs = absstabmagerr * np.ones(newgapmags.shape)
newgapmask = np.zeros(len(newgapjds), dtype=np.bool)
self.jds = np.insert(self.jds, newgapindices, newgapjds)
knotstep
"""
def r2(lcs, spline, nosquare=False):
"""
I do not modify the spline (not even its datapoints) !
Just evaluate the quality of the match, returning an r2 (without any stab points, of course).
This is used if you want to optimize something on the lightcurves without touching the spline.
Of course, I do not touch lastr2nostab or lastr2stab of the spline ! So this has really nothing
to do with source spline optimization !
"""
myspline = spline.copy()
newdp = pycs.gen.spl.merge(lcs, stab=False) # Indeed we do not care about stabilization points here.
myspline.updatedp(newdp, dpmethod="leave")
return myspline.r2(nostab=True, nosquare=nosquare)
def mltv(lcs, spline, weight=True):
"""
Calculates the TV norm of the difference between a lightcurve (disregarding any microlensing !) and the spline.
I return the sum over the curves in lcs.
Also returns an abs(chi)-like distance between the lcs without ML and the spline
If weight is True, we weight the terms in sums according to their error bars.
Idea : weight the total variation somehow by the error bars ! Not sure if needed, the spline is already weighted.
"""
#import matplotlib.pyplot as plt
tv = 0.0
dist = 0.0
for l in lcs:
# We have a spline, and a lightcurve
lmags = l.getmags(noml = True) # We get the mags without ML (but with mag and fluxshift !)
ljds = l.getjds() # Including any time shifts.
# Evaluating the spline at those jds :
splinemags = spline.eval(ljds)
# The residues :
res = lmags - splinemags
#plt.plot(ljds, res, "r.")
#plt.show()
if weight == False:
tv += np.sum(np.fabs(res[1:] - res[:-1]))
dist += np.sum(np.fabs(res))
else:
magerrs = l.getmagerrs()
a = res[1:]
aerrs = magerrs[1:]
b = res[:-1]
berrs = magerrs[:-1]
vari = np.fabs(a - b)
varierrs = np.sqrt(aerrs * aerrs + berrs * berrs)
tv += np.sum(vari/varierrs)
dist += np.sum(np.fabs(res) / np.fabs(magerrs))
return (tv, dist)
def optcmltv(lcs, spline, verbose=True):
"""
I will optimize the coefficients of the spline so to minimize the mltv.
I do not use the microlensing of the lcs at all !
Simple powell optimization, slow. A pity.
Add BOK and time shifts in there and it might be bingo !
Would be more efficient if we add knots on the fly
"""
inic = spline.getc(m=2)
def setc(c):
spline.setc(c, m=2)
def errorfct(c):
setc(c)
(tv, dist) = mltv(lcs, spline, weight=False)
print "put weight"
return tv + 0.1*spline.tv()
minout = spopt.fmin_powell(errorfct, inic, full_output=1, disp=verbose)
copt = minout[0]
# We find a common shift to all coeffs so that the level matches
meanc = np.mean(spline.getc(m=2))
meanmag = np.mean(np.concatenate([l.getmags(noml = True) for l in lcs]))
setc(copt)
spline.c += meanmag - meanc
|
COSMOGRAIL/PyCS
|
pycs/gen/spl.py
|
Python
|
gpl-3.0
| 40,478 | 0.046766 |
# -*- coding: utf-8 -*-
"""
***************************************************************************
FixedDistanceBuffer.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import *
from qgis.core import *
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterNumber
from processing.core.outputs import OutputVector
import Buffer as buff
from processing.tools import dataobjects
class FixedDistanceBuffer(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
FIELD = 'FIELD'
DISTANCE = 'DISTANCE'
SEGMENTS = 'SEGMENTS'
DISSOLVE = 'DISSOLVE'
# =========================================================================
# def getIcon(self):
# return QtGui.QIcon(os.path.dirname(__file__) + "/icons/buffer.png")
# =========================================================================
def defineCharacteristics(self):
self.name = 'Fixed distance buffer'
self.group = 'Vector geometry tools'
self.addParameter(ParameterVector(self.INPUT, 'Input layer',
[ParameterVector.VECTOR_TYPE_ANY]))
self.addParameter(ParameterNumber(self.DISTANCE, 'Distance',
default=10.0))
self.addParameter(ParameterNumber(self.SEGMENTS, 'Segments', 1,
default=5))
self.addParameter(ParameterBoolean(self.DISSOLVE, 'Dissolve result',
False))
self.addOutput(OutputVector(self.OUTPUT, 'Buffer'))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
distance = self.getParameterValue(self.DISTANCE)
dissolve = self.getParameterValue(self.DISSOLVE)
segments = int(self.getParameterValue(self.SEGMENTS))
writer = self.getOutputFromName(
self.OUTPUT).getVectorWriter(layer.pendingFields().toList(),
QGis.WKBPolygon, layer.crs())
buff.buffering(progress, writer, distance, None, False, layer,
dissolve, segments)
|
luofei98/qgis
|
python/plugins/processing/algs/qgis/ftools/FixedDistanceBuffer.py
|
Python
|
gpl-2.0
| 3,213 | 0 |
import datetime, json, re
from consts.playoff_type import PlayoffType
from consts.district_type import DistrictType
from consts.event_type import EventType
from google.appengine.ext import ndb
from google.appengine.ext.ndb.tasklets import Future
from models.district import District
from models.event_details import EventDetails
from models.location import Location
class Event(ndb.Model):
"""
Events represent FIRST Robotics Competition events, both official and unofficial.
key_name is like '2010ct'
"""
name = ndb.StringProperty()
event_type_enum = ndb.IntegerProperty(required=True)
short_name = ndb.StringProperty(indexed=False) # Should not contain "Regional" or "Division", like "Hartford"
event_short = ndb.StringProperty(required=True, indexed=False) # Smaller abbreviation like "CT"
first_code = ndb.StringProperty() # Event code used in FIRST's API, if different from event_short
year = ndb.IntegerProperty(required=True)
event_district_enum = ndb.IntegerProperty(default=DistrictType.NO_DISTRICT) # Deprecated, use district_key instead
district_key = ndb.KeyProperty(kind=District)
start_date = ndb.DateTimeProperty()
end_date = ndb.DateTimeProperty()
playoff_type = ndb.IntegerProperty()
# venue, venue_address, city, state_prov, country, and postalcode are from FIRST
venue = ndb.StringProperty(indexed=False) # Name of the event venue
venue_address = ndb.StringProperty(indexed=False) # Most detailed venue address (includes venue, street, and location separated by \n)
city = ndb.StringProperty() # Equivalent to locality. From FRCAPI
state_prov = ndb.StringProperty() # Equivalent to region. From FRCAPI
country = ndb.StringProperty() # From FRCAPI
postalcode = ndb.StringProperty() # From ElasticSearch only. String because it can be like "95126-1215"
# Normalized address from the Google Maps API, constructed using the above
normalized_location = ndb.StructuredProperty(Location)
timezone_id = ndb.StringProperty() # such as 'America/Los_Angeles' or 'Asia/Jerusalem'
official = ndb.BooleanProperty(default=False) # Is the event FIRST-official?
first_eid = ndb.StringProperty() # from USFIRST
parent_event = ndb.KeyProperty() # This is the division -> event champs relationship
divisions = ndb.KeyProperty(repeated=True) # event champs -> all divisions
facebook_eid = ndb.StringProperty(indexed=False) # from Facebook
custom_hashtag = ndb.StringProperty(indexed=False) # Custom HashTag
website = ndb.StringProperty(indexed=False)
webcast_json = ndb.TextProperty(indexed=False) # list of dicts, valid keys include 'type' and 'channel'
enable_predictions = ndb.BooleanProperty(default=False)
remap_teams = ndb.JsonProperty() # Map of temporary team numbers to pre-rookie and B teams
created = ndb.DateTimeProperty(auto_now_add=True, indexed=False)
updated = ndb.DateTimeProperty(auto_now=True, indexed=False)
def __init__(self, *args, **kw):
# store set of affected references referenced keys for cache clearing
# keys must be model properties
self._affected_references = {
'key': set(),
'year': set(),
'district_key': set()
}
self._awards = None
self._details = None
self._location = None
self._city_state_country = None
self._matches = None
self._teams = None
self._venue_address_safe = None
self._webcast = None
self._updated_attrs = [] # Used in EventManipulator to track what changed
self._week = None
super(Event, self).__init__(*args, **kw)
@ndb.tasklet
def get_awards_async(self):
from database import award_query
self._awards = yield award_query.EventAwardsQuery(self.key_name).fetch_async()
@property
def alliance_selections(self):
if self.details is None:
return None
else:
return self.details.alliance_selections
@property
def alliance_teams(self):
"""
Load a list of team keys playing in elims
"""
alliances = self.alliance_selections
if alliances is None:
return []
teams = []
for alliance in alliances:
for pick in alliance['picks']:
teams.append(pick)
return teams
@property
def awards(self):
if self._awards is None:
self.get_awards_async().wait()
return self._awards
@property
def details(self):
if self._details is None:
self._details = EventDetails.get_by_id(self.key.id())
elif type(self._details) == Future:
self._details = self._details.get_result()
return self._details
def prep_details(self):
if self._details is None:
self._details = ndb.Key(EventDetails, self.key.id()).get_async()
@property
def district_points(self):
if self.details is None:
return None
else:
return self.details.district_points
@property
def playoff_advancement(self):
if self.details is None:
return None
else:
return self.details.playoff_advancement.get(
"advancement") if self.details.playoff_advancement else None
@property
def playoff_bracket(self):
if self.details is None:
return None
else:
return self.details.playoff_advancement.get(
"bracket") if self.details.playoff_advancement else None
@ndb.tasklet
def get_matches_async(self):
if self._matches is None:
from database import match_query
self._matches = yield match_query.EventMatchesQuery(self.key_name).fetch_async()
def prep_matches(self):
if self._matches is None:
from database import match_query
self._matches = match_query.EventMatchesQuery(self.key_name).fetch_async()
@property
def matches(self):
if self._matches is None:
self.get_matches_async().wait()
elif type(self._matches) == Future:
self._matches = self._matches.get_result()
return self._matches
def time_as_utc(self, time):
import pytz
if self.timezone_id is not None:
tz = pytz.timezone(self.timezone_id)
try:
time = time - tz.utcoffset(time)
except (pytz.NonExistentTimeError, pytz.AmbiguousTimeError): # may happen during DST
time = time - tz.utcoffset(time + datetime.timedelta(hours=1)) # add offset to get out of non-existent time
return time
def local_time(self):
import pytz
now = datetime.datetime.now()
if self.timezone_id is not None:
tz = pytz.timezone(self.timezone_id)
try:
now = now + tz.utcoffset(now)
except (pytz.NonExistentTimeError, pytz.AmbiguousTimeError): # may happen during DST
now = now + tz.utcoffset(now + datetime.timedelta(hours=1)) # add offset to get out of non-existent time
return now
def withinDays(self, negative_days_before, days_after):
if not self.start_date or not self.end_date:
return False
now = self.local_time()
after_start = self.start_date.date() + datetime.timedelta(days=negative_days_before) <= now.date()
before_end = self.end_date.date() + datetime.timedelta(days=days_after) >= now.date()
return (after_start and before_end)
@property
def now(self):
if self.timezone_id is not None:
return self.withinDays(0, 0)
else:
return self.within_a_day # overestimate what is "now" if no timezone
@property
def within_a_day(self):
return self.withinDays(-1, 1)
@property
def past(self):
return self.end_date.date() < self.local_time().date() and not self.now
@property
def future(self):
return self.start_date.date() > self.local_time().date() and not self.now
@property
def starts_today(self):
return self.start_date.date() == self.local_time().date()
@property
def ends_today(self):
return self.end_date.date() == self.local_time().date()
@property
def week(self):
"""
Returns the week of the event relative to the first official season event as an integer
Returns None if the event is not of type NON_CMP_EVENT_TYPES or is not official
"""
if self.event_type_enum not in EventType.NON_CMP_EVENT_TYPES or not self.official:
return None
if self._week:
return self._week
# Cache week_start for the same context
from context_cache import context_cache
cache_key = '{}_season_start:{}'.format(self.year, ndb.get_context().__hash__())
season_start = context_cache.get(cache_key)
if season_start is None:
e = Event.query(
Event.year==self.year,
Event.event_type_enum.IN(EventType.NON_CMP_EVENT_TYPES),
Event.start_date!=None
).order(Event.start_date).fetch(1, projection=[Event.start_date])
if e:
first_start_date = e[0].start_date
days_diff = 0
# Before 2018, event weeks start on Wednesdays
if self.year < 2018:
days_diff = 2 # 2 is Wednesday
# Find the closest start weekday (Monday or Wednesday) to the first event - this is our season start
diff_from_week_start = (first_start_date.weekday() - days_diff) % 7
diff_from_week_start = min([diff_from_week_start, diff_from_week_start - 7], key=abs)
season_start = first_start_date - datetime.timedelta(days=diff_from_week_start)
else:
season_start = None
context_cache.set(cache_key, season_start)
if self._week is None and season_start is not None:
# Round events that occur just before the official start-of-season to the closest week
days = max((self.start_date - season_start).days, 0)
self._week = days / 7
return self._week
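# Worked example (added for illustration): for a 2018+ season whose first event
# starts on Thursday March 1, the nearest Monday (February 26) becomes
# season_start, so an event starting March 7 gets week = 9 / 7 = 1.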
@property
def week_str(self):
if self.week is None:
return None
if self.year == 2016:
return "Week {}".format(0.5 if self.week == 0 else self.week)
return "Week {}".format(self.week + 1)
@property
def is_season_event(self):
return self.event_type_enum in EventType.SEASON_EVENT_TYPES
@ndb.tasklet
def get_teams_async(self):
from database import team_query
self._teams = yield team_query.EventTeamsQuery(self.key_name).fetch_async()
@property
def teams(self):
if self._teams is None:
self.get_teams_async().wait()
return self._teams
@ndb.toplevel
def prepAwardsMatchesTeams(self):
yield self.get_awards_async(), self.get_matches_async(), self.get_teams_async()
@ndb.toplevel
def prepTeams(self):
yield self.get_teams_async()
@ndb.toplevel
def prepTeamsMatches(self):
yield self.get_matches_async(), self.get_teams_async()
@property
def matchstats(self):
if self.details is None:
return None
else:
return self.details.matchstats
@property
def rankings(self):
if self.details is None:
return None
else:
return self.details.rankings
@property
def location(self):
if self._location is None:
split_location = []
if self.city:
split_location.append(self.city)
if self.state_prov:
if self.postalcode:
split_location.append(self.state_prov + ' ' + self.postalcode)
else:
split_location.append(self.state_prov)
if self.country:
split_location.append(self.country)
self._location = ', '.join(split_location)
return self._location
@property
def city_state_country(self):
if not self._city_state_country and self.nl:
self._city_state_country = self.nl.city_state_country
if not self._city_state_country:
location_parts = []
if self.city:
location_parts.append(self.city)
if self.state_prov:
location_parts.append(self.state_prov)
if self.country:
country = self.country
if self.country == 'US':
country = 'USA'
location_parts.append(country)
self._city_state_country = ', '.join(location_parts)
return self._city_state_country
@property
def nl(self):
return self.normalized_location
@property
def venue_or_venue_from_address(self):
if self.venue:
return self.venue
else:
try:
return self.venue_address.split('\r\n')[0]
except:
return None
@property
def venue_address_safe(self):
"""
Construct (not detailed) venue address if detailed venue address doesn't exist
"""
if not self.venue_address:
if not self.venue or not self.location:
self._venue_address_safe = None
else:
self._venue_address_safe = "{}\n{}".format(self.venue.encode('utf-8'), self.location.encode('utf-8'))
else:
self._venue_address_safe = self.venue_address.replace('\r\n', '\n')
return self._venue_address_safe
@property
def webcast(self):
"""
Lazy load parsing webcast JSON
"""
if self._webcast is None:
try:
self._webcast = json.loads(self.webcast_json)
# Sort firstinspires channels to the front, keep the order of the rest
self._webcast = sorted(self._webcast, key=lambda w: 0 if (w['type'] == 'twitch' and w['channel'].startswith('firstinspires')) else 1)
except Exception, e:
self._webcast = None
return self._webcast
@property
def webcast_status(self):
from helpers.webcast_online_helper import WebcastOnlineHelper
WebcastOnlineHelper.add_online_status(self.current_webcasts)
overall_status = 'offline'
for webcast in self.current_webcasts:
status = webcast.get('status')
if status == 'online':
overall_status = 'online'
break
elif status == 'unknown':
overall_status = 'unknown'
return overall_status
@property
def current_webcasts(self):
if not self.webcast or not self.within_a_day:
return []
# Filter by date
current_webcasts = []
for webcast in self.webcast:
if 'date' in webcast:
webcast_datetime = datetime.datetime.strptime(webcast['date'], "%Y-%m-%d")
if self.local_time().date() == webcast_datetime.date():
current_webcasts.append(webcast)
else:
current_webcasts.append(webcast)
return current_webcasts
@property
def online_webcasts(self):
current_webcasts = self.current_webcasts
from helpers.webcast_online_helper import WebcastOnlineHelper
WebcastOnlineHelper.add_online_status(current_webcasts)
return filter(lambda x: x.get('status', '') != 'offline', current_webcasts if current_webcasts else [])
@property
def has_first_official_webcast(self):
return any([('firstinspires' in w['channel']) for w in self.webcast]) if self.webcast else False
@property
def division_keys_json(self):
keys = [key.id() for key in self.divisions]
return json.dumps(keys)
@property
def key_name(self):
"""
Returns the string of the key_name of the Event object before writing it.
"""
return str(self.year) + self.event_short
@property
def facebook_event_url(self):
"""
Return a string of the Facebook Event URL.
"""
return "http://www.facebook.com/event.php?eid=%s" % self.facebook_eid
@property
def details_url(self):
"""
Returns the URL pattern for the link to this Event on TBA
"""
return "/event/%s" % self.key_name
@property
def gameday_url(self):
"""
Returns the URL pattern for the link to watch webcasts in Gameday
"""
if self.webcast:
return "/gameday/{}".format(self.key_name)
else:
return None
@property
def hashtag(self):
"""
Return the hashtag used for the event.
"""
if self.custom_hashtag:
return self.custom_hashtag
else:
return "frc" + self.event_short
# Deprecated, still here to keep GAE clean.
webcast_url = ndb.StringProperty(indexed=False)
@classmethod
def validate_key_name(self, event_key):
key_name_regex = re.compile(r'^[1-9]\d{3}[a-z]+[0-9]{0,2}$')
match = re.match(key_name_regex, event_key)
return True if match else False
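# Illustrative examples (added): "2010ct" and "2017micmp2" match the pattern,
# while "10ct" (two-digit year) and "2010CT" (uppercase) do not.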
@property
def event_district_str(self):
from database.district_query import DistrictQuery
if self.district_key is None:
return None
district = DistrictQuery(self.district_key.id()).fetch()
return district.display_name if district else None
@property
def event_district_abbrev(self):
if self.district_key is None:
return None
else:
return self.district_key.id()[4:]
@property
def event_district_key(self):
if self.district_key is None:
return None
else:
return self.district_key.id()
@property
def event_type_str(self):
return EventType.type_names.get(self.event_type_enum)
@property
def display_name(self):
return self.name if self.short_name is None else self.short_name
@property
def normalized_name(self):
if self.event_type_enum == EventType.CMP_FINALS:
if self.year >= 2017:
return '{} {}'.format(self.city, 'Championship')
else:
return 'Championship'
elif self.short_name and self.event_type_enum != EventType.FOC:
if self.event_type_enum == EventType.OFFSEASON:
return self.short_name
else:
return '{} {}'.format(self.short_name, EventType.short_type_names[self.event_type_enum])
else:
return self.name
@property
def first_api_code(self):
if self.first_code is None:
return self.event_short.upper()
return self.first_code.upper()
@property
def is_in_season(self):
"""
If the Event is of a regular season type.
"""
return self.event_type_enum in EventType.SEASON_EVENT_TYPES
@property
def is_offseason(self):
"""
'Offseason' events include preseason, offseason, unlabeled events.
"""
return not self.is_in_season
@property
def next_match(self):
from helpers.match_helper import MatchHelper
upcoming_matches = MatchHelper.upcomingMatches(self.matches, 1)
if upcoming_matches:
return upcoming_matches[0]
else:
return None
@property
def previous_match(self):
from helpers.match_helper import MatchHelper
recent_matches = MatchHelper.recentMatches(self.matches, 1)
if recent_matches:
return recent_matches[0]
else:
return None
def team_awards(self):
# Returns a dictionary of awards for teams
team_awards = {} # Key is a Team key, value is an array of Awards that team won
for award in self.awards:
for team_key in award.team_list:
a = team_awards.get(team_key, [])
a.append(award)
team_awards[team_key] = a
return team_awards
|
phil-lopreiato/the-blue-alliance
|
models/event.py
|
Python
|
mit
| 20,440 | 0.002544 |
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import uuidutils
from urllib.parse import parse_qs
from urllib.parse import urlparse
from designateclient import exceptions
def resolve_by_name(func, name, *args):
"""
Helper to resolve a "name" a la foo.com to its ID by using the REST API's
query support and filtering on name.
"""
if uuidutils.is_uuid_like(name):
return name
results = func(criterion={"name": "%s" % name}, *args)
length = len(results)
if length == 1:
return results[0]["id"]
elif length == 0:
raise exceptions.NotFound("Name %s didn't resolve" % name)
else:
msg = "Multiple matches found for %s, please use ID instead." % name
raise exceptions.NoUniqueMatch(msg)
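# Illustrative usage sketch (an addition; the zone list call is an assumption
# about the v2 client layout):
#
# zone_id = resolve_by_name(client.zones.list, 'example.com.')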
def parse_query_from_url(url):
"""
Helper to get key bits of data from the "next" url returned
from the API on collections
:param url:
:return: dict
"""
values = parse_qs(urlparse(url)[4])
return {k: values[k][0] for k in values.keys()}
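# Worked example (added): for
# url = "http://example.com/v2/zones?limit=20&marker=abc"
# this returns {'limit': '20', 'marker': 'abc'}.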
def get_all(function, criterion=None, args=None):
"""
:param function: Function to be called to get data
:param criterion: dict of filters to be applied
:param args: arguments to be given to the function
:return: DesignateList()
"""
criterion = criterion or {}
args = args or []
data = function(*args, criterion=criterion)
returned_data = data
while True:
if data.next_page:
for k, v in data.next_link_criterion.items():
criterion[k] = v
data = function(*args, criterion=criterion)
returned_data.extend(data)
else:
break
return returned_data
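# Illustrative usage sketch (an addition; the zone list call is an assumption):
#
# zones = get_all(client.zones.list, criterion={'name': '%example%'})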
|
openstack/python-designateclient
|
designateclient/v2/utils.py
|
Python
|
apache-2.0
| 2,334 | 0 |
from .. import tool
def test_keygen():
def get_keyring():
WheelKeys, keyring = tool.get_keyring()
class WheelKeysTest(WheelKeys):
def save(self):
pass
class keyringTest:
backend = keyring.backend
@classmethod
def get_keyring(cls):
class keyringTest2:
pw = None
def set_password(self, a, b, c):
self.pw = c
def get_password(self, a, b):
return self.pw
return keyringTest2()
return WheelKeysTest, keyringTest
tool.keygen(get_keyring=get_keyring)
|
IsCoolEntertainment/debpkg_python-wheel
|
wheel/test/test_tool.py
|
Python
|
mit
| 746 | 0.013405 |
# -*- test-case-name: foolscap.test.test_crypto -*-
available = False # hack to deal with half-broken imports in python <2.4
from OpenSSL import SSL
# we try to use ssl support classes from Twisted, if it is new enough. If
# not, we pull them from a local copy of sslverify. The funny '_ssl' import
# stuff is used to appease pyflakes, which otherwise complains that we're
# redefining an imported name.
from twisted.internet import ssl
if hasattr(ssl, "DistinguishedName"):
# Twisted-2.5 will contain these names
_ssl = ssl
CertificateOptions = ssl.CertificateOptions
else:
# but it hasn't been released yet (as of 16-Sep-2006). Without them, we
# cannot use any encrypted Tubs. We fall back to using a private copy of
# sslverify.py, copied from the Divmod tree.
import sslverify
_ssl = sslverify
from sslverify import OpenSSLCertificateOptions as CertificateOptions
DistinguishedName = _ssl.DistinguishedName
KeyPair = _ssl.KeyPair
Certificate = _ssl.Certificate
PrivateCertificate = _ssl.PrivateCertificate
from twisted.internet import error
if hasattr(error, "CertificateError"):
# Twisted-2.4 contains this, and it is used by twisted.internet.ssl
CertificateError = error.CertificateError
else:
class CertificateError(Exception):
"""
We did not find a certificate where we expected to find one.
"""
from foolscap import base32
peerFromTransport = Certificate.peerFromTransport
class MyOptions(CertificateOptions):
def _makeContext(self):
ctx = CertificateOptions._makeContext(self)
def alwaysValidate(conn, cert, errno, depth, preverify_ok):
# This function is called to validate the certificate received by
# the other end. OpenSSL calls it multiple times, each time it
# see something funny, to ask if it should proceed.
# We do not care about certificate authorities or revocation
# lists, we just want to know that the certificate has a valid
# signature and follow the chain back to one which is
# self-signed. The TubID will be the digest of one of these
# certificates. We need to protect against forged signatures, but
# not the usual SSL concerns about invalid CAs or revoked
# certificates.
# these constants are from openssl-0.9.7g/crypto/x509/x509_vfy.h
# and do not appear to be exposed by pyopenssl. Ick. TODO. We
# could just always return '1' here (ignoring all errors), but I
# think that would ignore forged signatures too, which would
# obviously be a security hole.
things_are_ok = (0, # X509_V_OK
9, # X509_V_ERR_CERT_NOT_YET_VALID
10, # X509_V_ERR_CERT_HAS_EXPIRED
18, # X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT
19, # X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN
)
if errno in things_are_ok:
return 1
# TODO: log the details of the error, because otherwise they get
# lost in the PyOpenSSL exception that will eventually be raised
# (possibly OpenSSL.SSL.Error: certificate verify failed)
# I think that X509_V_ERR_CERT_SIGNATURE_FAILURE is the most
# obvious sign of hostile attack.
return 0
# VERIFY_PEER means we ask the other end for their certificate.
# not adding VERIFY_FAIL_IF_NO_PEER_CERT means it's ok if they don't
# give us one (i.e. if an anonymous client connects to an
# authenticated server). I don't know what VERIFY_CLIENT_ONCE does.
ctx.set_verify(SSL.VERIFY_PEER |
#SSL.VERIFY_FAIL_IF_NO_PEER_CERT |
SSL.VERIFY_CLIENT_ONCE,
alwaysValidate)
return ctx
def digest32(colondigest):
digest = "".join([chr(int(c,16)) for c in colondigest.split(":")])
digest = base32.encode(digest)
return digest
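# Worked note (added for illustration): digest32("1a:2b:...") unpacks the
# colon-separated hex bytes and base32-encodes them with foolscap's alphabet;
# this is how an OpenSSL certificate digest becomes a TubID string.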
available = True
|
pexip/os-foolscap
|
foolscap/crypto.py
|
Python
|
mit
| 4,123 | 0.003638 |
# Copyright 2012 OpenStack LLC.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import unittest
from quantumclient.common import exceptions
from quantumclient.quantum import v2_0 as quantumV20
class CLITestArgs(unittest.TestCase):
def test_empty(self):
_mydict = quantumV20.parse_args_to_dict([])
self.assertEqual({}, _mydict)
def test_default_bool(self):
_specs = ['--my_bool', '--arg1', 'value1']
_mydict = quantumV20.parse_args_to_dict(_specs)
self.assertTrue(_mydict['my_bool'])
def test_bool_true(self):
_specs = ['--my-bool', 'type=bool', 'true', '--arg1', 'value1']
_mydict = quantumV20.parse_args_to_dict(_specs)
self.assertTrue(_mydict['my_bool'])
def test_bool_false(self):
_specs = ['--my_bool', 'type=bool', 'false', '--arg1', 'value1']
_mydict = quantumV20.parse_args_to_dict(_specs)
self.assertFalse(_mydict['my_bool'])
def test_nargs(self):
_specs = ['--tag', 'x', 'y', '--arg1', 'value1']
_mydict = quantumV20.parse_args_to_dict(_specs)
self.assertTrue('x' in _mydict['tag'])
self.assertTrue('y' in _mydict['tag'])
def test_badarg(self):
_specs = ['--tag=t', 'x', 'y', '--arg1', 'value1']
self.assertRaises(exceptions.CommandError,
quantumV20.parse_args_to_dict, _specs)
def test_arg(self):
_specs = ['--tag=t', '--arg1', 'value1']
self.assertEqual('value1',
quantumV20.parse_args_to_dict(_specs)['arg1'])
def test_dict_arg(self):
_specs = ['--tag=t', '--arg1', 'type=dict', 'key1=value1,key2=value2']
arg1 = quantumV20.parse_args_to_dict(_specs)['arg1']
self.assertEqual('value1', arg1['key1'])
self.assertEqual('value2', arg1['key2'])
def test_list_of_dict_arg(self):
_specs = ['--tag=t', '--arg1', 'type=dict',
'list=true', 'key1=value1,key2=value2']
arg1 = quantumV20.parse_args_to_dict(_specs)['arg1']
self.assertEqual('value1', arg1[0]['key1'])
self.assertEqual('value2', arg1[0]['key2'])
|
redhat-openstack/python-neutronclient
|
quantumclient/tests/unit/test_casual_args.py
|
Python
|
apache-2.0
| 2,739 | 0 |
from kvmagent import kvmagent
from zstacklib.utils import jsonobject
from zstacklib.utils import http
from zstacklib.utils import log
from zstacklib.utils.bash import *
from zstacklib.utils import linux
from zstacklib.utils import thread
from jinja2 import Template
import os.path
import re
import time
import traceback
from prometheus_client.core import GaugeMetricFamily,REGISTRY
from prometheus_client import start_http_server
logger = log.get_logger(__name__)
class PrometheusPlugin(kvmagent.KvmAgent):
COLLECTD_PATH = "/prometheus/collectdexporter/start"
@kvmagent.replyerror
@in_bash
def start_collectd_exporter(self, req):
cmd = jsonobject.loads(req[http.REQUEST_BODY])
rsp = kvmagent.AgentResponse()
eths = bash_o("ls /sys/class/net").split()
interfaces = []
for eth in eths:
eth = eth.strip(' \t\n\r')
if eth == 'lo': continue
elif eth.startswith('vnic'): continue
elif eth.startswith('outer'): continue
elif eth.startswith('br_'): continue
elif not eth: continue
else:
interfaces.append(eth)
conf_path = os.path.join(os.path.dirname(cmd.binaryPath), 'collectd.conf')
conf = '''Interval {{INTERVAL}}
FQDNLookup false
LoadPlugin syslog
LoadPlugin aggregation
LoadPlugin cpu
LoadPlugin disk
LoadPlugin interface
LoadPlugin memory
LoadPlugin network
LoadPlugin virt
<Plugin aggregation>
<Aggregation>
#Host "unspecified"
Plugin "cpu"
#PluginInstance "unspecified"
Type "cpu"
#TypeInstance "unspecified"
GroupBy "Host"
GroupBy "TypeInstance"
CalculateNum false
CalculateSum false
CalculateAverage true
CalculateMinimum false
CalculateMaximum false
CalculateStddev false
</Aggregation>
</Plugin>
<Plugin cpu>
ReportByCpu true
ReportByState true
ValuesPercentage true
</Plugin>
<Plugin disk>
Disk "/^sd/"
Disk "/^hd/"
Disk "/^vd/"
IgnoreSelected false
</Plugin>
<Plugin "interface">
{% for i in INTERFACES -%}
Interface "{{i}}"
{% endfor -%}
IgnoreSelected false
</Plugin>
<Plugin memory>
ValuesAbsolute true
ValuesPercentage false
</Plugin>
<Plugin virt>
Connection "qemu:///system"
RefreshInterval {{INTERVAL}}
HostnameFormat name
PluginInstanceFormat name
</Plugin>
<Plugin network>
Server "localhost" "25826"
</Plugin>
'''
tmpt = Template(conf)
conf = tmpt.render({
'INTERVAL': cmd.interval,
'INTERFACES': interfaces,
})
need_restart_collectd = False
if os.path.exists(conf_path):
with open(conf_path, 'r') as fd:
old_conf = fd.read()
if old_conf != conf:
with open(conf_path, 'w') as fd:
fd.write(conf)
need_restart_collectd = True
else:
with open(conf_path, 'w') as fd:
fd.write(conf)
need_restart_collectd = True
pid = linux.find_process_by_cmdline(['collectd', conf_path])
if not pid:
bash_errorout('collectd -C %s' % conf_path)
else:
if need_restart_collectd:
bash_errorout('kill -9 %s' % pid)
bash_errorout('collectd -C %s' % conf_path)
pid = linux.find_process_by_cmdline([cmd.binaryPath])
if not pid:
EXPORTER_PATH = cmd.binaryPath
LOG_FILE = os.path.join(os.path.dirname(EXPORTER_PATH), cmd.binaryPath + '.log')
ARGUMENTS = cmd.startupArguments
if not ARGUMENTS:
ARGUMENTS = ""
bash_errorout('chmod +x {{EXPORTER_PATH}}')
bash_errorout("nohup {{EXPORTER_PATH}} {{ARGUMENTS}} >{{LOG_FILE}} 2>&1 < /dev/null &\ndisown")
return jsonobject.dumps(rsp)
    def install_collector(self):
class Collector(object):
def collect(self):
try:
ret = []
for c in kvmagent.metric_collectors:
ret.extend(c())
return ret
except Exception as e:
content = traceback.format_exc()
err = '%s\n%s\n' % (str(e), content)
logger.warn(err)
return []
REGISTRY.register(Collector())
def start(self):
http_server = kvmagent.get_http_server()
http_server.register_async_uri(self.COLLECTD_PATH, self.start_collectd_exporter)
        self.install_collector()
start_http_server(7069)
def stop(self):
pass
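
# Illustrative sketch (not part of the plugin): rendering a trimmed-down
# version of the collectd template above in isolation, with made-up values.
if __name__ == '__main__':
    _demo = Template('Interval {{INTERVAL}}\n'
                     '{% for i in INTERFACES -%}\n'
                     'Interface "{{i}}"\n'
                     '{% endfor -%}')
    print(_demo.render({'INTERVAL': 10, 'INTERFACES': ['eth0', 'eth1']}))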
|
live4thee/zstack-utility
|
kvmagent/kvmagent/plugins/prometheus.py
|
Python
|
apache-2.0
| 4,603 | 0.008473 |
import pytest
from tests.support.asserts import assert_error, assert_success
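# perform_actions wraps the W3C WebDriver "Perform Actions" command: a POST
# to /session/{session id}/actions whose body carries the action sequences.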
def perform_actions(session, actions):
return session.transport.send(
"POST",
"/session/{session_id}/actions".format(session_id=session.session_id),
{"actions": actions})
@pytest.mark.parametrize("action_type", ["none", "key", "pointer"])
def test_pause_positive_integer(session, action_type):
for valid_duration in [0, 1]:
actions = [{
"type": action_type,
"id": "foobar",
"actions": [{
"type": "pause",
"duration": valid_duration
}]
}]
response = perform_actions(session, actions)
assert_success(response)
actions = [{
"type": action_type,
"id": "foobar",
"actions": [{
"type": "pause",
"duration": -1
}]
}]
response = perform_actions(session, actions)
assert_error(response, "invalid argument")
@pytest.mark.parametrize("action_type", ["none", "key", "pointer"])
def test_pause_invalid_types(session, action_type):
for invalid_type in [0.0, None, "foo", True, [], {}]:
actions = [{
"type": action_type,
"id": "foobar",
"actions": [{
"type": "pause",
"duration": invalid_type
}]
}]
response = perform_actions(session, actions)
assert_error(response, "invalid argument")
@pytest.mark.parametrize("action_type", ["none", "key", "pointer"])
def test_pause_without_duration(session, action_type):
actions = [{
"type": action_type,
"id": "foobar",
"actions": [{
"type": "pause",
}]
}]
response = perform_actions(session, actions)
assert_success(response)
@pytest.mark.parametrize("action_type", ["none", "key", "pointer"])
def test_action_without_id(session, action_type):
actions = [{
"type": action_type,
"actions": [{
"type": "pause",
"duration": 1
}]
}]
response = perform_actions(session, actions)
assert_error(response, "invalid argument")
|
paulrouget/servo
|
tests/wpt/web-platform-tests/webdriver/tests/perform_actions/validity.py
|
Python
|
mpl-2.0
| 2,188 | 0 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import time
from geopy.distance import great_circle
from s2sphere import Cell, CellId, LatLng
from pokemongo_bot import inventory
from pokemongo_bot.base_task import BaseTask
from pokemongo_bot.item_list import Item
from pokemongo_bot.walkers.polyline_walker import PolylineWalker
from pokemongo_bot.walkers.step_walker import StepWalker
from pokemongo_bot.worker_result import WorkerResult
class PokemonHunter(BaseTask):
SUPPORTED_TASK_API_VERSION = 1
def __init__(self, bot, config):
super(PokemonHunter, self).__init__(bot, config)
def initialize(self):
self.destination = None
self.walker = None
self.search_cell_id = None
self.search_points = []
self.lost_counter = 0
self.no_log_until = 0
self.config_max_distance = self.config.get("max_distance", 2000)
self.config_hunt_all = self.config.get("hunt_all", False)
self.config_hunt_vip = self.config.get("hunt_vip", True)
self.config_hunt_pokedex = self.config.get("hunt_pokedex", True)
def work(self):
if not self.enabled:
return WorkerResult.SUCCESS
if self.get_pokeball_count() <= 0:
self.destination = None
self.last_cell_id = None
return WorkerResult.SUCCESS
now = time.time()
pokemons = self.get_nearby_pokemons()
if self.destination is None:
worth_pokemons = self.get_worth_pokemons(pokemons)
if len(worth_pokemons) > 0:
self.destination = worth_pokemons[0]
self.lost_counter = 0
self.logger.info("New destination at %(distance).2f meters: %(name)s", self.destination)
self.no_log_until = now + 60
if self.destination["s2_cell_id"] != self.search_cell_id:
self.search_points = self.get_search_points(self.destination["s2_cell_id"])
self.walker = PolylineWalker(self.bot, self.search_points[0][0], self.search_points[0][1])
self.search_cell_id = self.destination["s2_cell_id"]
self.search_points = self.search_points[1:] + self.search_points[:1]
else:
if self.no_log_until < now:
self.logger.info("There is no nearby pokemon worth hunting down [%s]", ", ".join(p["name"] for p in pokemons))
self.no_log_until = now + 120
self.last_cell_id = None
return WorkerResult.SUCCESS
if any(self.destination["encounter_id"] == p["encounter_id"] for p in self.bot.cell["catchable_pokemons"] + self.bot.cell["wild_pokemons"]):
self.destination = None
elif self.walker.step():
if not any(self.destination["encounter_id"] == p["encounter_id"] for p in pokemons):
self.lost_counter += 1
else:
self.lost_counter = 0
if self.lost_counter >= 3:
self.destination = None
else:
self.logger.info("Now searching for %(name)s", self.destination)
self.walker = StepWalker(self.bot, self.search_points[0][0], self.search_points[0][1])
self.search_points = self.search_points[1:] + self.search_points[:1]
elif self.no_log_until < now:
distance = great_circle(self.bot.position, (self.walker.dest_lat, self.walker.dest_lng)).meters
self.logger.info("Moving to destination at %s meters: %s", round(distance, 2), self.destination["name"])
self.no_log_until = now + 30
return WorkerResult.RUNNING
def get_pokeball_count(self):
return sum([inventory.items().get(ball.value).count for ball in [Item.ITEM_POKE_BALL, Item.ITEM_GREAT_BALL, Item.ITEM_ULTRA_BALL]])
def get_nearby_pokemons(self):
radius = self.config_max_distance
pokemons = [p for p in self.bot.cell["nearby_pokemons"] if self.get_distance(self.bot.start_position, p) <= radius]
for pokemon in pokemons:
pokemon["distance"] = self.get_distance(self.bot.position, p)
pokemon["name"] = inventory.pokemons().name_for(pokemon["pokemon_id"])
pokemons.sort(key=lambda p: p["distance"])
return pokemons
def get_worth_pokemons(self, pokemons):
if self.config_hunt_all:
worth_pokemons = pokemons
else:
worth_pokemons = []
if self.config_hunt_vip:
worth_pokemons += [p for p in pokemons if p["name"] in self.bot.config.vips]
if self.config_hunt_pokedex:
worth_pokemons += [p for p in pokemons if (p not in worth_pokemons) and any(not inventory.pokedex().seen(fid) for fid in self.get_family_ids(p))]
worth_pokemons.sort(key=lambda p: inventory.candies().get(p["pokemon_id"]).quantity)
return worth_pokemons
def get_family_ids(self, pokemon):
family_id = inventory.pokemons().data_for(pokemon["pokemon_id"]).first_evolution_id
ids = [family_id]
ids += inventory.pokemons().data_for(family_id).next_evolutions_all[:]
return ids
def get_distance(self, location, pokemon):
return great_circle(location, (pokemon["latitude"], pokemon["longitude"])).meters
def get_search_points(self, cell_id):
points = []
# For cell level 15
for c in Cell(CellId(cell_id)).subdivide():
for cc in c.subdivide():
latlng = LatLng.from_point(cc.get_center())
point = (latlng.lat().degrees, latlng.lng().degrees)
points.append(point)
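        # Reorder the 16 sub-cell centers (apparently so that consecutive
        # points are spatially adjacent, yielding a walkable path).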
points[0], points[1] = points[1], points[0]
points[14], points[15] = points[15], points[14]
point = points.pop(2)
points.insert(7, point)
point = points.pop(13)
points.insert(8, point)
closest = min(points, key=lambda p: great_circle(self.bot.position, p).meters)
index = points.index(closest)
return points[index:] + points[:index]
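
# Illustrative sketch (not part of the original class): the double subdivide()
# above splits one s2 cell into 4 children and each child into 4 again, i.e.
# 16 sub-cell centers per searched cell. The coordinates are hypothetical.
if __name__ == '__main__':
    _cell = CellId.from_lat_lng(LatLng.from_degrees(40.0, -3.0)).parent(15)
    _centers = [LatLng.from_point(cc.get_center())
                for c in Cell(_cell).subdivide()
                for cc in c.subdivide()]
    print(len(_centers))  # 16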
|
cmezh/PokemonGo-Bot
|
pokemongo_bot/cell_workers/pokemon_hunter.py
|
Python
|
mit
| 6,128 | 0.003427 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This file is part of XBMC Mega Pack Addon.
Copyright (C) 2014 Wolverine (xbmcmegapack@gmail.com)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see http://www.gnu.org/licenses/gpl-3.0.html
"""
class Languages_Persian():
'''Class that manages this specific menu context.'''
def open(self, plugin, menu):
menu.add_xplugins(plugin.get_xplugins(dictionaries=["Channels",
"Events", "Live", "Movies", "Sports", "TVShows"],
languages=["Persian"]))
|
xbmcmegapack/plugin.video.megapack.dev
|
resources/lib/menus/home_languages_persian.py
|
Python
|
gpl-3.0
| 1,111 | 0.002705 |
#! /usr/bin/env python
"""
Module with simplex (Nelder-Mead) optimization for defining the flux and
position of a companion using the Negative Fake Companion.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from .simplex_fmerit import chisquare
from ..var import frame_center
from ..conf import time_ini, timing, sep
__all__ = ['firstguess_from_coord',
'firstguess_simplex',
'firstguess']
def firstguess_from_coord(planet, center, cube, angs, PLSC, psf,
fwhm, annulus_width, aperture_radius, ncomp,
cube_ref=None, svd_mode='lapack', scaling=None,
fmerit='sum', collapse='median', f_range=None,
display=False, verbose=True, save=False, **kwargs):
"""
Determine a first guess for the flux of a companion at a given position
in the cube by doing a simple grid search evaluating the reduced chi2.
Parameters
----------
planet: numpy.array
The (x,y) position of the planet in the pca processed cube.
center: numpy.array
The (x,y) position of the cube center.
cube: numpy.array
The cube of fits images expressed as a numpy.array.
angs: numpy.array
The parallactic angle fits image expressed as a numpy.array.
PLSC: float
The platescale, in arcsec per pixel.
psf: numpy.array
The scaled psf expressed as a numpy.array.
fwhm : float
        The FWHM in pixels.
annulus_width: int, optional
The width in terms of the FWHM of the annulus on which the PCA is done.
aperture_radius: int, optional
The radius of the circular aperture in terms of the FWHM.
ncomp: int
The number of principal components.
cube_ref : array_like, 3d, optional
Reference library cube. For Reference Star Differential Imaging.
svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
Switch for different ways of computing the SVD and selected PCs.
scaling : {'temp-mean', 'temp-standard'} or None, optional
With None, no scaling is performed on the input data before SVD. With
"temp-mean" then temporal px-wise mean subtraction is done and with
"temp-standard" temporal mean centering plus scaling to unit variance
is done.
fmerit : {'sum', 'stddev'}, string optional
        Chooses the figure of merit to be used. stddev works better for close-in
companions sitting on top of speckle noise.
collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
Sets the way of collapsing the frames for producing a final image. If
None then the cube of residuals is used when measuring the function of
merit (instead of a single final frame).
f_range: numpy.array, optional
The range of flux tested values. If None, 20 values between 0 and 5000
are tested.
display: boolean, optional
If True, the figure chi2 vs. flux is displayed.
verbose: boolean
If True, display intermediate info in the shell.
save: boolean, optional
If True, the figure chi2 vs. flux is saved.
kwargs: dict, optional
Additional parameters are passed to the matplotlib plot method.
Returns
-------
out : numpy.array
The radial coordinates and the flux of the companion.
"""
    xy = planet - center
    r0 = np.sqrt(xy[0] ** 2 + xy[1] ** 2)
    theta0 = np.mod(np.arctan2(xy[1], xy[0]) / np.pi * 180, 360)
if f_range is not None:
n = f_range.shape[0]
else:
n = 20
f_range = np.linspace(0,5000,n)
chi2r = []
if verbose:
print('Step | flux | chi2r')
counter = 0
for j, f_guess in enumerate(f_range):
chi2r.append(chisquare((r0,theta0,f_guess), cube, angs, PLSC, psf,
fwhm, annulus_width, aperture_radius,(r0,theta0),
ncomp, cube_ref, svd_mode, scaling, fmerit,
collapse))
        if chi2r[j] > chi2r[j - 1]: counter += 1
if counter == 4: break
if verbose:
print('{}/{} {:.3f} {:.3f}'.format(j+1,n,f_guess,chi2r[j]))
chi2r = np.array(chi2r)
f0 = f_range[chi2r.argmin()]
if display:
plt.figure(figsize=kwargs.pop('figsize',(8,4)))
plt.title(kwargs.pop('title',''))
plt.xlim(f_range[0], f_range[:chi2r.shape[0]].max())
plt.ylim(chi2r.min()*0.9, chi2r.max()*1.1)
plt.plot(f_range[:chi2r.shape[0]],chi2r,
linestyle = kwargs.pop('linestyle','-'),
color = kwargs.pop('color','gray'),
marker = kwargs.pop('marker','.'),
markerfacecolor='r', markeredgecolor='r', **kwargs)
plt.xlabel('flux')
plt.ylabel(r'$\chi^2_{r}$')
plt.grid('on')
if save:
plt.savefig('chi2rVSflux.pdf')
if display:
plt.show()
return (r0,theta0,f0)
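
# Illustrative toy (not part of the original API): the early-stopping grid
# search above, isolated on a hypothetical quadratic merit curve. The counter
# aborts the scan once four flux values in a row have increased the chi2.
def _demo_grid_search():
    f_range = np.linspace(0, 10, 50)
    merit = (f_range - 3.0) ** 2  # hypothetical chi2 curve, minimum at f = 3
    chi2r, counter = [], 0
    for j in range(f_range.shape[0]):
        chi2r.append(merit[j])
        if j > 0 and chi2r[j] > chi2r[j - 1]:
            counter += 1
        if counter == 4:
            break
    return f_range[int(np.argmin(chi2r))]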
def firstguess_simplex(p, cube, angs, psf, plsc, ncomp, fwhm, annulus_width,
aperture_radius, cube_ref=None, svd_mode='lapack',
scaling=None, fmerit='sum', collapse='median', p_ini=None,
options=None, verbose=False, **kwargs):
"""
Determine the position of a companion using the negative fake companion
technique and a standard minimization algorithm (Default=Nelder-Mead) .
Parameters
----------
p : np.array
Estimate of the candidate position.
cube: numpy.array
The cube of fits images expressed as a numpy.array.
angs: numpy.array
The parallactic angle fits image expressed as a numpy.array.
psf: numpy.array
The scaled psf expressed as a numpy.array.
plsc: float
The platescale, in arcsec per pixel.
ncomp: int
The number of principal components.
fwhm : float
        The FWHM in pixels.
annulus_width: int, optional
The width in terms of the FWHM of the annulus on which the PCA is done.
aperture_radius: int, optional
The radius of the circular aperture in terms of the FWHM.
cube_ref : array_like, 3d, optional
Reference library cube. For Reference Star Differential Imaging.
svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
Switch for different ways of computing the SVD and selected PCs.
scaling : {'temp-mean', 'temp-standard'} or None, optional
With None, no scaling is performed on the input data before SVD. With
"temp-mean" then temporal px-wise mean subtraction is done and with
"temp-standard" temporal mean centering plus scaling to unit variance
is done.
fmerit : {'sum', 'stddev'}, string optional
        Chooses the figure of merit to be used. stddev works better for close-in
companions sitting on top of speckle noise.
collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
Sets the way of collapsing the frames for producing a final image. If
None then the cube of residuals is used when measuring the function of
merit (instead of a single final frame).
p_ini : np.array
Position (r, theta) of the circular aperture center.
options: dict, optional
The scipy.optimize.minimize options.
verbose : boolean, optional
If True, informations are displayed in the shell.
Returns
-------
out : scipy.optimize.minimize solution object
The solution of the minimization algorithm.
"""
    if options is None:
        options = {}
    if verbose:
        print('')
        print('{} minimization is running...'.format(
            options.get('method', 'Nelder-Mead')))
    if p_ini is None:
        p_ini = p
solu = minimize(chisquare, p, args=(cube, angs, plsc, psf, fwhm, annulus_width,
aperture_radius, p_ini, ncomp, cube_ref,
svd_mode, scaling, fmerit, collapse),
method = options.pop('method','Nelder-Mead'),
options=options, **kwargs)
if verbose: print(solu)
return solu
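
# Illustrative toy (not part of the original API): scipy's Nelder-Mead on a
# 2-d quadratic, mirroring how firstguess_simplex drives `minimize` above.
def _demo_simplex():
    toy_merit = lambda p: (p[0] - 1.0) ** 2 + (p[1] + 2.0) ** 2
    solu = minimize(toy_merit, (0.0, 0.0), method='Nelder-Mead',
                    options={'maxiter': 200})
    return solu.x  # approximately (1, -2)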
def firstguess(cube, angs, psfn, ncomp, plsc, planets_xy_coord, fwhm=4,
annulus_width=3, aperture_radius=4, cube_ref=None,
svd_mode='lapack', scaling=None, fmerit='sum', collapse='median',
p_ini=None, f_range=None, simplex=True, simplex_options=None,
display=False, verbose=True, save=False, figure_options=None):
""" Determines a first guess for the position and the flux of a planet.
We process the cube without injecting any negative fake companion.
This leads to the visual detection of the planet(s). For each of them,
    one can estimate the (x,y) coordinates in pixels for the position of the
star, as well as the planet(s).
From the (x,y) coordinates in pixels for the star and planet(s), we can
estimate a preliminary guess for the position and flux for each planet
    by using the method "firstguess_from_coord". The argument "f_range" can
    be used to set prior limits on the flux (optional, default: None).
    This step can be repeated to refine the preliminary guess for the flux.
    We can go a step further by using a simplex Nelder-Mead minimization to
estimate the first guess based on the preliminary guess.
Parameters
----------
cube: numpy.array
The cube of fits images expressed as a numpy.array.
angs: numpy.array
The parallactic angle fits image expressed as a numpy.array.
psfn: numpy.array
The centered and normalized (flux in a 1*FWHM aperture must equal 1)
PSF 2d-array.
ncomp: int
The number of principal components.
plsc: float
The platescale, in arcsec per pixel.
    planets_xy_coord: array or list
The list of (x,y) positions of the planets.
fwhm : float, optional
        The FWHM in pixels.
annulus_width: int, optional
The width in terms of the FWHM of the annulus on which the PCA is done.
aperture_radius: int, optional
The radius of the circular aperture in terms of the FWHM.
cube_ref : array_like, 3d, optional
Reference library cube. For Reference Star Differential Imaging.
svd_mode : {'lapack', 'randsvd', 'eigen', 'arpack'}, str optional
Switch for different ways of computing the SVD and selected PCs.
scaling : {'temp-mean', 'temp-standard'} or None, optional
With None, no scaling is performed on the input data before SVD. With
"temp-mean" then temporal px-wise mean subtraction is done and with
"temp-standard" temporal mean centering plus scaling to unit variance
is done.
fmerit : {'sum', 'stddev'}, string optional
        Chooses the figure of merit to be used. stddev works better for close-in
companions sitting on top of speckle noise.
collapse : {'median', 'mean', 'sum', 'trimmean', None}, str or None, optional
Sets the way of collapsing the frames for producing a final image. If
None then the cube of residuals is used when measuring the function of
merit (instead of a single final frame).
p_ini: numpy.array
Position (r, theta) of the circular aperture center.
f_range: numpy.array, optional
The range of flux tested values. If None, 20 values between 0 and 5000
are tested.
simplex: boolean, optional
If True, the Nelder-Mead minimization is performed after the flux grid
search.
simplex_options: dict, optional
The scipy.optimize.minimize options.
display: boolean, optional
If True, the figure chi2 vs. flux is displayed.
verbose: boolean
If True, display intermediate info in the shell.
save: boolean, optional
If True, the figure chi2 vs. flux is saved.
figure_options: dict, optional
Additional parameters are passed to the matplotlib plot method.
Returns
-------
out : The radial coordinates and the flux of the companion.
WARNING: POLAR ANGLE IS NOT THE CONVENTIONAL NORTH-TO-EAST P.A.
"""
if verbose: start_time = time_ini()
if figure_options is None:
figure_options = {'color':'gray', 'marker':'.',
'title':r'$\chi^2_{r}$ vs flux'}
planets_xy_coord = np.array(planets_xy_coord)
n_planet = planets_xy_coord.shape[0]
center_xy_coord = np.array(frame_center(cube[0]))
if f_range is None:
f_range = np.linspace(0,5000,20)
if simplex_options is None:
simplex_options = {'xtol':1e-1, 'maxiter':500, 'maxfev':1000}
r_0 = np.zeros(n_planet)
theta_0 = np.zeros_like(r_0)
f_0 = np.zeros_like(r_0)
for index_planet in range(n_planet):
if verbose:
print('')
print(sep)
print(' Planet {} '.format(index_planet))
print(sep)
print('')
msg2 = 'Planet {}: flux estimation at the position [{},{}], running ...'
print(msg2.format(index_planet,planets_xy_coord[index_planet,0],
planets_xy_coord[index_planet,1]))
res_init = firstguess_from_coord(planets_xy_coord[index_planet],
center_xy_coord, cube, angs, plsc, psfn,
fwhm, annulus_width, aperture_radius,
ncomp, f_range=f_range,
cube_ref=cube_ref, svd_mode=svd_mode,
scaling=scaling, fmerit=fmerit,
collapse=collapse, display=display,
verbose=verbose, save=save,
**figure_options)
r_pre, theta_pre, f_pre = res_init
if verbose:
msg3 = 'Planet {}: preliminary guess: (r, theta, f)=({:.1f}, {:.1f}, {:.1f})'
print(msg3.format(index_planet,r_pre, theta_pre, f_pre))
if simplex:
if verbose:
msg4 = 'Planet {}: Simplex Nelder-Mead minimization, running ...'
print(msg4.format(index_planet))
res = firstguess_simplex((r_pre,theta_pre,f_pre), cube, angs, psfn,
plsc, ncomp, fwhm, annulus_width,
aperture_radius, cube_ref=cube_ref,
svd_mode=svd_mode, scaling=scaling,
fmerit=fmerit, collapse=collapse, p_ini=p_ini,
options=simplex_options, verbose=False)
r_0[index_planet], theta_0[index_planet], f_0[index_planet] = res.x
if verbose:
msg5 = 'Planet {}: Success: {}, nit: {}, nfev: {}, chi2r: {}'
print(msg5.format(index_planet,res.success,res.nit,res.nfev,
res.fun))
print('message: {}'.format(res.message))
else:
if verbose:
msg4bis = 'Planet {}: Simplex Nelder-Mead minimization skipped.'
print(msg4bis.format(index_planet))
r_0[index_planet] = r_pre
theta_0[index_planet] = theta_pre
f_0[index_planet] = f_pre
if verbose:
centy, centx = frame_center(cube[0])
            posy = r_0[index_planet] * np.sin(np.deg2rad(theta_0[index_planet])) + centy
            posx = r_0[index_planet] * np.cos(np.deg2rad(theta_0[index_planet])) + centx
            msg6 = 'Planet {}: simplex result: (r, theta, f)=({:.3f}, {:.3f}'
            msg6 += ', {:.3f}) at \n (X,Y)=({:.2f}, {:.2f})'
            print(msg6.format(index_planet, r_0[index_planet],
                              theta_0[index_planet], f_0[index_planet],
                              posx, posy))
if verbose:
print('\n', sep, '\nDONE !\n', sep)
timing(start_time)
return (r_0,theta_0,f_0)
|
henry-ngo/VIP
|
vip_hci/negfc/simplex_optim.py
|
Python
|
mit
| 16,676 | 0.010794 |
import os.path
import logging
_logger = logging.getLogger(__name__)
from operator import itemgetter
from tornado.web import Application, RequestHandler, StaticFileHandler
from tornado.ioloop import IOLoop
config = {
'DEBUG': True,
'PORT' : 5000
}
HANDLERS = []
ROOT_DIR = os.path.abspath(os.path.join(os.path.split(__file__)[0], os.path.pardir))
GFXTABLET_DIR = os.path.join(ROOT_DIR, "node_modules", "gfxtablet")
if os.path.exists(GFXTABLET_DIR):
import sys
sys.path.insert(0, GFXTABLET_DIR)
from GfxTablet import GfxTabletHandler
HANDLERS.append((r'/gfxtablet', GfxTabletHandler))
class MainHandler(RequestHandler):
def get(self):
self.render("index.html")
def main():
global HANDLERS
HANDLERS += [(r'/(.+)', StaticFileHandler, {'path': ROOT_DIR}),
(r'/', MainHandler)]
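    # Note: Tornado tries handlers in the listed order; the catch-all
    # r'/(.+)' never matches the bare root path, so '/' reaches MainHandler.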
app = Application(HANDLERS,
debug=config.get('DEBUG', False), static_path=ROOT_DIR)
_logger.info("app.settings:\n%s" % '\n'.join(['%s: %s' % (k, str(v))
for k, v in sorted(app.settings.items(),
key=itemgetter(0))]))
port = config.get('PORT', 5000)
app.listen(port)
_logger.info("""
listening on port %d
press CTRL-c to terminate the server
-----------
Y A W V R B
*************************
*********************************
STARTING TORNADO APP!!!!!!!!!!!!!
*********************************
*************************
Y A W V R B
-----------
""" % port)
IOLoop.instance().start()
if __name__ == "__main__":
logging.basicConfig(level=(logging.DEBUG if config.get('DEBUG') else logging.INFO),
format="%(asctime)s: %(levelname)s %(name)s %(funcName)s %(lineno)d: %(message)s")
main()
|
jzitelli/yawvrb.js
|
test/tornado_server.py
|
Python
|
mit
| 1,909 | 0.00681 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
###############################################################################
# Author: Quincey Sun
# Mail: zeroonegit@gmail.com
# Created Time: 2016-06-21 23:14:26
###############################################################################
## This program asks a user for a name and a password.
# It then checks them to make sure that the user is allowed in.
# Note that this is a simple and insecure example,
# real password code should never be implemented this way.
name = input("What is your name? ")
password = input("What is the password? ")
if name == "Josh" and password == "Friday":
print ("Welcome Josh")
elif name == "Fred" and password == "Rock":
print ("Welcome Fred")
else:
print ("I don't know you.")
|
zeroonegit/python
|
python_programming/password1.py
|
Python
|
mit
| 786 | 0.005089 |
import os
from unittest import TestCase
import mock
from marvel.iterables import BaseIterable
class FooIterable(BaseIterable):
def __init__(self):
self.total_pages = 20
super(FooIterable, self).__init__()
def get_items(self):
if self.total_pages == 0:
raise StopIteration
else:
self.total_pages = self.total_pages - 1
return [self.total_pages]
class TestBaseIterable(TestCase):
def test_limit_pages_not_defined(self):
count = 0
for _ in FooIterable():
count = count + 1
assert count == 20
@mock.patch.dict(os.environ, {'TC_LIMIT_PAGES': '3'})
def test_limit_pages_with_3(self):
count = 0
for _ in FooIterable():
count = count + 1
assert count == 3
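
# For reference, a minimal BaseIterable consistent with these tests might look
# like the sketch below (an assumption -- the real implementation lives in
# marvel.iterables and may differ):
#
#   class BaseIterable(object):
#       def __init__(self):
#           self.limit_pages = int(os.environ.get('TC_LIMIT_PAGES', 0))
#           self.pages_read = 0
#       def __iter__(self):
#           return self
#       def __next__(self):  # get_items() raises StopIteration when done
#           if self.limit_pages and self.pages_read >= self.limit_pages:
#               raise StopIteration
#           items = self.get_items()
#           self.pages_read += 1
#           return items
#       next = __next__  # Python 2 compatibility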
|
fernandoe/the-comics
|
tests/marvel/iterables/test_baseIterable.py
|
Python
|
gpl-3.0
| 816 | 0 |
from mqttsqlite.orm.models import Topic
import json
from mqttsqlite.settings.private_settings import MANAGEMENT_PASSWORD, QUERY_PASSWORD
from .utils import Payload, Utils
class TopicsController(object):
def add_topic(self, msg):
received_data = json.loads(msg.payload)
payload = Utils().validate_data(received_data, MANAGEMENT_PASSWORD, ['password', 'client'])
if payload.result == 'OK':
new_topic, created = Topic.get_or_create(name=str(received_data['topic']))
saved_topics = []
for topic in Topic.select():
saved_topics.append(topic.name)
payload.topics = saved_topics
return payload.get_json()
def remove_topic(self, msg):
received_data = json.loads(msg.payload)
payload = Utils().validate_data(received_data, MANAGEMENT_PASSWORD, ['password', 'client'])
if payload.result == 'OK':
topic = Topic.select().where(Topic.name == str(received_data['topic']))
if topic.count() > 0:
topic[0].delete_instance()
else:
payload.result = 'KO'
payload.error = 'Topic not found'
saved_topics = []
for topic in Topic.select():
saved_topics.append(topic.name)
payload.topics = saved_topics
return payload.get_json()
def list_topics(self, msg):
received_data = json.loads(msg.payload)
payload = Utils().validate_data(received_data, QUERY_PASSWORD, ['password', 'client'], topic=False)
if payload.result == 'OK':
saved_topics = []
for topic in Topic.select():
saved_topics.append(topic.name)
payload.topics = saved_topics
return payload.get_json()
def get_storaged_topics(self):
return Topic.select()
def is_topic_subscribed(self, topic):
if Topic.select().where(Topic.name == topic).count():
return True
else:
return False
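
# Illustrative usage sketch (assumptions: an MQTT message object exposing a
# `payload` attribute, and a valid management password):
#
#   import collections
#   Msg = collections.namedtuple('Msg', 'payload')
#   msg = Msg(payload=json.dumps({'password': MANAGEMENT_PASSWORD,
#                                 'client': 'client-1',
#                                 'topic': 'home/livingroom/temperature'}))
#   print(TopicsController().add_topic(msg))  # JSON with the stored topics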
|
rdiaz82/mqttSqlLite
|
mqttsqlite/core/topics_controller.py
|
Python
|
mit
| 2,034 | 0.00295 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-long-lambda
"""Tests for tensorflow.ops.control_flow_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
# pylint: disable=unused-import
import tensorflow.python.ops.tensor_array_grad
# pylint: enable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import nest
def check_consumers(graph):
"""Sanity check on the consumer list of the tensors."""
consumer_count = {}
for op in graph.get_operations():
for v in op.inputs:
cnt = consumer_count.get(v, 0)
consumer_count[v] = cnt + 1
for k, v in consumer_count.items():
if len(k.consumers()) != v:
return False
return True
def all_fetchables():
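  """Return the names of all fetchable tensors in the default graph."""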
tensor_names = []
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.outputs:
if graph.is_fetchable(t):
tensor_names.append(t.name)
return tensor_names
def all_feedables():
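  """Return all feedable input tensors in the default graph."""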
feedable_tensors = []
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if graph.is_feedable(t):
feedable_tensors.append(t)
return feedable_tensors
def opt_cfg():
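  """Session config enabling L1 graph optimizations, function inlining and
  constant folding."""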
return config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L1,
do_function_inlining=True,
do_constant_folding=True)))
def isum(s, maximum_iterations=None):
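  """Returns s plus the sum 0 + 1 + ... + 9, computed with a while_loop (the
  iteration count may be capped by maximum_iterations)."""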
i = constant_op.constant(0, name="i")
c = lambda i, s: math_ops.less(i, 10)
b = lambda i, s: [math_ops.add(i, 1), math_ops.add(i, s)]
_, r_s = control_flow_ops.while_loop(
c, b, [i, s], maximum_iterations=maximum_iterations)
return r_s
class ControlFlowTest(test.TestCase):
def testRefIdentity(self):
with self.test_session():
v = variables.Variable(7)
v = control_flow_ops._Identity(v)
op = state_ops.assign(v, 9)
v2 = control_flow_ops.with_dependencies([op], v)
self.assertTrue(isinstance(v2, ops.Tensor))
variables.global_variables_initializer().run()
self.assertEqual(9, v2.eval())
def testRefEnter(self):
with self.test_session():
v = variables.Variable(7)
enter_v = control_flow_ops._Enter(v, "foo_1", is_constant=True)
nine = constant_op.constant(9)
enter_nine = gen_control_flow_ops.enter(nine, "foo_1")
op = state_ops.assign(enter_v, enter_nine)
v2 = control_flow_ops.with_dependencies([op], enter_v)
v3 = control_flow_ops.exit(v2)
variables.global_variables_initializer().run()
self.assertEqual(9, v3.eval())
def testRefSwitch(self):
with self.test_session():
v = variables.Variable(7)
p = constant_op.constant(True)
v1 = control_flow_ops._SwitchRefOrTensor(v._ref(), p) # pylint: disable=protected-access
v2 = state_ops.assign(v1[1], 9)
variables.global_variables_initializer().run()
self.assertEqual(9, v2.eval())
def testEnterMulExit(self):
with self.test_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
enter_data = gen_control_flow_ops.enter(data, "foo_1", False)
five = constant_op.constant(5)
enter_five = gen_control_flow_ops.enter(five, "foo_1", False)
mul_op = math_ops.multiply(enter_data, enter_five)
exit_op = control_flow_ops.exit(mul_op)
result = exit_op.eval()
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testEnterShapePropagation(self):
with self.test_session():
v = variables.Variable([0.0, 0.0], dtype=dtypes.float32)
# If is_constant=True, the shape information should be propagated.
enter_v_constant = gen_control_flow_ops.enter(
v, "frame1", is_constant=True)
self.assertEqual(enter_v_constant.shape, [2])
# Otherwise, the shape should be unknown.
enter_v_non_constant = gen_control_flow_ops.enter(
v, "frame2", is_constant=False)
self.assertEqual(enter_v_non_constant.shape, None)
def testSwitchMergeIndexedSlices(self):
with self.test_session():
values = constant_op.constant([1, 2, 3, 4, 5, 6])
indices = constant_op.constant([0, 2, 4, 6, 8, 10])
data = ops.IndexedSlices(values, indices)
pred = ops.convert_to_tensor(True)
switch_op = control_flow_ops.switch(data, pred)
merge_op = control_flow_ops.merge(switch_op)[0]
val = merge_op.values.eval()
ind = merge_op.indices.eval()
self.assertAllEqual(np.arange(1, 7), val)
self.assertAllEqual(np.arange(0, 12, 2), ind)
def testSwitchDeadBranch(self):
with self.test_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
dead_branch = array_ops.identity(switch_op[0])
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Retval[0] does not have value" in str(e)):
dead_branch.eval()
def testSwitchMergeLess(self):
with self.test_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
zero = ops.convert_to_tensor(0)
one = ops.convert_to_tensor(1)
less_op = math_ops.less(zero, one)
switch_op = control_flow_ops.switch(data, less_op)
merge_op = control_flow_ops.merge(switch_op)[0]
result = merge_op.eval()
self.assertAllEqual(np.arange(1, 7), result)
def testSwitchMergeAddIdentity(self):
with self.test_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(False, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = constant_op.constant(1)
add_op = math_ops.add(switch_op[0], one)
id_op = array_ops.identity(switch_op[1])
merge_op = control_flow_ops.merge([add_op, id_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result)
def testSwitchMergeAddMul(self):
with self.test_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = constant_op.constant(1)
add_op = math_ops.add(switch_op[0], one)
five = constant_op.constant(5)
mul_op = math_ops.multiply(switch_op[1], five)
merge_op = control_flow_ops.merge([add_op, mul_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testLoop_false(self):
with self.test_session():
false = ops.convert_to_tensor(False)
n = constant_op.constant(10)
enter_false = gen_control_flow_ops.enter(false, "foo_1", False)
enter_n = gen_control_flow_ops.enter(n, "foo_1", False)
merge_n = control_flow_ops.merge([enter_n, enter_n], name="merge_n")[0]
switch_n = control_flow_ops.switch(merge_n, enter_false)
exit_n = control_flow_ops.exit(switch_n[0])
next_n = control_flow_ops.next_iteration(switch_n[0])
merge_n.op._update_input(1, next_n)
result = exit_n.eval()
self.assertAllEqual(10, result)
def testLoop_1(self):
with self.test_session():
zero = constant_op.constant(0)
one = constant_op.constant(1)
n = constant_op.constant(10)
enter_i = gen_control_flow_ops.enter(zero, "foo", False)
enter_one = gen_control_flow_ops.enter(one, "foo", True)
enter_n = gen_control_flow_ops.enter(n, "foo", True)
with ops.device(test.gpu_device_name()):
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = math_ops.add(switch_i[1], enter_one)
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = exit_i.eval()
self.assertAllEqual(10, result)
def testLoop_2(self):
with self.test_session():
zero = constant_op.constant(0)
one = constant_op.constant(1)
n = constant_op.constant(10)
enter_i = gen_control_flow_ops.enter(zero, "foo", False)
enter_one = gen_control_flow_ops.enter(one, "foo", True)
enter_n = gen_control_flow_ops.enter(n, "foo", True)
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = math_ops.add(switch_i[1], enter_one)
with ops.device(test.gpu_device_name()):
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = exit_i.eval()
self.assertAllEqual(10, result)
def testDifferentFrame(self):
with self.test_session():
data = array_ops.placeholder(dtypes.float32, shape=[])
enter_1 = gen_control_flow_ops.enter(data, "foo_1", False)
enter_2 = gen_control_flow_ops.enter(data, "foo_2", False)
res = math_ops.add(enter_1, enter_2)
with self.assertRaisesOpError("has inputs from different frames"):
res.eval(feed_dict={data: 1.0})
def testCondBool(self):
values = constant_op.constant(10)
fn1 = lambda: math_ops.add(values, 1)
fn2 = lambda: math_ops.subtract(values, 1)
with self.assertRaisesRegexp(TypeError, "must not be a Python bool"):
_ = control_flow_ops.cond(False, fn1, fn2)
def testCondInt(self):
p = array_ops.placeholder(dtypes.bool, shape=[])
v = constant_op.constant(10)
fn1 = lambda: math_ops.add(v, 1)
fn2 = lambda: math_ops.subtract(v, 1)
y = control_flow_ops.cond(p, fn1, fn2)
grad = gradients_impl.gradients(y, [v])
self.assertAllEqual([None], grad)
def testFetchable(self):
with self.test_session() as sess:
x = array_ops.placeholder(dtypes.float32)
control_flow_ops.cond(
constant_op.constant(True), lambda: x + 2, lambda: x + 0)
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if graph.is_fetchable(t.op):
sess.run(t, feed_dict={x: 3})
else:
with self.assertRaisesRegexp(ValueError,
"has been marked as not fetchable"):
sess.run(t, feed_dict={x: 3})
def testFeedable(self):
with self.test_session() as sess:
c = constant_op.constant(2)
i0 = constant_op.constant(0)
r = control_flow_ops.while_loop(lambda i: i < 1000,
lambda i: math_ops.square(c) + i, [i0])
self.assertEqual(1000, r.eval(feed_dict={i0: 0}))
feedable_tensors = all_feedables()
for t in feedable_tensors:
sess.run(r, feed_dict={t: 3})
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if t not in feedable_tensors and t.dtype is dtypes.int32:
with self.assertRaisesRegexp(ValueError, "may not be fed"):
sess.run(r, feed_dict={t: 3})
def testCondIndexedSlices(self):
with self.test_session():
values = constant_op.constant(10)
indices = constant_op.constant(0)
x = ops.IndexedSlices(values, indices)
pred = math_ops.less(1, 2)
fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), indices)
fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), indices)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
def testCondSparseTensor(self):
with self.test_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
pred = math_ops.less(1, 2)
fn1 = lambda: sparse_tensor.SparseTensor(
indices + 1, x.values + 1, dense_shape=shape)
fn2 = lambda: sparse_tensor.SparseTensor(
indices, x.values - 1, dense_shape=shape)
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([3.0, 5.0], r.values.eval())
self.assertAllEqual([[1], [4]], r.indices.eval())
self.assertAllEqual(r.values.get_shape(), (2,))
def testCondResource(self):
with self.test_session():
rv = resource_variable_ops.ResourceVariable(True)
variables.global_variables_initializer().run()
t = ops.convert_to_tensor(1.0)
def case():
assign = resource_variable_ops.assign_variable_op(rv.handle, False)
with ops.control_dependencies([assign]):
return array_ops.identity(t)
self.assertEqual(1.0, control_flow_ops.cond(rv, case, lambda: t).eval())
def testCondIndexedSlicesDifferentTypes(self):
with self.test_session():
values = constant_op.constant(10)
i_32 = ops.convert_to_tensor(0, name="one", dtype=dtypes.int32)
i_64 = ops.convert_to_tensor(0, name="one", dtype=dtypes.int64)
x = ops.IndexedSlices(values, i_32)
pred = math_ops.less(1, 2)
fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), i_32)
fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), i_64)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
self.assertTrue(ind.dtype == np.int64)
def testCondColocation(self):
with self.test_session(use_gpu=True):
with ops.device("/cpu:0"):
v = variables.Variable(7.0)
x = constant_op.constant(10.0)
pred = math_ops.less(1.0, 2.0)
fn1 = lambda: math_ops.add(v, 1.0)
fn2 = lambda: math_ops.subtract(x, 1.0)
r = control_flow_ops.cond(pred, fn1, fn2)
for op in x.graph.get_operations():
if op.name == "cond/Add/Switch":
self.assertDeviceEqual(op.device, "/cpu:0")
def _testCond_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
x = constant_op.constant(10)
pred = math_ops.less(1, 2)
fn1 = lambda: math_ops.add(x, 1)
fn2 = lambda: math_ops.subtract(x, 1)
r = control_flow_ops.cond(pred, fn1, fn2)
result = r.eval()
self.assertAllEqual(11, result)
def testCond_1(self):
self._testCond_1(use_gpu=False)
self._testCond_1(use_gpu=True)
def testCond_2(self):
with self.test_session():
x = constant_op.constant(10)
r = control_flow_ops.cond(
math_ops.less(1, 0), lambda: math_ops.add(x, 1),
lambda: math_ops.subtract(x, 1))
result = r.eval()
self.assertAllEqual(9, result)
def testCond_3(self):
with self.test_session():
x = constant_op.constant(10)
pred = math_ops.less(1, 2)
fn1 = lambda: math_ops.add(x, 1)
fn2 = lambda: math_ops.subtract(x, 1)
fn3 = lambda: math_ops.add(control_flow_ops.cond(pred, fn1, fn2), 1)
r = control_flow_ops.cond(pred, fn3, fn2)
result = r.eval()
self.assertAllEqual(12, result)
def testCond_4(self):
with self.test_session():
v1 = variables.Variable(7)
v2 = variables.Variable(7)
v3 = variables.Variable(7)
age = constant_op.constant(3)
max_age = constant_op.constant(2)
pred = math_ops.greater(age, max_age)
fn1 = lambda: [state_ops.assign(v1, 1).op, state_ops.assign(v2, 2).op]
fn2 = lambda: [state_ops.assign(v3, 3).op, constant_op.constant(10).op]
r = control_flow_ops.cond(pred, fn1, fn2)
variables.global_variables_initializer().run()
self.assertEqual(len(r), 2)
result = r[1].eval()
self.assertAllEqual(True, result)
self.assertAllEqual(7, v1.eval())
self.assertAllEqual(2, v2.eval())
self.assertAllEqual(7, v3.eval())
def testCond_5(self):
with self.test_session():
alive = constant_op.constant(True, name="alive")
count = constant_op.constant(0, name="count")
def body(i):
return control_flow_ops.cond(
alive, lambda: [math_ops.less(i, 3), math_ops.add(count, 1)],
lambda: [alive, count])
for i in range(10):
alive, count = body(i)
self.assertAllEqual(4, count.eval())
def testCond_6(self):
with self.test_session():
v1 = variables.Variable([7])
age = constant_op.constant(3)
pred = math_ops.greater(age, 4)
fn1 = lambda: age
fn2 = lambda: v1
r = control_flow_ops.cond(pred, fn1, fn2)
variables.global_variables_initializer().run()
result = r.eval()
self.assertAllEqual(np.array([7]), result)
def testCond_7(self):
with self.test_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: [math_ops.add(x, 1), math_ops.add(x, 2)]
fn2 = lambda: [y, y]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([11, 12], sess.run(r))
def testCondRef(self):
with self.test_session():
x = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="x",
container="",
shared_name="")
true_fn = lambda: x
false_fn = lambda: constant_op.constant([2.0])
r = control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
self.assertAllEqual([2.0], r.eval())
def testCondWithControl(self):
with self.test_session() as sess:
control_holder = array_ops.placeholder(dtypes.float32, shape=())
a = constant_op.constant(3)
def true_branch():
with ops.control_dependencies([control_holder]):
_ = a + 1
return a + 2
r = control_flow_ops.cond(
constant_op.constant(True), true_branch,
lambda: constant_op.constant(1))
self.assertEqual(5, r.eval())
def testUninitializedRefIdentity(self):
with self.test_session() as sess:
v = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="v",
container="",
shared_name="")
inited = state_ops.is_variable_initialized(v)
v_f, v_t = control_flow_ops.ref_switch(v, inited)
# Both v_f and v_t are uninitialized references. However, an actual use
# of the reference in the 'true' branch in the 'tf.identity' op will
# not 'fire' when v is uninitialized, so this is a valid construction.
      # This test verifies that ref_identity accepts an uninitialized ref as
      # input, so this construction is allowed.
v_f_op = gen_array_ops.ref_identity(v_f)
v_t_op = gen_array_ops.ref_identity(v_t)
with ops.control_dependencies([v_f_op]):
assign_v = state_ops.assign(v, [1.0])
with ops.control_dependencies([v_t_op]):
orig_v = array_ops.identity(v)
merged_op = control_flow_ops.merge([assign_v, orig_v])
self.assertAllEqual([1.0], sess.run(merged_op.output))
def testCondSwitchIdentity(self):
# Make sure the recv identity is not removed by optimization.
with session.Session(config=opt_cfg()) as sess:
pred = constant_op.constant(True)
def fn1():
return control_flow_ops.no_op()
def fn2():
return control_flow_ops.Assert(False, ["Wrong branch!!!"])
r = control_flow_ops.cond(pred, fn1, fn2)
sess.run(r)
def testCondRecvIdentity(self):
# Make sure the switch identity is not removed by optimization.
with session.Session(config=opt_cfg()) as sess:
with ops.device(test.gpu_device_name()):
pred = constant_op.constant(True)
def fn1():
return control_flow_ops.no_op()
def fn2():
with ops.device("/cpu:0"):
return control_flow_ops.Assert(False, ["Wrong branch!!!"])
r = control_flow_ops.cond(pred, fn1, fn2)
sess.run(r)
def testCondGrad_1(self):
graph = ops.Graph()
with graph.as_default():
x = constant_op.constant(10.0, name="x")
pred = math_ops.less(1, 2)
fn1 = lambda: array_ops.identity(x)
fn2 = lambda: array_ops.identity(x)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [x])[0]
with self.test_session():
self.assertAllEqual(1.0, grad.eval())
# The gradients computation creates a tensor with zeros by broadcasting a
# zeros constant to the required shape. Verify that the zero constant
# feeding into the fill is dominated by a Switch.
zero = graph.get_operation_by_name("gradients/zeros/Const")
self.assertEqual(len(zero.control_inputs), 1)
self.assertEqual(zero.control_inputs[0].type, "Identity")
self.assertEqual(zero.control_inputs[0].inputs[0].op.type, "Switch")
def testCondGrad_2(self):
with self.test_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
x = constant_op.constant(10.0)
pred = math_ops.less(c, 2)
fn1 = lambda: math_ops.multiply(x, 42.0)
fn2 = lambda: math_ops.multiply(x, 3.0)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [x])[0]
self.assertAllEqual(42.0, grad.eval(feed_dict={c: 1}))
self.assertAllEqual(3.0, grad.eval(feed_dict={c: 3}))
def testCondGrad_3(self):
with self.test_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
ox = constant_op.constant(10.0)
pred = math_ops.less(c, 2)
def fn1(x):
m = x * x
return gradients_impl.gradients(m, [ox])[0]
fn2 = lambda: math_ops.multiply(ox, 3.0)
y = math_ops.multiply(7.0, ox)
r = control_flow_ops.cond(pred, lambda: fn1(y), fn2)
self.assertAllEqual(980.0, r.eval(feed_dict={c: 1}))
self.assertAllEqual(30.0, r.eval(feed_dict={c: 3}))
def testNestedCond_Simple(self):
with self.test_session():
x = constant_op.constant(0., name="X")
y = control_flow_ops.cond(
constant_op.constant(True), lambda: x,
lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
result = gradients_impl.gradients(y, x)[0]
self.assertEqual(1.0, result.eval())
z = control_flow_ops.cond(
constant_op.constant(False), lambda: x,
lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
result = gradients_impl.gradients(z, x)[0]
self.assertEqual(1.0, result.eval())
def testCondGrad_Gather(self):
with self.test_session() as sess:
v1 = variables.Variable([1.0, 42.0])
c = array_ops.placeholder(dtypes.int32, shape=[])
pred = math_ops.less(c, 2)
fn1 = lambda: array_ops.identity(v1)
fn2 = lambda: array_ops.gather(v1, [1, 1])
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [v1])[0]
variables.global_variables_initializer().run()
# Should just be [1, 1], but possibly a sparse representation
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 1})
dense_gv = [
sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
]
self.assertAllEqual(dense_gv, [1.0, 1.0])
# Should be [0, 2], as the else forwards v1[1] twice
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 3})
dense_gv = [
sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
]
self.assertAllEqual(dense_gv, [0.0, 2.0])
# Microbenchmark: 256,000 iterations/s.
def testWhile_1(self):
with self.test_session():
n = constant_op.constant(0)
c = lambda x: math_ops.less(x, 10000)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, r.eval())
def testWhileExternalControlDependencies(self):
with self.test_session():
v = variables.Variable(0.0)
v.initializer.run()
increment = v.assign_add(1.0)
def body_fn(i):
with ops.control_dependencies([increment]):
return i + 1
result = control_flow_ops.while_loop(cond=lambda i: i < 2,
body=body_fn, loop_vars=[1])
self.assertAllEqual(result.eval(), 2)
self.assertAllEqual(v.eval(), 1.0)
def testWhileExternalControlDependenciesNoInput(self):
with self.test_session():
v = variables.Variable(0.0)
v.initializer.run()
increment = v.assign_add(1.0)
def body_fn(unused_i):
with ops.control_dependencies([increment]):
return constant_op.constant(5, name="five")
result = control_flow_ops.while_loop(cond=lambda i: i < 5,
body=body_fn, loop_vars=[0])
result.eval()
self.assertAllEqual(v.eval(), 1.0)
def testWhileWithRefs_1(self):
with self.test_session() as sess:
x = variables.Variable(0)._ref() # pylint: disable=protected-access
i = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 100)
self.assertEqual(x.dtype, dtypes.int32_ref)
def b(i, x):
self.assertEqual(x.dtype, dtypes.int32_ref)
return (i + 1, gen_array_ops.ref_identity(x))
r = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=5)
variables.global_variables_initializer().run()
self.assertEqual(r[0].dtype, dtypes.int32)
self.assertEqual(r[1].dtype, dtypes.int32_ref)
value_i, value_x = sess.run(r)
self.assertEqual(100, value_i)
self.assertEqual(0, value_x)
def testWhile_2(self):
with self.test_session():
s = constant_op.constant(0)
r = isum(s)
self.assertAllEqual(45, r.eval())
def testWhileWithMaximumIterations(self):
with self.test_session():
s = constant_op.constant([1, 2, 3, 4, 5])
r = isum(s, maximum_iterations=3)
self.assertAllEqual([1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3], r.eval())
def testWhileWithMaximumIterationsAndSingleArgument(self):
with self.test_session():
r = control_flow_ops.while_loop(
lambda i: i < 3, lambda i: i + 1, [0], maximum_iterations=1)
self.assertEqual(1, r.eval())
def testSingleNestedMaximumIterationsWhileLoopGradientInXLAContext(self):
v = constant_op.constant(1.0)
def training_loop_with_gradient(i):
out = control_flow_ops.while_loop(
lambda i_, _: i_ < 3,
lambda i_, j: [i_ + 1, j * v], [0, 1.0],
maximum_iterations=i)
g = gradients_impl.gradients(out, v)
with ops.control_dependencies(g):
return i + 1
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
# Create training loop, ensure we can call gradient() of
# while_loop inside the training loop.
loop = control_flow_ops.while_loop(lambda i: i < 3,
training_loop_with_gradient, [0])
xla_context.Exit()
loop_execute = array_ops.identity(loop) # Because loop is not fetchable.
# Should execute without issue.
self.assertEqual(3, self.evaluate(loop_execute))
def testInvalidMaximumIterationsWhileLoopGradientInXLAContext(self):
v = constant_op.constant(1.0)
def inner_body(i, x):
out = control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, j: [i + 1, j * v], [0, x],
maximum_iterations=i)
return out
def create_while_loop(maximum_iterations=None):
return control_flow_ops.while_loop(
lambda i, _: i < 3,
inner_body, [0, 1.0],
maximum_iterations=maximum_iterations)
loop_no_xla = create_while_loop(maximum_iterations=5)
# maximum_iterations is fine outside of an XLA scope
gs = gradients_impl.gradients(loop_no_xla, v)
self.evaluate(gs) # This should execute without error.
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
loop_no_maxiter = create_while_loop()
loop_with_maxiter = create_while_loop(maximum_iterations=2)
xla_context.Exit()
with self.assertRaisesRegexp(
ValueError,
r"Cannot create a gradient accumulator for tensor '.+' inside "
r"XLA while_loop because maximum_iterations was not passed to "
r"the tf.while_loop call \('.+'\)."):
_ = gradients_impl.gradients(loop_no_maxiter, v)
with self.assertRaisesRegexp(
ValueError,
r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
r"while_loop. maximum_iterations tensor '.+' for while_loop context "
r"'.+' must be statically known \(e.g. a constant value or known "
r"shape dimension\), or be defined at or outside the while loop "
r"context '.*' \(currently defined in '.*'\)"):
_ = gradients_impl.gradients(loop_with_maxiter, v)
def testInvalidMaximumIterationsFromSiblingContextWhileLoopInXLAContext(self):
v = constant_op.constant(1.0)
def create_while_loop():
max_iter_holder = []
def create_mi():
max_iter_holder.append(array_ops.placeholder(dtypes.int32, shape=()))
return 1.0
_ = control_flow_ops.cond(
constant_op.constant(True), create_mi, create_mi)
return control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, x: (i + 1, v * x), (0, 1.0),
maximum_iterations=max_iter_holder[0])
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
loop = create_while_loop()
xla_context.Exit()
with self.assertRaisesRegexp(
ValueError,
r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
r"while_loop. maximum_iterations tensor '.*Placeholder:0' for "
r"while_loop context '.+' must be statically known \(e.g. a constant "
r"value or known shape dimension\), or be defined at or outside the "
r"while loop context '' \(currently defined in 'cond/.+'\)"):
_ = gradients_impl.gradients(loop, v)
def testNestedWhileLoopWithMaxItersFromOuterContextInXLAContext(self):
v = constant_op.constant(1.0)
p = array_ops.placeholder(dtype=dtypes.int32)
def mid_body_builder(iterations):
def mid_body(i, x):
r = control_flow_ops.while_loop(
lambda *_: True,
lambda i, x: (i + 1, v * x), (0, x),
maximum_iterations=iterations,
name="inner")
return (i + 1, gradients_impl.gradients(x + r[1], v)[0])
return mid_body
def outer_body(i, x):
iterations = array_ops.size(p, name="iterations")
return (i + 1, x + control_flow_ops.while_loop(
lambda *_: True,
mid_body_builder(iterations), (0, x),
maximum_iterations=iterations,
name="mid")[1])
def create_while_loop():
with ops.device("/cpu:0"):
r = control_flow_ops.while_loop(
lambda *_: True,
outer_body, (0, 1.0),
maximum_iterations=5,
name="outer")
return array_ops.identity(r[1])
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
final_with_xla_context = create_while_loop()
xla_context.Exit()
final_without_xla_context = create_while_loop()
with self.test_session(use_gpu=False) as sess:
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
final_value_without_xla_context = sess.run(
final_without_xla_context, feed_dict={
p: [0, 0, 0]
})
final_value_with_xla_context = sess.run(
final_with_xla_context,
feed_dict={p: [0, 0, 0]},
options=opts,
run_metadata=run_metadata)
node_stats = run_metadata.step_stats.dev_stats[0].node_stats
stack_push_count = len(
[x for x in node_stats if x.node_name.endswith("StackPushV2")])
      # Pushes to the stack = product of the maximum_iterations values;
      # the last two "3"s come from size(p), when p == [0, 0, 0].
self.assertEqual(stack_push_count, 5 * 3 * 3)
self.assertAllClose(final_value_with_xla_context,
final_value_without_xla_context)
  # Run more than 10 iterations (the default parallel_iterations) and hence
  # exercise the k-bound most of the time.
def testWhile_3(self):
with self.test_session():
def compute(i, m, c, o):
m, c = [math_ops.add(m, 1), math_ops.add(c, 1)]
o = math_ops.add(o, m)
o = math_ops.add(o, c)
i = math_ops.add(i, 1)
return [i, m, c, o]
i = ops.convert_to_tensor(0)
m = ops.convert_to_tensor(0)
c = ops.convert_to_tensor(0)
o = ops.convert_to_tensor(0)
d = ops.convert_to_tensor(100)
r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, d),
compute, [i, m, c, o])
result = r[3].eval()
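      # Each iteration adds m + c = 2 * (i + 1); over 100 iterations
      # o = 2 * (1 + 2 + ... + 100) = 10100.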
self.assertAllEqual(10100, result)
def testWhile_4(self):
with self.test_session():
def compute(i, m, c, o):
m, c = [array_ops.gather(x, i), array_ops.gather(x, i)]
o = math_ops.add(o, m)
o = math_ops.add(o, c)
i = math_ops.add(i, 1)
return [i, m, c, o]
i = ops.convert_to_tensor(0)
m = ops.convert_to_tensor(0)
c = ops.convert_to_tensor(0)
o = ops.convert_to_tensor(0)
x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = array_ops.size(x)
r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, s),
compute, [i, m, c, o])
result = r[3].eval()
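      # o accumulates 2 * x[i] for i in 0..5, i.e. 2 * (1 + 2 + ... + 6) = 42.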
self.assertAllEqual(42, result)
def testWhile_5(self):
with self.test_session():
def compute(i, c, o):
c = array_ops.strided_slice(x, array_ops.expand_dims(i, 0),
[1] + array_ops.expand_dims(i, 0))
o = array_ops.concat([o, c], 0)
i = math_ops.add(i, 1)
return [i, c, o]
i = ops.convert_to_tensor(0)
c = ops.convert_to_tensor([0])
o = ops.convert_to_tensor([0])
x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = array_ops.size(x)
r = control_flow_ops.while_loop(lambda i, c, o: math_ops.less(i, s),
compute, [i, c, o], [
i.get_shape(),
tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()
])
result = r[2].eval()
self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result)
def testBufferForwarding(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with self.test_session() as sess:
with ops.device("/cpu:0"):
c = constant_op.constant(2)
i0 = constant_op.constant(0)
r = control_flow_ops.while_loop(lambda i: i < 1000,
lambda i: math_ops.square(c) + i, [i0])
r_val = sess.run(r, options=run_options, run_metadata=run_metadata)
self.assertEqual(1000, r_val)
self.assertTrue(run_metadata.HasField("step_stats"))
unique_allocs = set()
for node_stat in run_metadata.step_stats.dev_stats[0].node_stats:
for output in node_stat.output:
unique_allocs.add(
output.tensor_description.allocation_description.ptr)
# Prior to cl/147536680, the number of unique allocations was about 1005.
self.assertLess(len(unique_allocs), 756)
def _testWhile_Gpu_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = constant_op.constant(1.0)
c = lambda x: math_ops.less(x, 10.0)
b = lambda x: math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, r.eval())
def testWhile_Gpu_1(self):
self._testWhile_Gpu_1(use_gpu=False)
self._testWhile_Gpu_1(use_gpu=True)
def _testWhile_Gpu_2(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = constant_op.constant(1.0)
c = lambda x: math_ops.less(x, 10.0)
def b(x):
with ops.device("/cpu:0"):
return math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, r.eval())
def testWhile_Gpu_2(self):
    self._testWhile_Gpu_2(use_gpu=False)
    self._testWhile_Gpu_2(use_gpu=True)
def testWhileShape(self):
with self.test_session():
i = constant_op.constant(0)
m = array_ops.ones([2, 2])
c = lambda i, j: math_ops.less(i, 2)
def _b(i, j):
new_i = math_ops.add(i, 1)
new_j = array_ops.tile(j, [2, 2])
return [new_i, new_j]
r = control_flow_ops.while_loop(
c, _b, [i, m],
[i.get_shape(), tensor_shape.unknown_shape()])
r = r[1] * array_ops.ones([8, 8])
self.assertAllEqual(np.ones((8, 8)), r.eval())
def testWhileWithNonTensorInput_Scalar(self):
with self.test_session():
n = 0
c = lambda x: x < 10000
b = lambda x: x + 1
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, r.eval())
def testWhileWithNonTensorInput_Vector(self):
with self.test_session():
      n = np.array([0])  # Note: [0] would not work here; that is a list.
c = lambda x: x[0] < 10000
b = lambda x: array_ops.stack([x[0] + 1])
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual([10000], r.eval())
def testWhileShapeInference(self):
with self.test_session():
i = constant_op.constant(0)
m = array_ops.ones([2, 2])
c = lambda i, j: math_ops.less(i, 2)
def b(i, j):
new_i = math_ops.add(i, 1)
new_j = array_ops.concat([j, j], 0)
return [new_i, new_j]
r = control_flow_ops.while_loop(
c, b, [i, m],
[i.get_shape(), tensor_shape.TensorShape([None, 2])])
self.assertTrue(r[1].get_shape()[0].value is None)
self.assertEqual(r[1].get_shape()[1], tensor_shape.Dimension(2))
with self.assertRaisesRegexp(
ValueError,
r"Input tensor 'ones:0' enters the loop with shape \(2, 2\), but has "
r"shape \(4, 2\) after one iteration. To allow the shape to vary "
r"across iterations, use the `shape_invariants` argument of "
r"tf.while_loop to specify a less-specific shape."):
r = control_flow_ops.while_loop(c, b, [i, m])
def testWhileShapeInferenceSparseTensor(self):
with self.test_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
sparse_tensor.SparseTensor(x.indices, x.values * 2.0, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
self.assertEqual(r.dense_shape.get_shape()[0].value, 1)
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None])])
self.assertTrue(r.dense_shape.get_shape()[0].value is None)
with self.assertRaisesRegexp(ValueError, "is not compatible with"):
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([5])])
def testWhileShapeInferenceIndexedSlices(self):
with self.test_session():
values = constant_op.constant([[2.0, 4.0], [3.0, 5.0]], name="values")
indices = constant_op.constant([0, 3], name="indices")
shape = constant_op.constant([10, 2], name="dense_shape")
i = constant_op.constant(0)
x = ops.IndexedSlices(values, indices, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
self.assertEqual(r.dense_shape.get_shape()[0].value, 2)
self.assertEqual(r.values.get_shape(), tensor_shape.TensorShape([2, 2]))
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None, 2])])
self.assertEqual(r.dense_shape.get_shape()[0].value, 2)
self.assertTrue(r.values.get_shape()[0].value is None)
self.assertEqual(r.values.get_shape()[1].value, 2)
with self.assertRaisesRegexp(ValueError, "is not compatible with"):
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None, 5])])
def _testNestedWhile_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = constant_op.constant(0)
def cpu_sum(s):
c = lambda i, s: math_ops.less(i, 10)
def b(i, s):
i1 = math_ops.add(i, 1)
with ops.device("/cpu:0"):
s1 = math_ops.add(i, s)
return i1, s1
_, r_s = control_flow_ops.while_loop(c, b, [n, s])
return r_s
c = lambda x: math_ops.less(x, 200)
b = lambda x: math_ops.add(x, cpu_sum(n))
r = control_flow_ops.while_loop(c, b, [n])
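      # cpu_sum(0) sums 0..9 = 45; the outer loop adds 45 until x >= 200: 225.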
self.assertEqual(225, r.eval())
def testNestedWhile_1(self):
self._testNestedWhile_1(use_gpu=False)
self._testNestedWhile_1(use_gpu=True)
def _testNestedWhile_2(self, use_gpu):
    # Test the cases where the A -> Enter and Exit -> A edges are partitioned.
with self.test_session(use_gpu=use_gpu):
s0 = constant_op.constant(2.0)
def inner_loop(s):
c = lambda s: math_ops.less(s, 20.0)
def b(s):
s1 = math_ops.add(s, s)
return s1
r_s = control_flow_ops.while_loop(c, b, [s], parallel_iterations=1)
return r_s
outer_c = lambda x: math_ops.less(x, 3000.0)
def outer_b(x):
x = logging_ops.Print(x, [x]) # Edge "Print -> Enter" is partitioned
x = inner_loop(x)
with ops.device("/cpu:0"):
x = math_ops.square(x) # Edge "Exit -> Square" is partitioned
return x
r = control_flow_ops.while_loop(
outer_c, outer_b, [s0], parallel_iterations=1)
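      # First pass: the inner loop doubles 2.0 up to 32.0 and the square gives
      # 1024.0; the second pass squares again, yielding 1048576.0.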
self.assertEqual(1048576.0, r.eval())
def testNestedWhile_2(self):
self._testNestedWhile_2(use_gpu=False)
self._testNestedWhile_2(use_gpu=True)
def testWhileWithControl_1(self):
with self.test_session():
n = constant_op.constant(0)
r = constant_op.constant(0)
condition = lambda n_, r_: math_ops.less(n_, 10)
def body(n_, r_):
n_ = math_ops.add(n_, 1)
with r_.graph.control_dependencies([r_]):
r_ = constant_op.constant(12)
return [n_, r_]
res = control_flow_ops.while_loop(
condition, body, [n, r], parallel_iterations=1)
self.assertAllEqual(12, res[1].eval())
def testWhileWithControl_2(self):
with self.test_session():
r = constant_op.constant(0)
condition = lambda r_: math_ops.less(r_, 10)
def body(r_):
with r_.graph.control_dependencies([r_]):
r_ = constant_op.constant(12)
return [r_]
res = control_flow_ops.while_loop(
condition, body, [r], parallel_iterations=1)
self.assertAllEqual(12, res.eval())
def testWhileWithControl_3(self):
with self.test_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
with ops.control_dependencies([b]):
r = control_flow_ops.while_loop(lambda x: x < 10, lambda x: x + c, [x0])
self.assertEqual(10, sess.run(r, {b: True}))
def testWhileWithControl_4(self):
with self.test_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
with ops.control_dependencies([b]):
r = control_flow_ops.while_loop(
lambda x: x < 10, lambda x: x + array_ops.identity(c), [x0])
self.assertEqual(10, sess.run(r, {b: True}))
def testWhileWithControl_5(self):
with self.test_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
def body(x):
with ops.control_dependencies([b]):
return x + c
r = control_flow_ops.while_loop(lambda x: x < 10, body, [x0])
self.assertEqual(10, sess.run(r, {b: True}))
def testWhileCondWithControl(self):
    # Ensure that no control edges from an outer control dependency context
    # are added to nodes inside cond/while contexts.
with self.test_session() as sess:
const_true = lambda: constant_op.constant(True)
const_false = lambda: constant_op.constant(False)
cond = lambda i: control_flow_ops.cond(i > 0, const_true, const_false)
body = lambda i: control_flow_ops.cond(i > 0, lambda: i - 1, lambda: i)
with ops.control_dependencies([control_flow_ops.no_op()]):
loop = control_flow_ops.while_loop(cond, body,
(constant_op.constant(5),))
self.assertEqual(0, sess.run(loop))
def testWhileCondWithControl_1(self):
with self.test_session():
v = variable_scope.get_variable(
"v", [], initializer=init_ops.constant_initializer(2))
i0 = constant_op.constant(0)
with ops.control_dependencies([i0]):
def loop_condition(i):
return i < 4
def loop_body(i):
some_cond = control_flow_ops.cond(
constant_op.constant(True),
lambda: state_ops.assign(v, math_ops.square(v)), lambda: v)
with ops.control_dependencies([some_cond]):
return i + 1
r = control_flow_ops.while_loop(loop_condition, loop_body, (i0,))
variables.global_variables_initializer().run()
self.assertEqual(4, r.eval())
self.assertAllClose(65536.0, v.eval())
def testWhileCondExitControl(self):
with self.test_session():
v = variables.Variable(1)
def false_branch():
cond = lambda i: i < 100
def body(i):
x = state_ops.assign(v, i)
return x + 1
loop = control_flow_ops.while_loop(cond, body, [0])
        # Make sure the control edge from Exit to a node is handled correctly.
with ops.control_dependencies([loop]):
return constant_op.constant(6.0)
r = control_flow_ops.cond(
constant_op.constant(False), lambda: constant_op.constant(1.0),
false_branch)
variables.global_variables_initializer().run()
self.assertEqual(6.0, r.eval())
self.assertEqual(99, v.eval())
def testCondWhile_1(self):
with self.test_session():
n = ops.convert_to_tensor(0, name="n")
c = lambda x: math_ops.less(x, 10)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.cond(
math_ops.less(0, 1), lambda: control_flow_ops.while_loop(c, b, [n]),
lambda: n)
self.assertAllEqual(10, r.eval())
def testCondWhile_2(self):
with self.test_session():
n = ops.convert_to_tensor(0)
c = lambda x: math_ops.less(x, 10)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.cond(
math_ops.less(1, 0), lambda: math_ops.add(n, 1),
lambda: control_flow_ops.while_loop(c, b, [n]))
self.assertAllEqual(10, r.eval())
def _testCondWhile_3(self, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
p = array_ops.placeholder(dtypes.bool)
n = constant_op.constant(0.0)
def c(x):
return math_ops.less(x, 10.0)
def b(x):
with ops.device("/cpu:0"):
x1 = math_ops.add(x, 1.0)
return x1
r = control_flow_ops.cond(p,
lambda: control_flow_ops.while_loop(c, b, [n]),
lambda: math_ops.multiply(n, 2.0))
r1 = gradients_impl.gradients(r, [n])
self.assertEqual(10, sess.run(r, {p: True}))
self.assertEqual([1.0], sess.run(r1, {p: True}))
self.assertEqual(0.0, sess.run(r, {p: False}))
self.assertEqual([2.0], sess.run(r1, {p: False}))
def testCondWhile_3(self):
self._testCondWhile_3(use_gpu=False)
self._testCondWhile_3(use_gpu=True)
def testWhileCond_1(self):
with self.test_session():
i = ops.convert_to_tensor(0, name="i")
n = ops.convert_to_tensor(10, name="n")
one = ops.convert_to_tensor(1, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(
constant_op.constant(True),
lambda: math_ops.add(x, one), lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [i])
self.assertAllEqual(10, r.eval())
def testWhileCond_2(self):
with self.test_session():
n = ops.convert_to_tensor(0, name="n")
c = lambda x: math_ops.less(x, 10)
b = lambda x: control_flow_ops.cond(constant_op.constant(True), lambda: math_ops.add(x, 1), lambda: n)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, r.eval())
def testWhileCond_3(self):
with self.test_session():
n = ops.convert_to_tensor(0)
c = lambda x: math_ops.less(x, 10)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(math_ops.less(0, 1),
lambda: math_ops.add(x, 1),
lambda: math_ops.subtract(x, 1))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, r.eval())
# NOTE: It is ok to have parallel_iterations > 1
def testWhileUpdateVariable_1(self):
with self.test_session():
select = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j):
return math_ops.less(j, 3)
def loop_body(j):
ns = state_ops.scatter_update(select, j, 10.0)
nj = math_ops.add(j, 1)
op = control_flow_ops.group(ns)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = control_flow_ops.while_loop(
loop_iterator, loop_body, [n], parallel_iterations=1)
variables.global_variables_initializer().run()
self.assertEqual(3, r.eval())
result = select.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
def testWhileUpdateVariable_2(self):
with self.test_session():
select1 = variables.Variable([3.0, 4.0, 5.0])
select2 = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j):
return math_ops.less(j, 3)
def loop_body(j):
ns1 = state_ops.scatter_update(select1, j, 10.0)
ns2 = state_ops.scatter_update(select2, j, 10.0)
nj = math_ops.add(j, 1)
op = control_flow_ops.group(ns1, ns2)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = control_flow_ops.while_loop(
loop_iterator, loop_body, [n], parallel_iterations=1)
variables.global_variables_initializer().run()
self.assertEqual(3, r.eval())
result1 = select1.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result1)
result2 = select2.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result2)
def testWhileUpdateVariable_3(self):
with self.test_session():
select = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j, _):
return math_ops.less(j, 3)
def loop_body(j, _):
ns = state_ops.scatter_update(select, j, 10.0)
nj = math_ops.add(j, 1)
return [nj, ns]
r = control_flow_ops.while_loop(
loop_iterator,
loop_body, [n, array_ops.identity(select)],
parallel_iterations=1)
variables.global_variables_initializer().run()
result = r[1].eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
# b/24814703
def testWhileUpdateVariable_4(self):
with self.test_session():
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
variables.global_variables_initializer().run()
c = constant_op.constant(0, name="c")
asn1 = state_ops.assign_add(var_a, 1, name="a_add")
# Loop condition
def pred(i):
return math_ops.less(i, 10)
# Loop body
def loop_body(i):
asn2 = state_ops.assign_add(var_b, asn1, name="b_add")
with ops.control_dependencies([asn2]):
ni = math_ops.add(i, 1, name="i_add")
return ni
lpa = control_flow_ops.while_loop(
pred, loop_body, [c], parallel_iterations=1)
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(10, var_b.eval())
# b/24736492
def testWhileUpdateVariable_5(self):
with self.test_session():
# Create some variables.
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
variables.global_variables_initializer().run()
# Change condition to check var_b
def pred(_):
return math_ops.less(var_b, 10)
# Change body to increment var_b
def loop_body(i):
asn1 = state_ops.assign_add(
var_a, constant_op.constant(1), name="a_add")
asn2 = state_ops.assign_add(
var_b, constant_op.constant(1), name="b_add")
with ops.control_dependencies([asn1, asn2]):
inc_b = array_ops.identity(var_b)
return inc_b
lpa = control_flow_ops.while_loop(
pred, loop_body, [var_b], parallel_iterations=1, name="loop")
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(10, var_a.eval())
self.assertEqual(10, var_b.eval())
# b/24814668
def testWhileUpdateVariable_6(self):
with self.test_session():
# Create some variables.
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
c = constant_op.constant(0)
variables.global_variables_initializer().run()
# Loop condition
def pred(i):
return math_ops.less(i, 10)
# Loop body
def loop_body(i):
asn1 = state_ops.assign_add(var_a, 1, name="a_add")
with ops.control_dependencies([asn1]):
asn2 = state_ops.assign_add(var_b, var_a, name="b_add")
with ops.control_dependencies([asn2]):
ni = math_ops.add(i, 1, name="i_add")
return ni
lpa = control_flow_ops.while_loop(
pred, loop_body, [c], parallel_iterations=1, name="loop")
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
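      # var_a counts 1..10 and var_b accumulates each new var_a value,
      # so var_b = 1 + 2 + ... + 10 = 55.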
self.assertEqual(55, var_b.eval())
self.assertEqual(10, var_a.eval())
def testWhileQueue_1(self):
with self.test_session():
q = data_flow_ops.FIFOQueue(-1, dtypes.int32)
i = constant_op.constant(0)
def c(i):
return math_ops.less(i, 10)
def b(i):
ni = math_ops.add(i, 1)
ni = control_flow_ops.with_dependencies([q.enqueue((i,))], ni)
return ni
r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
self.assertEqual([10], r.eval())
for i in xrange(10):
self.assertEqual([i], q.dequeue().eval())
def testWhileStack_1(self):
with self.test_session():
s = gen_data_flow_ops.stack_v2(-1, dtypes.int32, stack_name="foo")
i = constant_op.constant(0)
def c(i):
return math_ops.less(i, 10)
def b(i):
ni = math_ops.add(i, 1)
ni = control_flow_ops.with_dependencies(
[gen_data_flow_ops.stack_push_v2(s, i)], ni)
return ni
r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
x = constant_op.constant(0)
def c1(i, _):
return math_ops.greater(i, 0)
def b1(i, x):
ni = math_ops.subtract(i, 1)
nx = x + gen_data_flow_ops.stack_pop_v2(s, dtypes.int32)
return [ni, nx]
_, rx = control_flow_ops.while_loop(
c1,
b1, [r, x],
[r.get_shape(), tensor_shape.unknown_shape()],
parallel_iterations=1)
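      # The second loop pops the pushed values 0..9 and sums them: 45.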
self.assertEqual(45, rx.eval())
def _testWhileGrad_ColocateGradients(self, colocate):
gpu_dev_name = test.gpu_device_name() if test.is_gpu_available(
) else "/device:GPU:0"
graph = ops.Graph()
with graph.as_default():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
def b(x):
with ops.device(gpu_dev_name):
return math_ops.square(x)
loop = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = gradients_impl.gradients(
loop, v, colocate_gradients_with_ops=colocate)[0]
r_ops = graph.get_operations()
r_devices = [(op.name, op.device) for op in r_ops]
self.assertTrue(any("Square" in op.name for op in r_ops))
for (name, dev) in r_devices:
if not colocate and name.endswith("Square"):
          # Only the forward graph places the Square op on the GPU device.
self.assertTrue(gpu_dev_name in dev)
elif colocate and "Square" in name:
          # Both forward and backward graphs place Square/Square_grad on the GPU.
self.assertTrue(gpu_dev_name in dev)
else:
self.assertFalse(gpu_dev_name in dev)
with self.test_session(graph=graph) as sess:
self.assertAllClose(1024.0, sess.run(r))
def testWhileGrad_ColocateGradients(self):
self._testWhileGrad_ColocateGradients(colocate=False)
self._testWhileGrad_ColocateGradients(colocate=True)
def testWhileGrad_Square(self):
with self.test_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = control_flow_ops.cond(math_ops.less(1, 2), lambda: r, lambda: v)
r = gradients_impl.gradients(r, v)[0]
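      # The loop squares v three times (2 -> 4 -> 16 -> 256), so r = v**8
      # and dr/dv = 8 * v**7 = 1024.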
self.assertAllClose(1024.0, r.eval())
def testWhileGrad_Shape(self):
with self.test_session():
x = array_ops.placeholder(dtypes.float32, shape=[None])
v = constant_op.constant([2.0], name="v")
n = constant_op.constant(0, name="n")
c = lambda i, v: math_ops.less(i, 5)
b = lambda i, v: [i + 1, math_ops.multiply(x, v)]
r = control_flow_ops.while_loop(
c,
b, [n, v],
[n.get_shape(), tensor_shape.unknown_shape()],
parallel_iterations=1)
r = gradients_impl.gradients(r[1], x)[0]
self.assertEqual([None], r.get_shape().as_list())
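      # Five iterations give r[1] = 2.0 * x**5 elementwise, so the gradient
      # is 10 * x**4: [810, 2560] at x = [3, 4].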
self.assertAllClose([810.0, 2560.0], r.eval(feed_dict={x: [3.0, 4.0]}))
def testWhileGrad_BaseShape(self):
with self.test_session() as sess:
x = array_ops.placeholder(dtypes.float32, [None])
v0 = constant_op.constant([2.0, 2.0], name="v")
c = lambda v: constant_op.constant(False)
b = lambda v: math_ops.multiply(v, x)
r = control_flow_ops.while_loop(c, b, [v0])
y = math_ops.square(x)
r = gradients_impl.gradients([r, y], x)[0]
self.assertAllClose([2.0, 4.0], sess.run(r, feed_dict={x: [1.0, 2.0]}))
def testWhileGrad_MultipleUses(self):
with self.test_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = math_ops.multiply(r, r)
r = gradients_impl.gradients(r, v)[0]
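      # r * r = v**16, so the gradient is 16 * v**15 = 524288.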
self.assertEqual(524288.0, r.eval())
def testWhileGrad_LoopAdd(self):
with self.test_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = math_ops.add(r, r)
r = gradients_impl.gradients(r, v)[0]
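      # r + r = 2 * v**8, so the gradient is 16 * v**7 = 2048.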
self.assertAllClose(2048.0, r.eval())
def _testWhileGrad_Mul(self, use_gpu, p_iters):
with self.test_session(use_gpu=use_gpu) as sess:
a = constant_op.constant(3.0, name="a")
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = lambda v: math_ops.multiply(v, a)
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=p_iters)
grad_a, grad_v = gradients_impl.gradients(r, [a, v])
grad_a_val, grad_v_val = sess.run([grad_a, grad_v])
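      # Four iterations give r = v * a**4, so dr/da = 4 * v * a**3 = 216
      # and dr/dv = a**4 = 81.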
self.assertAllClose(216.0, grad_a_val)
self.assertAllClose(81.0, grad_v_val)
def testWhileGrad_Mul(self):
self._testWhileGrad_Mul(use_gpu=False, p_iters=1)
self._testWhileGrad_Mul(use_gpu=False, p_iters=10)
self._testWhileGrad_Mul(use_gpu=True, p_iters=1)
self._testWhileGrad_Mul(use_gpu=True, p_iters=10)
def _testNestedWhileCondWhileGrad(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
v = constant_op.constant(1.0)
def inner_loop(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
def b(x):
return control_flow_ops.cond(
constant_op.constant(True),
lambda: math_ops.square(inner_loop(x)[1]),
lambda: math_ops.multiply(x, 2.0))
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(512.0, r.eval())
def testNestedWhileCondWhileGrad(self):
self._testNestedWhileCondWhileGrad(use_gpu=False)
self._testNestedWhileCondWhileGrad(use_gpu=True)
def testWhileGrad_Variable(self):
with self.test_session():
a = variables.Variable(3.0)
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = lambda v: math_ops.multiply(v, a)
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = gradients_impl.gradients(r, a)
variables.global_variables_initializer().run()
self.assertAllClose(216.0, r[0].eval())
def testWhileGradInCond(self):
with self.test_session():
n = ops.convert_to_tensor(1.0, name="n")
x = array_ops.placeholder(dtypes.float32, shape=None)
c = lambda n: math_ops.less(n, 10.0)
b = lambda n: math_ops.add(n, x)
def fn1():
r = control_flow_ops.while_loop(c, b, [n],
[tensor_shape.unknown_shape()])
return gradients_impl.gradients(r, x)
r = control_flow_ops.cond(math_ops.less(1, 2), fn1, lambda: x)
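      # With x = 1.0 the loop adds x nine times (1.0 -> 10.0), so dr/dx = 9.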
self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
def testGradInWhileWrtInitialLoopVal(self):
with self.test_session():
x = array_ops.placeholder(dtypes.float32, shape=(), name="x")
y = x + 1
def body(i, v):
z = v * 2
return i + 1, gradients_impl.gradients(z, x)[0]
with self.assertRaisesRegexp(
ValueError,
"Cannot compute gradient inside while loop with respect to op 'x'. "
"We do not support taking the gradient wrt or through the initial "
"value of a loop variable. Gradients can be computed through "
"loop invariants or wrt the input parameters to the loop body."):
control_flow_ops.while_loop(lambda i, x: i < 3, body, [0, y])
def testWhileGradInWhile(self):
with self.test_session():
n = ops.convert_to_tensor(1.0, name="n")
x = array_ops.placeholder(dtypes.float32, shape=None)
c = lambda n: math_ops.less(n, 10.0)
b = lambda n: math_ops.add(n, x)
def b1(n):
r = control_flow_ops.while_loop(c, b, [n],
[tensor_shape.unknown_shape()])
return gradients_impl.gradients(r, x)
r = control_flow_ops.while_loop(lambda n: n < 6.0, b1, [n],
[tensor_shape.unknown_shape()])
self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
def testCondGradInNestedWhiles(self):
def outer_body(i, x):
_, x = control_flow_ops.while_loop(
lambda j, x: j < 3, inner_body, [0, 0.0])
return i + 1, x
def inner_body(j, x):
y = control_flow_ops.cond(math_ops.less(x, 1), lambda: 2 * x, lambda: x)
return j + 1, gradients_impl.gradients(y, x)[0]
i, x = control_flow_ops.while_loop(lambda i, x: i < 3, outer_body, [0, 0.0])
with self.test_session() as sess:
i_val, x_val = sess.run([i, x])
self.assertEqual(i_val, 3)
self.assertAllClose(x_val, 1.0)
def testWhile_NestedInput(self):
with self.test_session() as sess:
named = collections.namedtuple("named", ("a", "b"))
loop_vars = [
named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
(constant_op.constant(2.0), constant_op.constant(3.0)),
constant_op.constant(4.0)
]
c = lambda lv0, _1, _2: lv0.a < 100.0
def b(lv0, lv1, lv2):
lv0 = named(a=lv0.a + 1, b=lv0.b)
lv1 = (lv1[0] + 1, lv1[1])
lv2 += 2
return [lv0, lv1, lv2]
r = control_flow_ops.while_loop(c, b, loop_vars)
self.assertTrue(isinstance(r, list))
self.assertTrue(isinstance(r[0], named))
self.assertTrue(isinstance(r[1], tuple))
self.assertTrue(isinstance(r[2], ops.Tensor))
r_flattened = nest.flatten(r)
self.assertEqual([100.0, 1.0, 102.0, 3.0, 4.0 + 100 * 2.0],
sess.run(r_flattened))
def testWhile_NestedBadArityFails(self):
with self.test_session():
named = collections.namedtuple("named", ("a", "b"))
loop_vars = [
named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
(constant_op.constant(2.0), constant_op.constant(3.0)),
constant_op.constant(4.0)
]
c = lambda lv0, _1, _2: lv0.a < 100.0
def b(lv0, lv1, _):
return [lv0, lv1]
with self.assertRaisesRegexp(ValueError, "the same number of elements"):
control_flow_ops.while_loop(c, b, loop_vars)
def testWhileGrad_ys_xs(self):
with self.test_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = math_ops.add(x, y)
x1 = math_ops.multiply(x, y1)
return x1, y1
rx, ry = control_flow_ops.while_loop(c, b, [x, y], parallel_iterations=1)
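      # Two iterations give ry = (x + 1) * (x + y) = 20 and
      # rx = x * (x + 1) * (x + y)**2 = 300; the four gradients below follow.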
r = gradients_impl.gradients([rx, ry], x)
self.assertAllClose(304.0, r[0].eval())
r = gradients_impl.gradients([rx, ry], y)
self.assertAllClose(124.0, r[0].eval())
r = gradients_impl.gradients([rx], x)
self.assertAllClose(295.0, r[0].eval())
r = gradients_impl.gradients([rx], y)
self.assertAllClose(120.0, r[0].eval())
def testWhileGrad_Dependency(self):
with self.test_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 10)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
ri, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
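      # x is doubled ten times, so rx = 1024 * x and d(rx)/dx = 1024;
      # ri does not depend on x.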
r = gradients_impl.gradients([ri, rx], x)
self.assertAllClose(1024.0, r[0].eval())
r = gradients_impl.gradients([rx], x)
self.assertAllClose(1024.0, r[0].eval())
def testWhileGrad_NoGradient(self):
with self.test_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], back_prop=False)
r = math_ops.add(r, v)
r = gradients_impl.gradients(r, v)
self.assertAllClose(1.0, r[0].eval())
def testWhileGrad_NoDependency(self):
with self.test_session() as sess:
variable = variables.Variable(array_ops.ones([2, 3]))
duration = array_ops.zeros([], dtype=dtypes.int32)
def cond(duration, tensor, _):
del tensor
return duration < 10
def body(duration, tensor, _):
return (duration + 1, tensor, tensor)
loop_vars = [duration, variable, variable]
tensors = control_flow_ops.while_loop(
cond=cond, body=body, loop_vars=loop_vars)
cost = math_ops.reduce_sum(tensors[2])
grad = gradients_impl.gradients(cost, [variable])
variables.global_variables_initializer().run()
self.assertAllClose(np.ones([2, 3]), sess.run(grad[0]))
def testWhileGrad_Const(self):
with self.test_session() as sess:
c0 = constant_op.constant(0.0, name="c0")
c1 = constant_op.constant(1.0, name="c1")
duration = constant_op.constant(0, name="t")
def cond(duration, _):
return duration < 1
def body(duration, _):
return duration + 1, c1
loop_vars = [duration, c0]
tensors = control_flow_ops.while_loop(
cond=cond, body=body, loop_vars=loop_vars)
cost = math_ops.reduce_sum(tensors[1])
grad = gradients_impl.gradients(cost, [c0])
self.assertAllClose(0.0, sess.run(grad[0]))
def testWhileGrad_SerialTwoLoops(self):
with self.test_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 5)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
_, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
_, rx = control_flow_ops.while_loop(c, b, [i, rx], parallel_iterations=1)
r = gradients_impl.gradients([rx], x)
self.assertAllClose(1024.0, r[0].eval())
def testWhileGrad_ParallelTwoLoops(self):
with self.test_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 5)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
_, r1 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
_, r2 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
rx = math_ops.add(r1, r2)
r = gradients_impl.gradients([rx], x)
self.assertAllClose(64.0, r[0].eval())
def testWhileGrad_OneOutputWithControlDependencyOnSecond(self):
with self.test_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(1.0, name="y")
c = lambda i, *_: math_ops.less(i, 1, name="cond_less")
def b(i, xi, yi):
# return (i + 1, xi, xi + yi)
return (math_ops.add(i, 1, name="inc"), array_ops.identity(
xi, name="xi"), math_ops.add(xi, yi, name="xi_plus_yi"))
_, x_f, y_f = control_flow_ops.while_loop(c, b, [i, x, y])
with ops.control_dependencies([x_f]):
y_f_d = array_ops.identity(y_f, name="y_f_d")
self.assertAllClose(2.0, y_f_d.eval()) # y_f_d = 1.0 + 1.0
g = gradients_impl.gradients([y_f_d], [x])[0]
self.assertTrue(g is not None)
self.assertAllClose(1.0, g.eval()) # y_f_d = x + 1.0, dy_f_d/dx = 1.0
def _testNestedWhileGrad_Simple(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
v = constant_op.constant(1.0)
def inner_loop(s):
c = lambda x: math_ops.less(x, 4.0)
b = lambda x: math_ops.multiply(x, 2.0)
return control_flow_ops.while_loop(c, b, [s])
c = lambda x: math_ops.less(x, 2.0)
b = lambda x: math_ops.multiply(inner_loop(x), 2.0)
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
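      # One outer pass doubles v twice in the inner loop and once more
      # outside, so r = 8 * v and dr/dv = 8.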
self.assertAllClose(8.0, r.eval())
def testNestedWhileGrad_Simple(self):
self._testNestedWhileGrad_Simple(use_gpu=False)
self._testNestedWhileGrad_Simple(use_gpu=True)
def testNestedWhileGrad_SerialInner(self):
with self.test_session():
v = constant_op.constant(1.0)
def inner_loop1(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
def inner_loop2(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
b = lambda x: inner_loop2(inner_loop1(x)[1])[1]
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(256.0, r.eval())
def testNestedWhileGrad_ParallelInner(self):
with self.test_session():
v = constant_op.constant(1.0)
def inner_loop1(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
def inner_loop2(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
b = lambda x: math_ops.multiply(inner_loop1(x)[1], inner_loop2(x)[1])
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(512.0, r.eval())
def testNestedWhileGrad_ParallelIterations(self):
# Make sure the stack pushes and pops of an inner loop are executed in
# the sequential order of the iterations of its outer loop.
with self.test_session() as sess:
def inner_loop(t):
fn = lambda n: n + math_ops.square(var)
return functional_ops.map_fn(fn=fn, elems=t, parallel_iterations=10)
def outer_loop(inp):
return functional_ops.map_fn(
fn=inner_loop, elems=inp, parallel_iterations=10)
var = variables.Variable(constant_op.constant(3.0))
inp = constant_op.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
res = outer_loop(inp)
optimizer = adam.AdamOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(math_ops.reduce_mean(math_ops.square(res)))
sess.run(variables.global_variables_initializer())
sess.run(train_op)
self.assertAllClose(2.999, var.eval())
def _testWhileCondGrad_Simple(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
v = ops.convert_to_tensor(2.0, name="v")
n = ops.convert_to_tensor(100.0, name="n")
one = ops.convert_to_tensor(1.0, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(constant_op.constant(True),
lambda: math_ops.square(x),
lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(1024.0, r.eval())
def testWhileCondGrad_Simple(self):
self._testWhileCondGrad_Simple(use_gpu=False)
self._testWhileCondGrad_Simple(use_gpu=True)
def testWhileCondGrad_UnknownShape(self):
with self.test_session() as sess:
v = array_ops.placeholder(dtypes.float32)
n = ops.convert_to_tensor(100.0, name="n")
one = ops.convert_to_tensor(1.0, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(constant_op.constant(True),
lambda: math_ops.square(x),
lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
r = sess.run(r, feed_dict={v: 2.0})
self.assertAllClose(1024.0, r)
def testWhileGrad_Concat(self):
with self.test_session() as sess:
x = variable_scope.get_variable("x", initializer=[[1., 2.]])
i0 = constant_op.constant(0)
h0 = array_ops.zeros([0, 2])
def condition(i, _):
return i < 2
def body(i, h):
return i + 1, array_ops.concat([h, x], 0)
_, h = control_flow_ops.while_loop(
condition, body, [i0, h0],
[i0.get_shape(), tensor_shape.TensorShape([None, 2])])
s = math_ops.reduce_sum(h)
sess.run(variables.global_variables_initializer())
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
op = optimizer.minimize(s)
sess.run(op)
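      # h is x concatenated twice, so ds/dx = 2 per element; one step with
      # learning rate 0.01 yields x - 0.02 = [[0.98, 1.98]].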
self.assertAllClose([[0.98000002, 1.98000002]], sess.run(x))
def testWhileWithRefsWithGradients_1(self):
with self.test_session() as sess:
x = variables.Variable(0.)._ref() # pylint: disable=protected-access
i = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 10)
self.assertEqual(x.dtype, dtypes.float32_ref)
def body(i, x):
self.assertEqual(x.dtype, dtypes.float32_ref)
return [i + 1, gen_array_ops.ref_identity(x)]
r = control_flow_ops.while_loop(c, body, [i, x], parallel_iterations=5)
grad_ys = [variables.Variable(73)._ref()] # pylint: disable=protected-access
grad = gradients_impl.gradients([r[1]], [x], grad_ys=grad_ys)
variables.global_variables_initializer().run()
self.assertEqual(r[0].dtype, dtypes.int32)
self.assertEqual(r[1].dtype, dtypes.float32_ref)
value_i, value_x, value_x_grad = sess.run(r + grad)
self.assertEqual(10, value_i)
self.assertEqual(0, value_x)
self.assertEqual(73, value_x_grad)
def testWhileGrad_IndexedSlices(self):
with self.test_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant([0, 3], name="indices")
shape = constant_op.constant([10], name="dense_shape")
i = constant_op.constant(0)
x = ops.IndexedSlices(values, indices, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
r = gradients_impl.gradients(r.values, values)[0]
self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())
def testWhileGrad_SparseTensor(self):
with self.test_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
sparse_tensor.SparseTensor(x.indices, x.values * 2.0, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
r = gradients_impl.gradients(r.values, values)[0]
self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())
def testCallGradInLoop(self):
with self.test_session() as sess:
i0 = constant_op.constant(0)
params = constant_op.constant(5.0)
params_1 = math_ops.square(params)
def c(i, _):
return i < 10
def b(i, x):
data = constant_op.constant([1.0, 2.0, 3.0])
data = math_ops.multiply(data, params_1)
x1 = x + gradients_impl.gradients(data, params)[0]
return i + 1, x1
output_grad = control_flow_ops.while_loop(
c, b, [i0, constant_op.constant(0.0)])
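      # Each iteration adds gradients(data, params) = (1 + 2 + 3) * 2 * params
      # = 60; ten iterations give 600.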
self.assertAllClose(600.0, sess.run(output_grad)[1])
def testWhileAndTensorArray(self):
with self.test_session() as sess:
param = constant_op.constant(2.0)
n0 = constant_op.constant(0)
y0 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")
def c(i, _):
return i < 10
def b(i, y):
return [
i + 1,
functional_ops.map_fn(lambda x: math_ops.multiply(x, param), y)
]
r = control_flow_ops.while_loop(c, b, [n0, y0], parallel_iterations=1)
r = gradients_impl.gradients(r, param)[0]
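      # Ten iterations scale y by param**10, so the gradient of the sum is
      # 10 * param**9 * sum(y0) = 10 * 512 * 21 = 107520.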
self.assertAllClose(107520.0, sess.run(r))
def testWhileGrad_StopGrad(self):
with self.test_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = math_ops.square(y)
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, ry = control_flow_ops.while_loop(c, b, [x, y])
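      # Two iterations give ry = y**4 = 16 and rx = (x**2 + y**2)**2 + y**4
      # = 185, so d(rx)/dy = 4*y*(x**2 + y**2) + 4*y**3 = 136 and
      # d(ry)/dy = 4*y**3 = 32.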
r = gradients_impl.gradients(rx, y)[0]
self.assertEqual(136.0, r.eval())
r = gradients_impl.gradients(ry, y)[0]
self.assertEqual(32.0, r.eval())
r = gradients_impl.gradients(array_ops.stop_gradient(rx), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(array_ops.stop_gradient(ry), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.square(rx)), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.add(rx, ry)), x)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.add(rx, ry)), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(math_ops.add(rx, ry), y)[0]
self.assertEqual(168.0, r.eval())
r = gradients_impl.gradients(
math_ops.add(rx, array_ops.stop_gradient(ry)), y)[0]
self.assertEqual(136.0, r.eval())
r = gradients_impl.gradients(
math_ops.add(array_ops.stop_gradient(rx), ry), y)[0]
self.assertEqual(32.0, r.eval())
def testWhileGrad_StopGradInside(self):
with self.test_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = array_ops.stop_gradient(math_ops.square(y))
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, _ = control_flow_ops.while_loop(c, b, [x, y])
r = gradients_impl.gradients(rx, y)[0]
self.assertAllClose(0.0, r.eval())
r = gradients_impl.gradients(rx, x)[0]
self.assertAllClose(156.0, r.eval())
def testWhileGrad_StopGradInsideNoShape(self):
with self.test_session() as sess:
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.float32)
c = lambda x, y: math_ops.less(math_ops.reduce_sum(x), 100.0)
def b(x, y):
y1 = array_ops.stop_gradient(math_ops.square(y, name="stopped"))
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, _ = control_flow_ops.while_loop(c, b, [x, y])
r = gradients_impl.gradients(rx, y)[0]
feed_dict = {x: [3.0, 4.0], y: [2.0, 3.0]}
self.assertAllClose([0.0, 0.0], sess.run(r, feed_dict=feed_dict))
r = gradients_impl.gradients(rx, x)[0]
self.assertAllClose([156.0, 400.0], sess.run(r, feed_dict=feed_dict))
name = "gradients/while/stopped_grad"
all_ops = x.graph.get_operations()
self.assertFalse(any([name in op.name for op in all_ops]))
def testWhileGradGradFail(self):
theta = variables.Variable(initial_value=1.)
def fn(prev, x):
return prev + x * theta
result = functional_ops.scan(fn, np.array([1., 2., 3.], dtype=np.float32))
grad_theta = gradients_impl.gradients(result, theta)
with self.assertRaisesRegexp(TypeError, "Second-order gradient"):
gradients_impl.gradients(grad_theta, theta)
grad_theta_stopped = array_ops.stop_gradient(grad_theta)
gradients_impl.gradients(grad_theta_stopped, theta)
def testStopGradOnWhileGrad(self):
with self.test_session():
x = constant_op.constant(2.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x: math_ops.less(x, 100.0)
b = lambda x: math_ops.multiply(x, y)
rx = control_flow_ops.while_loop(c, b, [x])
rg = gradients_impl.gradients(rx, y)[0]
rg = array_ops.stop_gradient(rg)
r = math_ops.add(math_ops.square(y), rx)
r = math_ops.add(r, rg)
r = gradients_impl.gradients(r, y)[0]
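      # rx = x * y**6 = 128 and rg = d(rx)/dy = 6 * x * y**5 = 384 is
      # stopped, so dr/dy = 2*y + d(rx)/dy = 4 + 384 = 388.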
self.assertEqual(388.0, r.eval())
def testWhileGradientWithNontrainablePath1(self):
q = variables.Variable([7., 8.])
def cond(_, y):
del y
return False
def body(x, _):
return x, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)
_, y = control_flow_ops.while_loop(cond, body, (math_ops.argmin(q), 0.))
dy_dq, = gradients_impl.gradients(y, q)
self.assertIsNotNone(dy_dq)
with self.test_session() as sess:
sess.run(q.initializer)
self.assertAllClose([0., 0.], sess.run(dy_dq))
def testWhileGradientWithNontrainablePath2(self):
q = variables.Variable([7., 8.])
def cond(_, y):
return math_ops.equal(y, 0.)
def body(x, _):
zero = constant_op.constant(0, dtype=dtypes.int64)
return zero, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)
_, y = control_flow_ops.while_loop(cond, body, (math_ops.argmin(q), 0.))
dy_dq, = gradients_impl.gradients(y, q)
self.assertIsNotNone(dy_dq)
with self.test_session() as sess:
sess.run(q.initializer)
self.assertAllClose([1., 1.], sess.run(dy_dq))
def testIssue16504(self):
c = constant_op.constant(np.arange(100), dtype=dtypes.float32)
w = variables.Variable(
initial_value=np.ones(100), dtype=dtypes.float32) / 100
k = variables.Variable(0, dtype=dtypes.int32)
chg_w = constant_op.constant(np.inf, dtype=dtypes.float32)
def cond(k, _, chg_w):
return math_ops.logical_and(k < 10, chg_w > 1e-3)
def body(k, w, chg_w):
grad, = gradients_impl.gradients(-math_ops.reduce_sum(w * c), w)
w_n = w * math_ops.exp(-0.1 * grad)
w_n /= math_ops.reduce_sum(w_n)
chg_w = (
math_ops.reduce_sum(math_ops.abs(w_n - w)) / math_ops.reduce_sum(
math_ops.abs(w)))
return k + 1, w_n, chg_w
_, w, _ = control_flow_ops.while_loop(cond, body, [k, w, chg_w])
grad, = gradients_impl.gradients(w, c)
self.assertIsNotNone(grad)
def testStopGradMultiFlows(self):
with self.test_session():
def body(i, y, r):
x = variable_scope.get_variable(
"x",
shape=(),
dtype=dtypes.float32,
initializer=init_ops.ones_initializer())
y *= x
return [i + 1, y, r + math_ops.reduce_sum(y)]
i0 = constant_op.constant(0)
y0 = array_ops.ones(5)
r0 = constant_op.constant(0.0)
cond = lambda i, y, r: i < 1
_, _, r = control_flow_ops.while_loop(
cond, body, [i0, y0, r0], back_prop=True)
vars_ = variables.global_variables()
grads = linalg_ops.norm(gradients_impl.gradients(r, vars_)[0])
z = math_ops.add(r, array_ops.stop_gradient(math_ops.reduce_sum(grads)))
result = gradients_impl.gradients(z, vars_)[0]
variables.global_variables_initializer().run()
self.assertEqual(5.0, result.eval())
def testOneValueCond(self):
with self.test_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
one = ops.convert_to_tensor(1, name="one")
two = ops.convert_to_tensor(2, name="two")
p = math_ops.greater_equal(c, 1)
i = control_flow_ops.cond(p, lambda: one, lambda: two)
self.assertTrue(isinstance(i, ops.Tensor))
# True case: c = 2 is >= 1
self.assertEqual([1], i.eval(feed_dict={c: 2}))
# False case: c = 0 is not >= 1
self.assertEqual([2], i.eval(feed_dict={c: 0}))
def testExampleCond(self):
with self.test_session():
x = ops.convert_to_tensor([-2.0, 2.0], name="x")
d = array_ops.placeholder(dtypes.int32, shape=[])
def l2():
return math_ops.sqrt(math_ops.reduce_sum(math_ops.square(x)))
def l1():
return math_ops.reduce_sum(math_ops.abs(x))
i = control_flow_ops.cond(math_ops.equal(d, 2), l2, l1)
self.assertAllClose(4.0, i.eval(feed_dict={d: 1}))
self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2}))
def testCase(self):
with self.test_session():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = constant_op.constant(3)
f1 = lambda: constant_op.constant(17)
f2 = lambda: constant_op.constant(23)
f3 = lambda: constant_op.constant(-1)
r1 = control_flow_ops.case(
{
x < y: f1,
x > z: f2
}, default=f3, exclusive=True)
self.assertAllEqual(r1.eval(), 17)
r2 = control_flow_ops.case([(y > z, f1), (y > x, f2)], default=f3)
self.assertAllEqual(r2.eval(), 23)
      # Duplicate predicates are allowed; the first true one is selected.
r3 = control_flow_ops.case([(x < y, f1), (x < y, f2)], default=f3)
self.assertAllEqual(r3.eval(), 17)
      # Duplicate true predicates cause an error if exclusive=True.
r4 = control_flow_ops.case(
[(x < y, f1), (x < y, f2)], default=f3, exclusive=True)
with self.assertRaisesOpError("Input error:"):
r4.eval()
# Check that the default is called if none of the others are
r5 = control_flow_ops.case({x > y: f1}, default=f3)
self.assertAllEqual(r5.eval(), -1)
ran_once = [False, False, False]
def break_run_twice(ix):
def _break():
ran_once[ix] = True
return constant_op.constant(ix)
return _break
# Should not fail - each conditional gets called exactly once
# except default. Default gets called twice: once to create an
# empty output and once for the actual cond switch.
r6 = control_flow_ops.case(
[(x < y, break_run_twice(0)), (x > y, break_run_twice(1))],
default=lambda: constant_op.constant(2))
self.assertAllEqual(r6.eval(), 0)
def testCaseSideEffects(self):
with self.test_session() as sess:
v0 = variables.Variable(-1)
v1 = variables.Variable(-1)
v2 = variables.Variable(-1)
a = lambda: control_flow_ops.with_dependencies([state_ops.assign(v0, 0)], 0)
b = lambda: control_flow_ops.with_dependencies([state_ops.assign(v1, 1)], 1)
c = lambda: control_flow_ops.with_dependencies([state_ops.assign(v2, 2)], 2)
x = constant_op.constant(1)
y = constant_op.constant(2)
r0 = control_flow_ops.case(
((x < y, a), (x > y, b)), default=c, exclusive=True)
r1 = control_flow_ops.case(
((x > y, a), (x < y, b)), default=c, exclusive=True)
r2 = control_flow_ops.case(
((x > y, a), (x > y, b)), default=c, exclusive=True)
variables.global_variables_initializer().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(2, r2.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [-1, -1, 2])
variables.global_variables_initializer().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(1, r1.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [-1, 1, -1])
variables.global_variables_initializer().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(0, r0.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [0, -1, -1])
def testOneOpCond(self):
with self.test_session():
v = variables.Variable(0)
c = ops.convert_to_tensor(0)
one = ops.convert_to_tensor(1)
two = ops.convert_to_tensor(2)
p = math_ops.greater_equal(c, 1)
def a():
return state_ops.assign(v, one)
def b():
return state_ops.assign(v, two)
i = control_flow_ops.cond(p, a, b)
self.assertTrue(isinstance(i, ops.Tensor))
variables.global_variables_initializer().run()
self.assertEqual(0, v.eval())
# True case: c = 2 is >= 1, v is set to 1.
self.assertEqual(1, i.eval(feed_dict={c.name: 2}))
self.assertEqual(1, v.eval())
# False case: c = 0 is not >= 1, v is set to 2.
self.assertEqual(2, i.eval(feed_dict={c.name: 0}))
self.assertEqual(2, v.eval())
def testWithOpsDependencies(self):
with self.test_session() as sess:
v = variables.Variable(0.0)
c = constant_op.constant(10)
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
sess.run([c, v])
# Use a control dependency to ensure init_variable is run
# while asking for c
real_v = control_flow_ops.with_dependencies(
name="real_tensor",
output_tensor=v._ref(), # pylint: disable=protected-access
dependencies=[v.initializer])
c_val, real_v_val = sess.run([c, real_v])
      # Ensure the fetched value of 'c' is unchanged by the dependency
self.assertAllEqual(10, c_val)
# Ensure that 'v' is initialized
self.assertAllClose(0.0, real_v_val)
def testWithTensorDependencies(self):
with self.test_session():
v = variables.Variable(0.0)
c1 = constant_op.constant(10)
c2 = constant_op.constant(20)
# c1_with_init_v depends on the init op for v
c1_with_init_v = control_flow_ops.with_dependencies(
name="c1_with_init_v", output_tensor=c1, dependencies=[v.initializer])
# c2_with_c1 depends on the value of c1_with_init_v
c2_with_c1_dep = control_flow_ops.with_dependencies(
name="c2_with_c1_dep",
output_tensor=c2,
dependencies=[c1_with_init_v])
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v.eval()
# Get the value of 'c2_with_c1_dep', which should cause 'v'
# to be initialized.
self.assertAllEqual(20, c2_with_c1_dep.eval())
# Ensure that 'v' is initialized
self.assertAllClose(0.0, v.eval())
def testWithIndexedSlicesDependencies(self):
with self.test_session():
v = variables.Variable(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32))
v_at_1 = ops.IndexedSlices(v, constant_op.constant([1]))
gather_v_at_1 = array_ops.gather(v_at_1.values, v_at_1.indices)
v_at_1_after_init = control_flow_ops.with_dependencies([v.initializer],
v_at_1)
gather_v_at_1_after_init = array_ops.gather(v_at_1_after_init.values,
v_at_1_after_init.indices)
# Fetching gather_v_at_1 will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
gather_v_at_1.eval()
# Getting gather_v_at_1_after_init will work, and initialize v.
self.assertAllEqual([[10.0, 11.0]], gather_v_at_1_after_init.eval())
# Double check that 'v' is initialized
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]], v.eval())
def testDependenciesDevice(self):
with ops.Graph().as_default():
# device set on tensor => same device on dep.
with ops.device("/job:ps"):
vd = variables.Variable([0.0])
with_vd_dep = control_flow_ops.with_dependencies([vd.initializer], vd)
self.assertTrue("/job:ps" in with_vd_dep.device)
# No device set on tensor => no device on dep.
vnod = variables.Variable([0.0])
with_vnod_dep = control_flow_ops.with_dependencies([vnod.initializer],
vnod)
self.assertDeviceEqual(None, with_vnod_dep.device)
# device set on tensor, default device on graph => default device on dep.
vdef = variables.Variable([0.0], name="vdef")
with ops.device("/job:worker/device:GPU:1"):
with_vdef_dep = control_flow_ops.with_dependencies([vdef.initializer],
vdef)
# The device is empty, but the colocation constraint is set.
self.assertDeviceEqual("", with_vdef_dep.device)
self.assertEqual([b"loc:@vdef"], with_vdef_dep.op.colocation_groups())
def testGroup(self):
with self.test_session() as sess:
v1 = variables.Variable([0.0])
v2 = variables.Variable([1.0])
# Group init1 and init2 and run.
init = control_flow_ops.group(v1.initializer, v2.initializer)
# Fetching v1 directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# Runs "init" before fetching v1 and v2.
init.run()
v1_val, v2_val = sess.run([v1, v2])
# Ensure that v1 and v2 are initialized
self.assertAllClose([0.0], v1_val)
self.assertAllClose([1.0], v2_val)
def testGroupEmpty(self):
op = control_flow_ops.group()
self.assertEqual(op.type, "NoOp")
self.assertEqual(op.control_inputs, [])
def testMergeShapes(self):
# All inputs unknown.
p1 = array_ops.placeholder(dtypes.float32)
p2 = array_ops.placeholder(dtypes.float32)
p3 = array_ops.placeholder(dtypes.float32)
m, index = control_flow_ops.merge([p1, p2, p3])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with different ranks.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2, 3])
m, index = control_flow_ops.merge([p1, p2])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with some dimensions different.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[2, 1])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[2, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
# All inputs known with same dimensions.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([1, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[None, None])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, None])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
def testRefSelect(self):
index = array_ops.placeholder(dtypes.int32)
# All inputs unknown.
p1 = array_ops.placeholder(dtypes.float32)
p2 = array_ops.placeholder(dtypes.float32)
p3 = array_ops.placeholder(dtypes.float32)
v1 = variables.Variable(p1, validate_shape=False)
v2 = variables.Variable(p2, validate_shape=False)
v3 = variables.Variable(p3, validate_shape=False)
self.assertIs(None, v1.get_shape().ndims)
s = control_flow_ops.ref_select(index, [v1, v2, v3])
self.assertIs(None, s.get_shape().ndims)
# All inputs known but different.
v1 = variables.Variable([[1, 2]])
v2 = variables.Variable([[2], [1]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertIs(None, s.get_shape().ndims)
# All inputs known and same.
v1 = variables.Variable([[1, 2]])
v2 = variables.Variable([[1, 2]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual([1, 2], s.get_shape())
# Possibly the same but not guaranteed.
v1 = variables.Variable([[1., 2.]])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
v2 = variables.Variable(p2, validate_shape=False)
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual(None, s.get_shape())
def testRunLoopTensor(self):
with self.test_session() as sess:
tensor_list = []
def condition(t):
return t < constant_op.constant(5)
def body(_):
tensor_list.append(constant_op.constant(5))
return constant_op.constant(10)
result = control_flow_ops.while_loop(condition, body,
[constant_op.constant(4)])
self.assertEqual(10, sess.run(result))
# Ensure that we cannot run a tensor that escapes the loop body
# accidentally.
with self.assertRaises(ValueError):
sess.run(tensor_list[0])
def testWhilePyFuncBasic(self):
def func(x):
return np.square(x)
with self.test_session():
r = control_flow_ops.while_loop(
lambda i, v: i < 4,
lambda i, v: [i + 1, script_ops.py_func(func, [v], [dtypes.float32])[0]],
[constant_op.constant(0), constant_op.constant(2.0, dtypes.float32)],
[tensor_shape.unknown_shape(), tensor_shape.unknown_shape()])
self.assertEqual(r[1].eval(), 65536.0)
def testWhileFuncBasic(self):
@function.Defun(dtypes.float32)
def func(x):
return math_ops.square(math_ops.square(x))
with self.test_session():
x = constant_op.constant(2.0, dtypes.float32)
r = control_flow_ops.while_loop(
lambda i, v: i < 2, lambda i, v: [i + 1, func(v)],
[constant_op.constant(0), x],
[tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()])
self.assertEqual(r[1].eval(), 65536.0)
r = gradients_impl.gradients(r, x)[0]
self.assertEqual(r.eval(), 524288.0)
self.assertEqual(
len([op for op in x.graph.get_operations() if op.type == "StackV2"]),
1)
class ControlFlowContextCheckTest(test.TestCase):
def _getWhileTensor(self):
"""Creates and returns a tensor from a while context."""
tensor = []
def body(i):
if not tensor:
tensor.append(constant_op.constant(1))
return i + tensor[0]
control_flow_ops.while_loop(lambda i: i < 10, body, [0])
return tensor[0]
def _getCondTensor(self):
cond_tensor = []
def true_fn():
if not cond_tensor:
cond_tensor.append(constant_op.constant(1))
return cond_tensor[0]
control_flow_ops.cond(
math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0))
return cond_tensor[0]
def testInvalidContext(self):
# Accessing a while loop tensor outside of control flow is illegal.
while_tensor = self._getWhileTensor()
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'while/Const_1' as input to 'Add' because 'while/Const_1' "
"is in a while loop. See info log for more details."):
math_ops.add(1, while_tensor)
def testInvalidContextInCond(self):
# Accessing a while loop tensor in cond is illegal.
while_tensor = self._getWhileTensor()
with self.assertRaisesRegexp(
ValueError, "Cannot use 'while/Const_1' as input to 'cond/Add' because "
"'while/Const_1' is in a while loop. See info log for more details."):
# TODO(skyewm): this passes if we return while_tensor directly instead
# of using it as input to another op.
control_flow_ops.cond(
math_ops.less(1, 2), lambda: math_ops.add(1, while_tensor),
lambda: constant_op.constant(0))
def testInvalidContextInWhile(self):
# Accessing a while loop tensor in a different while loop is illegal.
while_tensor = self._getWhileTensor()
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'while_1/Add' as input to 'while/Const_1' because they are "
"in different while loops. See info log for more details."):
control_flow_ops.while_loop(lambda i: i < 10,
lambda x: math_ops.add(1, while_tensor), [0])
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'while_2/NextIteration' as input to 'while/Const_1' "
"because they are in different while loops. See info log for more "
"details."):
control_flow_ops.while_loop(lambda i: i < 10, lambda i: while_tensor, [0])
def testValidCondContext(self):
# Accessing a tensor from a cond context is OK (although dangerous).
cond_tensor = self._getCondTensor()
math_ops.add(1, cond_tensor)
def testValidCondContextBranches(self):
# Accessing a tensor from a cond context from the other branch's cond
# context is OK (although dangerous).
cond_tensor = []
def branch_fn():
if not cond_tensor:
cond_tensor.append(constant_op.constant(1))
return cond_tensor[0]
control_flow_ops.cond(math_ops.less(1, 2), branch_fn, branch_fn)
def testValidWhileContext(self):
# Accessing a tensor in a nested while is OK.
def body(_):
c = constant_op.constant(1)
return control_flow_ops.while_loop(lambda i: i < 3, lambda i: i + c, [0])
control_flow_ops.while_loop(lambda i: i < 5, body, [0])
def testValidNestedContexts(self):
# Accessing a tensor from a cond context in a while context, all inside an
# outer while context, is OK.
def body(_):
cond_tensor = self._getCondTensor()
# Create another cond containing the while loop for good measure
return control_flow_ops.cond(
math_ops.less(1, 2),
lambda: control_flow_ops.while_loop(lambda i: i < 3,
lambda i: i + cond_tensor, [0]),
lambda: constant_op.constant(0))
control_flow_ops.while_loop(lambda i: i < 5, body, [0])
def testInvalidNestedContexts(self):
# Accessing a tensor from a while context in a different while context, all
# inside a cond context, is illegal.
def true_fn():
while_tensor = self._getWhileTensor()
return control_flow_ops.while_loop(lambda i: i < 3,
lambda i: i + while_tensor, [0])
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'cond/while_1/add' as input to 'cond/while/Const_1' because"
" they are in different while loops. See info log for more details."):
control_flow_ops.cond(
math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0))
class TupleTest(test.TestCase):
def testTensors(self):
for v1_first in [True, False]:
with self.test_session():
v1 = variables.Variable([1.0])
add1 = math_ops.add(
control_flow_ops.with_dependencies([v1.initializer], v1._ref()), # pylint: disable=protected-access
2.0)
v2 = variables.Variable([10.0])
add2 = math_ops.add(
control_flow_ops.with_dependencies([v2.initializer], v2._ref()), # pylint: disable=protected-access
20.0)
t1, _, t2 = control_flow_ops.tuple([add1, None, add2])
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v2.eval()
if v1_first:
# Getting t1 initializes v2.
self.assertAllClose([3.0], t1.eval())
self.assertAllClose([10.0], v2.eval())
else:
# Getting t2 initializes v1.
self.assertAllClose([30.0], t2.eval())
self.assertAllClose([1.0], v1.eval())
def testIndexedSlices(self):
for v1_first in [True, False]:
with self.test_session():
v1 = variables.Variable(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(
np.float32))
v1_at_1 = ops.IndexedSlices(
control_flow_ops.with_dependencies([v1.initializer], v1._ref()), # pylint: disable=protected-access
constant_op.constant([1]))
v2 = variables.Variable(
np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype(
np.float32))
v2_at_1 = ops.IndexedSlices(
control_flow_ops.with_dependencies([v2.initializer], v2._ref()), # pylint: disable=protected-access
constant_op.constant([1]))
st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1])
g1 = array_ops.gather(st1.values, st1.indices)
g2 = array_ops.gather(st2.values, st2.indices)
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v2.eval()
if v1_first:
# Getting g1 initializes v2.
self.assertAllClose([[10.0, 11.0]], g1.eval())
self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]],
v2.eval())
else:
# Getting g2 initializes v1.
self.assertAllClose([[10.1, 11.1]], g2.eval())
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
v1.eval())
def testAcceptTensorsAsControlInputs(self):
with self.test_session():
var = variables.Variable(0)
assign = state_ops.assign(var, 1)
t, = control_flow_ops.tuple(
[constant_op.constant(0)], control_inputs=[assign])
# Should trigger the assign.
t.eval()
self.assertEquals(1, var.eval())
class AssertTest(test.TestCase):
def testGuardedAssertDoesNotCopyWhenTrue(self):
with self.test_session(use_gpu=True) as sess:
with ops.device(test.gpu_device_name()):
value = constant_op.constant(1.0)
with ops.device("/cpu:0"):
true = constant_op.constant(True)
guarded_assert = control_flow_ops.Assert(true, [value], name="guarded")
unguarded_assert = gen_logging_ops._assert(
true, [value], name="unguarded")
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
guarded_metadata = config_pb2.RunMetadata()
sess.run(guarded_assert, options=opts, run_metadata=guarded_metadata)
unguarded_metadata = config_pb2.RunMetadata()
sess.run(unguarded_assert, options=opts, run_metadata=unguarded_metadata)
guarded_nodestat_names = [
n.node_name
for d in guarded_metadata.step_stats.dev_stats
for n in d.node_stats
]
unguarded_nodestat_names = [
n.node_name
for d in unguarded_metadata.step_stats.dev_stats
for n in d.node_stats
]
guarded_memcpy_nodestat_names = [
n for n in guarded_nodestat_names if "MEMCPYDtoH" in n
]
unguarded_memcpy_nodestat_names = [
n for n in unguarded_nodestat_names if "MEMCPYDtoH" in n
]
if "GPU" in [d.device_type for d in device_lib.list_local_devices()]:
# A copy was performed for the unguarded assert
self.assertLess(0, len(unguarded_memcpy_nodestat_names))
# No copy was performed for the guarded assert
self.assertEqual([], guarded_memcpy_nodestat_names)
class WhileOpBenchmark(test.Benchmark):
"""Evaluate the performance of while_loop op."""
def _getInitVariables(self):
batch_size = 10
image_size = 256
kernel_size = 3
depth = 16
init_step = constant_op.constant(-1)
image = variable_scope.get_variable(
"image",
initializer=random_ops.random_normal(
[batch_size, image_size, image_size, depth],
dtype=dtypes.float32,
stddev=1e-1))
kernel = variable_scope.get_variable(
"weights",
initializer=random_ops.truncated_normal(
[kernel_size, kernel_size, depth, depth],
dtype=dtypes.float32,
stddev=1e-1))
return init_step, image, kernel
def _runOneBenchmark(self,
default_device,
num_iters=10,
static_unroll=False,
steps=10):
"""Evaluate the while loop performance.
Args:
default_device: The default device to run all ops except the loop_body.
loop_body is always run on GPU.
num_iters: Number of iterations to run.
static_unroll: If true, run unrolled version; otherwise, run while_loop.
steps: Total number of repeated steps to run the loop.
Returns:
The duration of the run in seconds.
"""
def loop_body(i, x):
with ops.device("/gpu:0"):
# Always put loop body on GPU.
nx = nn_ops.conv2d(
input=x,
filter=kernel,
strides=[1, 1, 1, 1],
padding="SAME",
data_format="NHWC",
name="conv2d")
ni = math_ops.add(i, 1)
return ni, nx
ops.reset_default_graph()
with session.Session() as sess, ops.device(default_device):
# Get the initial id i, input x, and kernel.
i, x, kernel = self._getInitVariables()
sess.run(variables.global_variables_initializer())
if static_unroll:
for _ in xrange(steps):
i, x = loop_body(i, x)
else:
i, x = control_flow_ops.while_loop(
lambda i, _: i < steps,
loop_body, [i, x],
parallel_iterations=steps,
swap_memory=True)
r = math_ops.reduce_sum(x)
dx, dk = gradients_impl.gradients(r, [x, kernel])
# Use group to avoid fetching back results.
r = control_flow_ops.group(dx, dk)
for _ in xrange(3):
# exclude warm up time
sess.run(r)
start_time = time.time()
for _ in xrange(num_iters):
sess.run(r)
return (time.time() - start_time) / num_iters
def benchmarkWhileOpCrossDevicePlacement(self):
iters = 10
# Run loop body on GPU, but other ops on CPU.
duration = self._runOneBenchmark("cpu", iters, static_unroll=False)
self.report_benchmark(
name="while_op_cross_device", iters=iters, wall_time=duration)
def benchmarkWhileOpSameDevicePlacement(self):
iters = 10
# Run all ops on the same GPU device.
duration = self._runOneBenchmark("gpu", iters, static_unroll=False)
self.report_benchmark(
name="while_op_same_device", iters=iters, wall_time=duration)
def benchmarkWhileOpUnrollCrossDevicePlacement(self):
iters = 10
# Run loop body on GPU, but other ops on CPU.
duration = self._runOneBenchmark("cpu", iters, static_unroll=True)
self.report_benchmark(
name="unroll_cross_device_cpu", iters=iters, wall_time=duration)
def benchmarkWhileOpUnrollSameDevicePlacement(self):
iters = 10
# Run all ops on GPU.
duration = self._runOneBenchmark("gpu", iters, static_unroll=True)
self.report_benchmark(
name="unroll_same_device", iters=iters, wall_time=duration)
class EagerTest(test.TestCase):
def testCond(self):
with context.eager_mode():
pred = math_ops.less(1, 2)
fn1 = lambda: [constant_op.constant(10)]
fn2 = lambda: [constant_op.constant(20)]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual(r.numpy(), 10)
self.assertFalse(isinstance(r, list))
def testWhileLoop(self):
with context.eager_mode():
tensor = constant_op.constant([1, 2, 3, 4, 5])
self.assertAllEqual(isum(tensor).numpy(), [46, 47, 48, 49, 50])
def testWhileLoopWithMaxIterations(self):
with context.eager_mode():
tensor = constant_op.constant([1, 2, 3, 4, 5])
self.assertAllEqual(
isum(tensor, maximum_iterations=3).numpy(),
[1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3])
def testWhileWithMaximumIterationsAndSingleArgument(self):
with context.eager_mode():
tensor = constant_op.constant(0)
r = control_flow_ops.while_loop(
lambda i: i < 3, lambda i: i + 1, [tensor], maximum_iterations=1)
self.assertEqual(1, r.numpy())
def testWithDependencies(self):
with context.eager_mode():
t1 = constant_op.constant(1)
t2 = constant_op.constant(2)
t3 = control_flow_ops.with_dependencies(t1, t2)
self.assertAllEqual(t2.numpy(), t3.numpy())
def testTuple(self):
with context.eager_mode():
t1 = constant_op.constant(1)
t2 = constant_op.constant(2)
tup1, tup2 = control_flow_ops.tuple([t1, t2])
self.assertAllEqual(t1.numpy(), tup1.numpy())
self.assertAllEqual(t2.numpy(), tup2.numpy())
def testCase(self):
with context.eager_mode():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = constant_op.constant(3)
f1 = lambda: constant_op.constant(17)
f2 = lambda: constant_op.constant(23)
f3 = lambda: constant_op.constant(-1)
r1 = control_flow_ops.case(
[(x < y, f1), (x > z, f2)], default=f3, exclusive=True)
self.assertAllEqual(r1.numpy(), 17)
if __name__ == "__main__":
test.main()
|
ZhangXinNan/tensorflow
|
tensorflow/python/kernel_tests/control_flow_ops_py_test.py
|
Python
|
apache-2.0
| 121,396 | 0.014259 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobGetOptions(Model):
"""Additional parameters for get operation.
:param select: An OData $select clause.
:type select: str
:param expand: An OData $expand clause.
:type expand: str
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
def __init__(self, select=None, expand=None, timeout=30, client_request_id=None, return_client_request_id=False, ocp_date=None, if_match=None, if_none_match=None, if_modified_since=None, if_unmodified_since=None):
self.select = select
self.expand = expand
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
|
AutorestCI/azure-sdk-for-python
|
azure-batch/azure/batch/models/job_get_options.py
|
Python
|
mit
| 3,278 | 0.000305 |
#!/usr/bin/env python
# Progressive Cactus Package
# Copyright (C) 2009-2012 by Glenn Hickey (hickey@soe.ucsc.edu)
# and Benedict Paten (benedictpaten@gmail.com)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import xml.etree.ElementTree as ET
import math
import time
import random
import copy
import shlex
from optparse import OptionParser
from optparse import OptionGroup
import imp
import socket
import signal
import traceback
import datetime
from sonLib.bioio import logger
from sonLib.bioio import setLoggingFromOptions
from sonLib.bioio import getTempDirectory
from sonLib.bioio import system
from sonLib.bioio import popenCatch
from jobTree.scriptTree.target import Target
from jobTree.scriptTree.stack import Stack
from jobTree.src.master import getJobFileDirName, getConfigFileName
from jobTree.src.jobTreeStatus import parseJobFiles
from cactus.progressive.multiCactusProject import MultiCactusProject
from cactus.shared.experimentWrapper import ExperimentWrapper
from cactus.shared.configWrapper import ConfigWrapper
from seqFile import SeqFile
from projectWrapper import ProjectWrapper
from jobStatusMonitor import JobStatusMonitor
def initParser():
usage = "usage: runProgressiveCactus.sh [options] <seqFile> <workDir> <outputHalFile>\n\n"\
"Required Arguments:\n"\
" <seqFile>\t\tFile containing newick tree and seqeunce paths"\
" paths.\n"\
"\t\t\t(see documetation or examples for format).\n"\
" <workDir>\t\tWorking directory (which can grow "\
"exteremely large)\n"\
" <outputHalFile>\tPath of output alignment in .hal format."
parser = OptionParser(usage=usage)
#JobTree Options (method below now adds an option group)
Stack.addJobTreeOptions(parser)
#Progressive Cactus will handle where the jobtree path is
parser.remove_option("--jobTree")
#Progressive Cactus Options
parser.add_option("--optionsFile", dest="optionsFile",
help="Text file containing command line options to use as"\
" defaults", default=None)
parser.add_option("--database", dest="database",
help="Database type: tokyo_cabinet or kyoto_tycoon"
" [default: %default]",
default="kyoto_tycoon")
parser.add_option("--outputMaf", dest="outputMaf",
help="[DEPRECATED use hal2maf on the ouput file instead] Path of output alignment in .maf format. This option should be avoided and will soon be removed. It may cause sequence names to be mangled, and use a tremendous amount of memory. ",
default=None)
parser.add_option("--configFile", dest="configFile",
help="Specify cactus configuration file",
default=None)
parser.add_option("--legacy", dest="legacy", action="store_true", help=
"Run cactus directly on all input sequences "
"without any progressive decomposition (ie how it "
"was originally published in 2011)",
default=False)
parser.add_option("--autoAbortOnDeadlock", dest="autoAbortOnDeadlock",
action="store_true",
help="Abort automatically when jobTree monitor" +
" suspects a deadlock by deleting the jobTree folder." +
" Will guarantee no trailing ktservers but still " +
" dangerous to use until we can more robustly detect " +
" deadlocks.",
default=False)
parser.add_option("--overwrite", dest="overwrite", action="store_true",
help="Re-align nodes in the tree that have already" +
" been successfully aligned.",
default=False)
parser.add_option("--rootOutgroupDists", dest="rootOutgroupDists",
help="root outgroup distance (--rootOutgroupPaths must " +
"be given as well)", default=None)
parser.add_option("--rootOutgroupPaths", dest="rootOutgroupPaths", type=str,
help="root outgroup path (--rootOutgroup must be given " +
"as well)", default=None)
parser.add_option("--root", dest="root", help="Name of ancestral node (which"
" must appear in NEWICK tree in <seqfile>) to use as a "
"root for the alignment. Any genomes not below this node "
"in the tree may be used as outgroups but will never appear"
" in the output. If no root is specifed then the root"
" of the tree is used. ", default=None)
#Kyoto Tycoon Options
ktGroup = OptionGroup(parser, "kyoto_tycoon Options",
"Kyoto tycoon provides a client/server framework "
"for large in-memory hash tables and is available "
"via the --database option.")
ktGroup.add_option("--ktPort", dest="ktPort",
help="starting port (lower bound of range) of ktservers"
" [default: %default]",
default=1978)
ktGroup.add_option("--ktHost", dest="ktHost",
help="The hostname to use for connections to the "
"ktserver (this just specifies where nodes will attempt"
" to find the server, *not* where the ktserver will be"
" run)",
default=None)
ktGroup.add_option("--ktType", dest="ktType",
help="Kyoto Tycoon server type "
"(memory, snapshot, or disk)"
" [default: %default]",
default='memory')
# sonlib doesn't allow for spaces in attributes in the db conf
# which renders this options useless
#ktGroup.add_option("--ktOpts", dest="ktOpts",
# help="Command line ktserver options",
# default=None)
ktGroup.add_option("--ktCreateTuning", dest="ktCreateTuning",
help="ktserver options when creating db "\
"(ex #bnum=30m#msiz=50g)",
default=None)
ktGroup.add_option("--ktOpenTuning", dest="ktOpenTuning",
help="ktserver options when opening existing db "\
"(ex #opts=ls#ktopts=p)",
default=None)
parser.add_option_group(ktGroup)
return parser
# Try to weed out errors early by checking options and paths
def validateInput(workDir, outputHalFile, options):
try:
if workDir.find(' ') >= 0:
raise RuntimeError("Cactus does not support spaces in pathnames: %s"
% workDir)
if not os.path.isdir(workDir):
os.makedirs(workDir)
if not os.path.isdir(workDir) or not os.access(workDir, os.W_OK):
raise
except:
raise RuntimeError("Can't write to workDir: %s" % workDir)
try:
open(outputHalFile, "w")
except:
raise RuntimeError("Unable to write to hal: %s" % outputHalFile)
if options.database != "tokyo_cabinet" and\
options.database != "kyoto_tycoon":
raise RuntimeError("Invalid database type: %s" % options.database)
if options.outputMaf is not None:
try:
open(options.outputMaf, "w")
except:
raise RuntimeError("Unable to write to maf: %s" % options.outputMaf)
if options.configFile is not None:
try:
ConfigWrapper(ET.parse(options.configFile).getroot())
except:
raise RuntimeError("Unable to read config: %s" % options.configFile)
if options.database == 'kyoto_tycoon':
if options.ktType.lower() != 'memory' and\
options.ktType.lower() != 'snapshot' and\
options.ktType.lower() != 'disk':
raise RuntimeError("Invalid ktserver type specified: %s. Must be "
"memory, snapshot or disk" % options.ktType)
# Convert the jobTree options taken in by the parser back
# out to command line options to pass to progressive cactus
def getJobTreeCommands(jtPath, parser, options):
cmds = "--jobTree %s" % jtPath
for optGroup in parser.option_groups:
if optGroup.title.startswith("jobTree") or optGroup.title.startswith("Jobtree"):
for opt in optGroup.option_list:
if hasattr(options, opt.dest) and \
getattr(options, opt.dest) != optGroup.defaults[opt.dest]:
cmds += " %s" % str(opt)
if opt.nargs > 0:
cmds += " \"%s\"" % getattr(options, opt.dest)
return cmds
# Go through a text file and add every word inside to an arguments list
# which will be prepended to sys.argv. This way both the file and
# command line are passed to the option parser, with the command line
# getting priority.
def parseOptionsFile(path):
if not os.path.isfile(path):
raise RuntimeError("Options File not found: %s" % path)
args = []
optFile = open(path, "r")
for l in optFile:
line = l.rstrip()
if line:
args += shlex.split(line)
return args
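# Illustrative only: a hypothetical options file for --optionsFile. Each line
# holds one or more long-form options; values containing spaces may be quoted
# because lines are tokenized with shlex.split(). The options shown are
# examples, not a complete or authoritative list.
#
#   --database kyoto_tycoon
#   --ktType snapshot
#   --configFile "/path/with spaces/config.xml"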
# This source file should always be in progressiveCactus/src. So
# we return the path to progressiveCactus/environment, which needs
# to be sourced before doing anything.
def getEnvFilePath():
path = os.path.dirname(sys.argv[0])
envFile = os.path.join(path, '..', 'environment')
assert os.path.isfile(envFile)
return envFile
# If specified with the risky --autoAbortOnDeadlock option, we call this to
# force an abort if the jobStatusMonitor thinks it's hopeless.
# We delete the jobTreePath to get rid of kyoto tycoons.
def abortFunction(jtPath, options):
def afClosure():
sys.stderr.write('\nAborting due to deadlock (triggered by the '
+ '--autoAbortOnDeadlock' +
' option), and running rm -rf %s\n\n' % jtPath)
system('rm -rf %s' % jtPath)
sys.exit(-1)
if options.autoAbortOnDeadlock:
return afClosure
else:
return None
# Run cactus progressive on the project that has been created in workDir.
# Any jobtree options are passed along. Should probably look at redirecting
# stdout/stderr in the future.
def runCactus(workDir, jtCommands, jtPath, options):
envFile = getEnvFilePath()
pjPath = os.path.join(workDir, ProjectWrapper.alignmentDirName,
'%s_project.xml' % ProjectWrapper.alignmentDirName)
logFile = os.path.join(workDir, 'cactus.log')
if options.overwrite:
overwriteFlag = '--overwrite'
system("rm -f %s" % logFile)
else:
overwriteFlag = ''
logHandle = open(logFile, "a")
logHandle.write("\n%s: Beginning Progressive Cactus Alignment\n\n" % str(
datetime.datetime.now()))
logHandle.close()
cmd = '. %s && cactus_progressive.py %s %s %s >> %s 2>&1' % (envFile,
jtCommands,
pjPath,
overwriteFlag,
logFile)
jtMonitor = JobStatusMonitor(jtPath, pjPath, logFile,
deadlockCallbackFn=abortFunction(jtPath,
options))
if options.database == "kyoto_tycoon":
jtMonitor.daemon = True
jtMonitor.start()
system(cmd)
logHandle = open(logFile, "a")
logHandle.write("\n%s: Finished Progressive Cactus Alignment\n" % str(
datetime.datetime.now()))
logHandle.close()
def checkCactus(workDir, options):
pass
# Call cactus2hal to extract a single hal file out of the progressive
# alignment in the working directory. If the maf option was set, we
# just move out the root maf.
def extractOutput(workDir, outputHalFile, options):
if options.outputMaf is not None:
mcProj = MultiCactusProject()
mcProj.readXML(
os.path.join(workDir, ProjectWrapper.alignmentDirName,
ProjectWrapper.alignmentDirName + "_project.xml"))
rootName = mcProj.mcTree.getRootName()
rootPath = os.path.join(workDir, ProjectWrapper.alignmentDirName,
rootName, rootName + '.maf')
cmd = 'mv %s %s' % (rootPath, options.outputMaf)
system(cmd)
envFile = getEnvFilePath()
logFile = os.path.join(workDir, 'cactus.log')
pjPath = os.path.join(workDir, ProjectWrapper.alignmentDirName,
'%s_project.xml' % ProjectWrapper.alignmentDirName)
logHandle = open(logFile, "a")
logHandle.write("\n\n%s: Beginning HAL Export\n\n" % str(
datetime.datetime.now()))
logHandle.close()
cmd = '. %s && cactus2hal.py %s %s >> %s 2>&1' % (envFile, pjPath,
outputHalFile, logFile)
system(cmd)
logHandle = open(logFile, "a")
logHandle.write("\n%s: Finished HAL Export \n" % str(
datetime.datetime.now()))
logHandle.close()
def main():
# init as dummy function
cleanKtFn = lambda x,y:x
stage = -1
workDir = None
try:
parser = initParser()
options, args = parser.parse_args()
if (options.rootOutgroupDists is not None) \
^ (options.rootOutgroupPaths is not None):
parser.error("--rootOutgroupDists and --rootOutgroupPaths must be " +
"provided together")
if len(args) == 0:
parser.print_help()
return 1
if len(args) != 3:
raise RuntimeError("Error parsing command line. Exactly 3 arguments are required but %d arguments were detected: %s" % (len(args), str(args)))
if options.optionsFile != None:
fileArgs = parseOptionsFile(options.optionsFile)
options, args = parser.parse_args(fileArgs + sys.argv[1:])
if len(args) != 3:
raise RuntimeError("Error parsing options file. Make sure all "
"options have -- prefix")
stage = 0
setLoggingFromOptions(options)
seqFile = SeqFile(args[0])
workDir = args[1]
outputHalFile = args[2]
validateInput(workDir, outputHalFile, options)
jtPath = os.path.join(workDir, "jobTree")
stage = 1
print "\nBeginning Alignment"
system("rm -rf %s" % jtPath)
projWrapper = ProjectWrapper(options, seqFile, workDir)
projWrapper.writeXml()
jtCommands = getJobTreeCommands(jtPath, parser, options)
runCactus(workDir, jtCommands, jtPath, options)
cmd = 'jobTreeStatus --failIfNotComplete --jobTree %s > /dev/null 2>&1 ' %\
jtPath
system(cmd)
stage = 2
print "Beginning HAL Export"
extractOutput(workDir, outputHalFile, options)
print "Success.\n" "Temporary data was left in: %s\n" \
% workDir
return 0
except RuntimeError, e:
sys.stderr.write("Error: %s\n\n" % str(e))
if stage >= 0 and workDir is not None and os.path.isdir(workDir):
sys.stderr.write("Temporary data was left in: %s\n" % workDir)
if stage == 1:
sys.stderr.write("More information can be found in %s\n" %
os.path.join(workDir, "cactus.log"))
elif stage == 2:
sys.stderr.write("More information can be found in %s\n" %
os.path.join(workDir, "cactus.log"))
return -1
if __name__ == '__main__':
sys.exit(main())
|
BD2KGenomics/cactus
|
src/progressiveCactus.py
|
Python
|
gpl-3.0
| 16,648 | 0.003844 |
# profiling_late.py
#
# Copyright (C) 2015 Kano Computing Ltd.
# License: http://www.gnu.org/licenses/gpl-2.0.txt GNU General Public License v2
#
#
'''
Module to enable profiling timepoints. This module is loaded
only if the configuration file exists; see profiling.py for more information.
'''
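# Illustrative only: a hypothetical CONF_FILE layout accepted by this module,
# inferred from the lookups in declare_timepoint() below; the exact schema is
# an assumption, not documented API. Top-level keys are the invoking script
# (sys.argv[0]), mapping timepoint names to their settings.
#
#   /usr/bin/my-app:
#       startup:
#           python:
#               statfile: /tmp/my-app-startup.pstats
#           start_exec: "logger profiling-start"
#           end_exec: "logger profiling-end"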
import os
import sys
import yaml
import cProfile
from kano.logging import logger
from kano.profiling import CONF_FILE
# load the configuration file
with open(CONF_FILE, 'r') as inp_conf:
conf = yaml.safe_load(inp_conf)
myProfile = cProfile.Profile()
app_name = sys.argv[0]
point_current = ""
def has_key(d, k):
return type(d) is dict and k in d
def declare_timepoint(name, isStart):
global myProfile
global point_current
cmd = None
pythonProfile = False
# Check if the app is contained in the profiling conf file
if has_key(conf, app_name):
# Check if the timepoint name is contained in the profiling conf file
if has_key(conf[app_name], name):
ct = conf[app_name][name]
# Check if python profiler should be started for this timepoint
if has_key(ct, 'python'):
pythonProfile = True
if isStart:
if point_current:
logger.error('Stopping profiling for point "{0}" and profiling "{1}" instead'.format(point_current, name))
myProfile.disable()
myProfile.clear()
point_current = name
myProfile.enable()
else:
if point_current != name:
logger.error('Can\'t stop point "{0}" since a profiling session for "{1}" is being run'.format(name, point_current))
else:
myProfile.disable()
# Check if the statfile location in specified
if ct['python']['statfile']:
try:
myProfile.dump_stats(ct['python']['statfile'])
except IOError as e:
if e.errno == 2:
logger.error('Path to "{}" probably does not exist'.format(ct['python']['statfile']))
else:
logger.error('dump_stats IOError: errno:{0}: {1} '.format(e.errno, e.strerror))
else:
logger.error('No statfile entry in profiling conf file "{}"'.format(CONF_FILE))
myProfile.clear()
point_current = ""
else:
logger.info("Profiling conf file doesn't enable the Python profiler for point {} at app {}".format(name, app_name))
# Check if we want to run some other command at this timepoint
if isStart and has_key(ct, 'start_exec'):
cmd = ct['start_exec']
os.system(cmd)
if not isStart and has_key(ct, 'end_exec'):
cmd = ct['end_exec']
os.system(cmd)
else:
logger.info("Profiling conf file doesn't include point:{} for app {}".format(name, app_name))
else:
logger.info("Profiling conf file doesn't include app:{}".format(app_name))
logger.debug('timepoint '+name, transition=name, isStart=isStart, cmd=cmd, pythonProfile=pythonProfile)
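# Illustrative only: how a caller would bracket a region of interest with a
# matching start/stop pair (the timepoint name and workload are hypothetical):
#
#   declare_timepoint('startup', True)
#   do_expensive_startup()  # hypothetical workload
#   declare_timepoint('startup', False)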
|
rcocetta/kano-toolset
|
kano/profiling_late.py
|
Python
|
gpl-2.0
| 3,436 | 0.00291 |
#!/usr/bin/env python
# encoding: utf-8
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="cubehelix",
version="0.1.0",
author="James Davenport",
# author_email="",
description="Cubehelix colormaps for matplotlib",
long_description=read('README.md'),
# license="BSD",
py_modules=['cubehelix'],
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Scientific/Engineering :: Visualization",
# "License :: OSI Approved :: BSD License",
]
)
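# Illustrative only, not part of setup(): after installation the module is
# typically used to build a matplotlib colormap. cubehelix.cmap() and its
# keyword arguments are assumptions about this package's API.
#
#   import cubehelix
#   cx = cubehelix.cmap(start=0.5, rot=-1.5)
#   # e.g. plt.imshow(data, cmap=cx)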
|
jradavenport/cubehelix
|
setup.py
|
Python
|
bsd-2-clause
| 605 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._data_masking_rules_operations import build_create_or_update_request, build_get_request, build_list_by_sql_pool_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DataMaskingRulesOperations:
"""DataMaskingRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
data_masking_rule_name: str,
parameters: "_models.DataMaskingRule",
**kwargs: Any
) -> "_models.DataMaskingRule":
"""Creates or updates a Sql pool data masking rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:param data_masking_rule_name: The name of the data masking rule.
:type data_masking_rule_name: str
:param parameters: The required parameters for creating or updating a data masking rule.
:type parameters: ~azure.mgmt.synapse.models.DataMaskingRule
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataMaskingRule, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.DataMaskingRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DataMaskingRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'DataMaskingRule')
request = build_create_or_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
data_masking_rule_name=data_masking_rule_name,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DataMaskingRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DataMaskingRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/dataMaskingPolicies/{dataMaskingPolicyName}/rules/{dataMaskingRuleName}'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
data_masking_rule_name: str,
**kwargs: Any
) -> "_models.DataMaskingRule":
"""Gets the specific Sql pool data masking rule.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:param data_masking_rule_name: The name of the data masking rule.
:type data_masking_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DataMaskingRule, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.DataMaskingRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DataMaskingRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
data_masking_rule_name=data_masking_rule_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DataMaskingRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/dataMaskingPolicies/{dataMaskingPolicyName}/rules/{dataMaskingRuleName}'} # type: ignore
@distributed_trace
def list_by_sql_pool(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
**kwargs: Any
) -> AsyncIterable["_models.DataMaskingRuleListResult"]:
"""Gets a list of Sql pool data masking rules.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DataMaskingRuleListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.synapse.models.DataMaskingRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DataMaskingRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_sql_pool_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
template_url=self.list_by_sql_pool.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_sql_pool_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DataMaskingRuleListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_sql_pool.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/dataMaskingPolicies/{dataMaskingPolicyName}/rules'} # type: ignore
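# Illustrative only: a minimal sketch of reaching this operation group through
# a service client, as the class docstring suggests. The client and credential
# names below are assumptions for illustration, not generated code.
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.synapse.aio import SynapseManagementClient
#
#   async def list_rules():
#       async with SynapseManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#           async for rule in client.data_masking_rules.list_by_sql_pool(
#                   "<resource-group>", "<workspace>", "<sql-pool>"):
#               print(rule.name)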
|
Azure/azure-sdk-for-python
|
sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_data_masking_rules_operations.py
|
Python
|
mit
| 12,027 | 0.004074 |
import os
import socket
import sys
input_host = '127.0.0.1'
input_port = 65000
batch_enabled = int(os.environ.get('_BACKEND_BATCH_MODE', '0'))
if batch_enabled:
# Since latest Python 2 has `builtins`and `input`,
# we cannot detect Python 2 with the existence of them.
if sys.version_info.major > 2:
import builtins
def _input(prompt=''):
sys.stdout.write(prompt)
sys.stdout.flush()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
try:
sock.connect((input_host, input_port))
userdata = sock.recv(1024)
except ConnectionRefusedError:
userdata = b'<user-input-unavailable>'
return userdata.decode()
builtins._input = input # type: ignore
builtins.input = _input
else:
# __builtins__ is an alias dict for __builtin__ in modules other than __main__.
# Thus, we have to explicitly import __builtin__ module in Python 2.
import __builtin__
builtins = __builtin__
def _raw_input(prompt=''):
sys.stdout.write(prompt)
sys.stdout.flush()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((input_host, input_port))
userdata = sock.recv(1024)
except socket.error:
userdata = b'<user-input-unavailable>'
finally:
sock.close()
return userdata.decode()
builtins._raw_input = builtins.raw_input # type: ignore
builtins.raw_input = _raw_input # type: ignore
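# Illustrative only: in batch mode the replaced input()/raw_input() expects a
# peer on input_host:input_port to supply the user data. A hypothetical feeder
# for testing could look like this (not part of this module):
#
#   import socket
#   srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   srv.bind(('127.0.0.1', 65000))
#   srv.listen(1)
#   conn, _ = srv.accept()
#   conn.sendall(b'user answer')
#   conn.close()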
|
lablup/sorna-agent
|
src/ai/backend/kernel/python/sitecustomize.py
|
Python
|
lgpl-3.0
| 1,693 | 0.000591 |
from collections import OrderedDict
from django.contrib import admin
from edc_export.actions import export_as_csv_action
from edc_base.modeladmin.admin import BaseTabularInline
from ..forms import MaternalArvPostForm, MaternalArvPostMedForm, MaternalArvPostAdhForm
from ..models import MaternalVisit, MaternalArvPost, MaternalArvPostMed, MaternalArvPostAdh
from .base_maternal_model_admin import BaseMaternalModelAdmin
class MaternalArvPostModInlineAdmin(BaseTabularInline):
model = MaternalArvPostMed
form = MaternalArvPostMedForm
extra = 1
class MaternalArvPostModAdmin(BaseMaternalModelAdmin):
form = MaternalArvPostMedForm
list_display = ('maternal_arv_post', 'arv_code', 'dose_status', 'modification_date', 'modification_code')
radio_fields = {
"arv_code": admin.VERTICAL,
"dose_status": admin.VERTICAL,
"modification_code": admin.VERTICAL,
}
actions = [
export_as_csv_action(
description="CSV Export of Maternal ARV Post with list",
fields=[],
delimiter=',',
exclude=['created', 'modified', 'user_created', 'user_modified', 'revision', 'id', 'hostname_created',
'hostname_modified'],
extra_fields=OrderedDict(
{'subject_identifier':
'maternal_arv_post__maternal_visit__appointment__registered_subject__subject_identifier',
'gender': 'maternal_arv_post__maternal_visit__appointment__registered_subject__gender',
'dob': 'maternal_arv_post__maternal_visit__appointment__registered_subject__dob',
'on_arv_since': 'maternal_arv_post__on_arv_since',
'on_arv_reason': 'maternal_arv_post__on_arv_reason',
'on_arv_reason_other': 'maternal_arv_post__on_arv_reason_other',
'arv_status': 'maternal_arv_post__arv_status',
}),
)]
admin.site.register(MaternalArvPostMed, MaternalArvPostModAdmin)
class MaternalArvPostAdmin(BaseMaternalModelAdmin):
form = MaternalArvPostForm
fields = (
"maternal_visit",
"on_arv_since",
"on_arv_reason",
"on_arv_reason_other",
"arv_status")
radio_fields = {
"on_arv_since": admin.VERTICAL,
"on_arv_reason": admin.VERTICAL,
"arv_status": admin.VERTICAL}
inlines = [MaternalArvPostModInlineAdmin, ]
actions = [
export_as_csv_action(
description="CSV Export of Maternal ARV Post",
fields=[],
delimiter=',',
exclude=['created', 'modified', 'user_created', 'user_modified', 'revision', 'id', 'hostname_created',
'hostname_modified'],
extra_fields=OrderedDict(
{'subject_identifier': 'maternal_visit__appointment__registered_subject__subject_identifier',
'gender': 'maternal_visit__appointment__registered_subject__gender',
'dob': 'maternal_visit__appointment__registered_subject__dob',
}),
)]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "maternal_visit":
if request.GET.get('maternal_visit'):
kwargs["queryset"] = MaternalVisit.objects.filter(id=request.GET.get('maternal_visit'))
return super(MaternalArvPostAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
admin.site.register(MaternalArvPost, MaternalArvPostAdmin)
class MaternalArvPostAdhAdmin(BaseMaternalModelAdmin):
form = MaternalArvPostAdhForm
fields = (
"maternal_visit",
"missed_doses",
"missed_days",
"missed_days_discnt",
"comment")
actions = [
export_as_csv_action(
description="CSV Export of Maternal ARVs Post: Adherence",
fields=[],
delimiter=',',
exclude=['created', 'modified', 'user_created', 'user_modified', 'revision', 'id', 'hostname_created',
'hostname_modified'],
extra_fields=OrderedDict(
{'subject_identifier': 'maternal_visit__appointment__registered_subject__subject_identifier',
'gender': 'maternal_visit__appointment__registered_subject__gender',
'dob': 'maternal_visit__appointment__registered_subject__dob',
'registered': 'maternal_visit__appointment__registered_subject__registration_datetime'}),
)]
admin.site.register(MaternalArvPostAdh, MaternalArvPostAdhAdmin)
|
TshepangRas/tshilo-dikotla
|
td_maternal/admin/maternal_arv_post_admin.py
|
Python
|
gpl-2.0
| 4,570 | 0.004376 |
#!/usr/bin/env python
########################################################################
# File : dirac-admin-sync-users-from-file
# Author : Adrian Casajus
########################################################################
"""
Sync users in Configuration with the cfg contents.
Usage:
dirac-admin-sync-users-from-file [options] ... UserCfg
Arguments:
UserCfg: Cfg FileName with Users as sections containing DN, Groups, and other properties as options
Example:
$ dirac-admin-sync-users-from-file file_users.cfg
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from diraccfg import CFG
import DIRAC
from DIRAC.Core.Base import Script
from DIRAC.Core.Utilities.DIRACScript import DIRACScript
__RCSID__ = "$Id$"
@DIRACScript()
def main():
Script.registerSwitch("t", "test", "Only test. Don't commit changes")
Script.parseCommandLine(ignoreErrors=True)
args = Script.getExtraCLICFGFiles()
if len(args) < 1:
Script.showHelp()
from DIRAC.Interfaces.API.DiracAdmin import DiracAdmin
diracAdmin = DiracAdmin()
exitCode = 0
testOnly = False
errorList = []
for unprocSw in Script.getUnprocessedSwitches():
if unprocSw[0] in ("t", "test"):
testOnly = True
try:
usersCFG = CFG().loadFromFile(args[0])
except Exception as e:
errorList.append("file open", "Can't parse file %s: %s" % (args[0], str(e)))
errorCode = 1
else:
if not diracAdmin.csSyncUsersWithCFG(usersCFG):
errorList.append(("modify users", "Cannot sync with %s" % args[0]))
exitCode = 255
if not exitCode and not testOnly:
result = diracAdmin.csCommitChanges()
if not result['OK']:
errorList.append(("commit", result['Message']))
exitCode = 255
for error in errorList:
print("ERROR %s: %s" % error)
DIRAC.exit(exitCode)
if __name__ == "__main__":
main()
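# Illustrative only: a hypothetical users cfg in DIRAC CFG syntax, one section
# per user with DN/Groups as options. Option names depend on the Registry
# schema and are assumptions here.
#
#   jdoe
#   {
#     DN = /DC=org/DC=example/CN=John Doe
#     Groups = dirac_user, dirac_admin
#     Email = jdoe@example.org
#   }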
|
yujikato/DIRAC
|
src/DIRAC/Interfaces/scripts/dirac_admin_sync_users_from_file.py
|
Python
|
gpl-3.0
| 1,909 | 0.012572 |
# -*- coding: utf-8 -*-
from hangulize import *
class Finnish(Language):
"""For transcribing Finnish."""
__iso639__ = {1: 'fi', 2: 'fin', 3: 'fin'}
__tmp__ = ',;%'
vowels = 'aAeioOuy'
ob = 'bdfgkpstT'
notation = Notation([
# Convention: A = ä, O = ö
('å', 'o'),
('ä', 'A'),
('ö', 'O'),
('w', 'v'),
('xx', 'x'),
('x', 'ks'),
('z', 's'),
('ds', 'T'),
('ts', 'T'),
('c{e|i|y}', 's'),
('c', 'k'),
('q', 'k'),
('ng', 'N'),
('nk', 'Nk'),
('mn{@}', 'm,n'),
('mn', 'm'),
('th', 't'),
('^j{@}', 'J'),
('{@}j{@}', 'J'),
('{h|s|T}j', '%J'),
('j', 'i'),
('aa', 'a'),
('bb', 'b'),
('dd', 'd'),
('ee', 'e'),
('AA', 'A'),
('ff', 'f'),
('gg', 'g'),
('hh', 'h'),
('ii', 'i'),
('jj', 'j'),
('kk', 'k'),
('ll', 'l'),
('{@}mm{@}', 'm,m'),
('mm', 'm'),
('{@}nn{@}', 'n,n'),
('nn', 'n'),
('oo', 'o'),
('pp', 'p'),
('rr', 'r'),
('ss', 's'),
('tt', 't'),
('uu', 'u'),
('vv', 'v'),
('yy', 'y'),
('zz', 'z'),
('{@}b{<ob>}', 'p,'),
('{@}g{<ob>}', 'k,'),
('{@}k{<ob>}', 'k,'),
('{@}p{<ob>}', 'p,'),
('{@}t{<ob>}', 't,'),
('^l', 'l;'),
('^m', 'm;'),
('^n', 'n;'),
('l$', 'l,'),
('m$', 'm,'),
('n$', 'n,'),
('l{@|m,|n,|N}', 'l;'),
('{,}l', 'l;'),
('m{@}', 'm;'),
('n{@}', 'n;'),
('l', 'l,'),
('m', 'm,'),
('n', 'n,'),
('N', 'N,'),
(',,', ','),
(',;', None),
(',l,', 'l,'),
(',m,', 'm,'),
(',n,', 'n,'),
(',N,', 'N,'),
('l{m;|n;}', 'l,'),
(';', None),
('b', Choseong(B)),
('d', Choseong(D)),
('f', Choseong(P)),
('g', Choseong(G)),
('h', Choseong(H)),
('k,', Jongseong(G)),
('k', Choseong(K)),
('^l', Choseong(L)),
('{,|-}l', Choseong(L)),
('-', None),
('l,', Jongseong(L)),
('l', Jongseong(L), Choseong(L)),
('m,', Jongseong(M)),
('m', Choseong(M)),
('n,', Jongseong(N)),
('n', Choseong(N)),
('N', Jongseong(NG)),
('p,', Jongseong(B)),
('p', Choseong(P)),
('r', Choseong(L)),
('s', Choseong(S)),
('t,', Jongseong(S)),
('t', Choseong(T)),
('T', Choseong(C)),
('v', Choseong(B)),
('%', Choseong(NG)),
('Ja', Jungseong(YA)),
('JA', Jungseong(YAE)),
('Je', Jungseong(YE)),
('Ji', Jungseong(I)),
('Jo', Jungseong(YO)),
('JO', Jungseong(OE)),
('Ju', Jungseong(YU)),
('Jy', Jungseong(WI)),
('a', Jungseong(A)),
('A', Jungseong(AE)),
('e', Jungseong(E)),
('i', Jungseong(I)),
('o', Jungseong(O)),
('u', Jungseong(U)),
('y', Jungseong(WI)),
('O', Jungseong(OE)),
])
def normalize(self, string):
return normalize_roman(string, {
'Å': 'å', 'Ǻ': 'å', 'ǻ': 'å', 'Ä': 'ä', 'Ö': 'ö'
})
__lang__ = Finnish
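# Illustrative only: transcribing a Finnish word through the package-level
# entry point (assumed from the star import above):
#
#   >>> from hangulize import hangulize
#   >>> hangulize(u'Helsinki', 'fin')  # returns the hangul transcription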
|
Jinwithyoo/han
|
hangulize/langs/fin/__init__.py
|
Python
|
bsd-3-clause
| 3,370 | 0 |
#!/usr/bin/python
import os.path
import subprocess
import sys
import urllib
KEY_FILE = "submit.token"
def main(filename):
# Prompt for key if missing
if not os.path.exists(KEY_FILE):
print "Please visit http://css.csail.mit.edu/6.858/2014/labs/handin.html"
print "and enter your API key."
key = raw_input("Key: ").strip()
with open(KEY_FILE, "w") as f:
f.write(key + "\n")
print "API key written to %s" % KEY_FILE
# Read the key.
with open(KEY_FILE) as f:
key = f.read().strip()
# Shell out to curl. urllib2 doesn't deal with multipart attachments. Throw
# away the output; you just get a random HTML page.
with open("/dev/null", "a") as null:
subprocess.check_call(["curl", "-f",
"-F", "file=@%s" % filename,
"-F", "key=%s" % key,
"http://6858.scripts.mit.edu/submit/handin.py/upload"],
stdout=null, stderr=null)
print "Submitted %s." % filename
print "Please visit http://css.csail.mit.edu/6.858/2014/labs/handin.html"
print "to verify the upload."
if __name__ == "__main__":
if len(sys.argv) < 2:
print "Usage: %s TARBALL" % sys.argv[0]
sys.exit(1)
main(sys.argv[1])
|
wyqwyq/mit6858-lab
|
submit.py
|
Python
|
mit
| 1,330 | 0.003008 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2015: Alignak team, see AUTHORS.txt file for contributors
#
# This file is part of Alignak.
#
# Alignak is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Alignak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Alignak. If not, see <http://www.gnu.org/licenses/>.
#
#
# This file incorporates work covered by the following copyright and
# permission notice:
#
# Copyright (C) 2009-2014:
# Jean Gabes, naparuba@gmail.com
# Sebastien Coavoux, s.coavoux@free.fr
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from alignak_test import *
class TestServiceDescriptionInheritance(AlignakTest):
def setUp(self):
self.setup_with_file('etc/alignak_service_description_inheritance.cfg')
def test_service_description_inheritance(self):
self.print_header()
svc = self.sched.services.find_srv_by_name_and_hostname("MYHOST", "SSH")
self.assertIsNotNone(svc)
def test_service_description_inheritance_multihosts(self):
self.print_header()
for hname in ["MYHOST2", "MYHOST3"]:
svc = self.sched.services.find_srv_by_name_and_hostname(hname, "SSH")
self.assertIsNotNone(svc)
if __name__ == '__main__':
unittest.main()
|
ddurieux/alignak
|
test/test_service_description_inheritance.py
|
Python
|
agpl-3.0
| 2,454 | 0.00163 |
"""Misc helper functions for extracting morphological
info from CLTK data structures.
"""
from typing import List, Optional, Tuple, Union
from cltk.core.data_types import Word
from cltk.core.exceptions import CLTKException
from cltk.morphology.universal_dependencies_features import (
NOMINAL_FEATURES,
VERBAL_FEATURES,
MorphosyntacticFeature,
)
ALL_POSSIBLE_FEATURES = NOMINAL_FEATURES + VERBAL_FEATURES
def get_pos(word: Optional[Word]) -> Union[str, None]:
"""Take word, return structured info."""
if not word:
return None
return word.pos.name
def get_features(
word: Optional[Word],
prepend_to_label: str = None,
) -> Tuple[List[str], List[Union[str, int, float, None]]]:
"""Take a word, return a list of feature labels."""
features_present = list() # type: List[Union[str, None]]
feature_variables = list() # type: List[str]
for possible_feature in ALL_POSSIBLE_FEATURES:
feature_variables.append(str(possible_feature).lower())
if not word:
features_present.append(None)
continue
try:
feat = word.__getattr__(possible_feature)[0] # type: MorphosyntacticFeature
features_present.append(str(feat.name))
except CLTKException:
features_present.append(None)
if prepend_to_label:
feature_variables = [prepend_to_label + name for name in feature_variables]
return feature_variables, features_present
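# Minimal usage sketch (illustrative; ``word`` stands for a cltk ``Word``
# produced elsewhere by an NLP pipeline, and the feature names are examples):
#
#     labels, values = get_features(word, prepend_to_label="gov_")
#     # labels -> ["gov_aspect", "gov_case", ...]  (one entry per possible feature)
#     # values -> ["imperfective", None, ...]      (None where the word lacks it)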
|
kylepjohnson/cltk
|
src/cltk/morphology/utils.py
|
Python
|
mit
| 1,473 | 0.001358 |
"""A client for the REST API of imeji instances."""
import logging
from collections import OrderedDict
import requests
from six import string_types
from pyimeji import resource
from pyimeji.config import Config
log = logging.getLogger(__name__)
class ImejiError(Exception):
def __init__(self, message, error):
super(ImejiError, self).__init__(message)
self.error = error.get('error') if isinstance(error, dict) else error
class GET(object):
"""Handle GET requests.
This includes requests
- to retrieve single objects,
- to fetch lists of object references (which are returned as `OrderedDict` mapping
object `id` to additional metadata present in the response).
"""
def __init__(self, api, name):
"""Initialize a handler.
:param api: An Imeji API instance.
:param name: Name specifying the kind of object(s) to retrieve. We check whether\
this name has a plural "s" to determine if a list is to be retrieved.
"""
self._list = name.endswith('s')
self.rsc = getattr(resource, (name[:-1] if self._list else name).capitalize())
self.api = api
self.name = name
self.path = name
if not self._list:
self.path += 's'
def __call__(self, id='', **kw):
"""Calling the handler initiates an HTTP request to the imeji server.
:param id: If a single object is to be retrieved it must be specified by id.
:return: An OrderedDict mapping id to additional metadata for lists, a \
:py:class:`pyimeji.resource.Resource` instance for single objects.
"""
if not self._list and not id:
raise ValueError('no id given')
if id:
id = '/' + id
res = self.api._req('/%s%s' % (self.path, id), params=kw)
if not self._list:
return self.rsc(res, self.api)
return OrderedDict([(d['id'], d) for d in res])
class Imeji(object):
"""The client.
>>> api = Imeji(service_url='http://demo.imeji.org/imeji/')
>>> collection_id = list(api.collections().keys())[0]
>>> collection = api.collection(collection_id)
>>> collection = api.create('collection', title='the new collection')
>>> item = collection.add_item(fetchUrl='http://example.org')
>>> item.delete()
"""
def __init__(self, cfg=None, service_url=None):
self.cfg = cfg or Config()
self.service_url = service_url or self.cfg.get('service', 'url')
user = self.cfg.get('service', 'user', default=None)
password = self.cfg.get('service', 'password', default=None)
self.session = requests.Session()
if user and password:
self.session.auth = (user, password)
def _req(self, path, method='get', json=True, assert_status=200, **kw):
"""Make a request to the API of an imeji instance.
:param path: HTTP path.
:param method: HTTP method.
:param json: Flag signalling whether the response should be treated as JSON.
:param assert_status: Expected HTTP response status of a successful request.
:param kw: Additional keyword parameters will be handed through to the \
appropriate function of the requests library.
:return: The return value of the function of the requests library or a decoded \
JSON object/array.
"""
method = getattr(self.session, method.lower())
res = method(self.service_url + '/rest' + path, **kw)
status_code = res.status_code
if json:
try:
res = res.json()
except ValueError: # pragma: no cover
log.error(res.text[:1000])
raise
if assert_status:
if status_code != assert_status:
log.error(
'got HTTP %s, expected HTTP %s' % (status_code, assert_status))
log.error(res.text[:1000] if hasattr(res, 'text') else res)
raise ImejiError('Unexpected HTTP status code', res)
return res
def __getattr__(self, name):
"""Names of resource classes are accepted and resolved as dynamic attribute names.
This allows convenient retrieval of resources as api.<resource-class>(id=<id>),
or api.<resource-class>s(q='x').
"""
return GET(self, name)
def create(self, rsc, **kw):
if isinstance(rsc, string_types):
cls = getattr(resource, rsc.capitalize())
rsc = cls(kw, self)
return rsc.save()
def delete(self, rsc):
return rsc.delete()
def update(self, rsc, **kw):
for k, v in kw.items():
setattr(rsc, k, v)
return rsc.save()
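# Illustrative error handling (a sketch; the URL and id are hypothetical):
#
#     api = Imeji(service_url='http://demo.imeji.org/imeji')
#     try:
#         item = api.item('unknown-id')
#     except ImejiError as exc:
#         print(exc, exc.error)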
|
xrotwang/pyimeji
|
pyimeji/api.py
|
Python
|
apache-2.0
| 4,739 | 0.002532 |
# -*- coding: utf-8 -*-
import pytest
from flask import url_for
def test_config(app):
assert app.debug, 'App is in debug mode'
assert not app.config.get('MINIFY_HTML'), 'App does minify html'
assert app.config.get('ASSETS_DEBUG'), 'App does build assets'
assert app.config.get('YARR_URL'), 'App doesn\'t have Yarr! URL specified'
def test_routes(client):
assert client.get(url_for('index')).status_code == 200
assert client.get(url_for('search')).status_code == 302, 'Empty query should throw redirect'
|
bluecap-se/yarr.client
|
tests/app_tests.py
|
Python
|
mit
| 539 | 0.001855 |
#!/usr/bin/env python
import unittest
from test import test_support
import socket
import urllib
import sys
import os
import time
mimetools = test_support.import_module("mimetools", deprecated=True)
def _open_with_retry(func, host, *args, **kwargs):
# Connecting to remote hosts is flaky. Make it more robust
# by retrying the connection several times.
for i in range(3):
try:
return func(host, *args, **kwargs)
except IOError, last_exc:
continue
except:
raise
raise last_exc
class URLTimeoutTest(unittest.TestCase):
TIMEOUT = 10.0
def setUp(self):
socket.setdefaulttimeout(self.TIMEOUT)
def tearDown(self):
socket.setdefaulttimeout(None)
def testURLread(self):
f = _open_with_retry(urllib.urlopen, "http://www.python.org/")
x = f.read()
class urlopenNetworkTests(unittest.TestCase):
"""Tests urllib.urlopen using the network.
These tests are not exhaustive. Assuming that testing using files does a
good job overall of some of the basic interface features. There are no
tests exercising the optional 'data' and 'proxies' arguments. No tests
for transparent redirection have been written.
setUp is not used for always constructing a connection to
http://www.python.org/ since there a few tests that don't use that address
and making a connection is expensive enough to warrant minimizing unneeded
connections.
"""
def urlopen(self, *args):
return _open_with_retry(urllib.urlopen, *args)
def test_basic(self):
# Simple test expected to pass.
open_url = self.urlopen("http://www.python.org/")
for attr in ("read", "readline", "readlines", "fileno", "close",
"info", "geturl"):
self.assertTrue(hasattr(open_url, attr), "object returned from "
"urlopen lacks the %s attribute" % attr)
try:
self.assertTrue(open_url.read(), "calling 'read' failed")
finally:
open_url.close()
def test_readlines(self):
# Test both readline and readlines.
open_url = self.urlopen("http://www.python.org/")
try:
self.assertIsInstance(open_url.readline(), basestring,
"readline did not return a string")
self.assertIsInstance(open_url.readlines(), list,
"readlines did not return a list")
finally:
open_url.close()
def test_info(self):
# Test 'info'.
open_url = self.urlopen("http://www.python.org/")
try:
info_obj = open_url.info()
finally:
open_url.close()
self.assertIsInstance(info_obj, mimetools.Message,
"object returned by 'info' is not an "
"instance of mimetools.Message")
self.assertEqual(info_obj.getsubtype(), "html")
def test_geturl(self):
# Make sure same URL as opened is returned by geturl.
URL = "http://www.python.org/"
open_url = self.urlopen(URL)
try:
gotten_url = open_url.geturl()
finally:
open_url.close()
self.assertEqual(gotten_url, URL)
def test_getcode(self):
# test getcode() with the fancy opener to get 404 error codes
URL = "http://www.python.org/XXXinvalidXXX"
open_url = urllib.FancyURLopener().open(URL)
try:
code = open_url.getcode()
finally:
open_url.close()
self.assertEqual(code, 404)
def test_fileno(self):
if (sys.platform in ('win32',) or
not hasattr(os, 'fdopen')):
# On Windows, socket handles are not file descriptors; this
# test can't pass on Windows.
return
# Make sure fd returned by fileno is valid.
open_url = self.urlopen("http://www.python.org/")
fd = open_url.fileno()
FILE = os.fdopen(fd)
try:
self.assertTrue(FILE.read(), "reading from file created using fd "
"returned by fileno failed")
finally:
FILE.close()
def test_bad_address(self):
# Make sure proper exception is raised when connecting to a bogus
# address.
bogus_domain = "sadflkjsasf.i.nvali.d"
try:
socket.gethostbyname(bogus_domain)
except socket.gaierror:
pass
else:
# This happens with some overzealous DNS providers such as OpenDNS
self.skipTest("%r should not resolve for test to work" % bogus_domain)
self.assertRaises(IOError,
# SF patch 809915: In Sep 2003, VeriSign started
# highjacking invalid .com and .net addresses to
# boost traffic to their own site. This test
# started failing then. One hopes the .invalid
# domain will be spared to serve its defined
# purpose.
# urllib.urlopen, "http://www.sadflkjsasadf.com/")
urllib.urlopen, "http://sadflkjsasf.i.nvali.d/")
class urlretrieveNetworkTests(unittest.TestCase):
"""Tests urllib.urlretrieve using the network."""
def urlretrieve(self, *args):
return _open_with_retry(urllib.urlretrieve, *args)
def test_basic(self):
# Test basic functionality.
file_location,info = self.urlretrieve("http://www.python.org/")
self.assertTrue(os.path.exists(file_location), "file location returned by"
" urlretrieve is not a valid path")
FILE = file(file_location)
try:
self.assertTrue(FILE.read(), "reading from the file location returned"
" by urlretrieve failed")
finally:
FILE.close()
os.unlink(file_location)
def test_specified_path(self):
# Make sure that specifying the location of the file to write to works.
file_location,info = self.urlretrieve("http://www.python.org/",
test_support.TESTFN)
self.assertEqual(file_location, test_support.TESTFN)
self.assertTrue(os.path.exists(file_location))
FILE = file(file_location)
try:
self.assertTrue(FILE.read(), "reading from temporary file failed")
finally:
FILE.close()
os.unlink(file_location)
def test_header(self):
# Make sure header returned as 2nd value from urlretrieve is good.
file_location, header = self.urlretrieve("http://www.python.org/")
os.unlink(file_location)
self.assertIsInstance(header, mimetools.Message,
"header is not an instance of mimetools.Message")
def test_data_header(self):
logo = "http://www.python.org/community/logos/python-logo-master-v3-TM.png"
file_location, fileheaders = self.urlretrieve(logo)
os.unlink(file_location)
datevalue = fileheaders.getheader('Date')
dateformat = '%a, %d %b %Y %H:%M:%S GMT'
try:
time.strptime(datevalue, dateformat)
except ValueError:
self.fail('Date value not in %r format', dateformat)
def test_main():
test_support.requires('network')
with test_support.check_py3k_warnings(
("urllib.urlopen.. has been removed", DeprecationWarning)):
test_support.run_unittest(URLTimeoutTest,
urlopenNetworkTests,
urlretrieveNetworkTests)
if __name__ == "__main__":
test_main()
|
alanjw/GreenOpenERP-Win-X86
|
python/Lib/test/test_urllibnet.py
|
Python
|
agpl-3.0
| 8,057 | 0.001614 |
from setuptools import find_packages, setup
from auspost_pac import __version__ as version
setup(
name='python-auspost-pac',
version=version,
license='BSD',
author='Sam Kingston',
author_email='sam@sjkwi.com.au',
description='Python API for Australia Post\'s Postage Assessment Calculator (pac).',
url='https://github.com/sjkingo/python-auspost-pac',
install_requires=[
'cached_property',
'frozendict',
'requests',
],
packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python',
],
)
|
sjkingo/python-auspost-pac
|
setup.py
|
Python
|
bsd-2-clause
| 900 | 0.001111 |
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def subtreeWithAllDeepest(self, root: TreeNode) -> TreeNode:
        def dfs(root):
            # Return (subtree containing all the deepest nodes, its depth).
            if root is None:
                return None, 0
            left, ld = dfs(root.left)
            right, rd = dfs(root.right)
            if ld < rd:    # all deepest nodes live in the right subtree
                return right, rd + 1
            elif ld > rd:  # all deepest nodes live in the left subtree
                return left, ld + 1
            else:          # equally deep on both sides: this node is their LCA
                return root, ld + 1
        return dfs(root)[0]
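# Worked example (the well-known LeetCode 865 sample): the deepest leaves are
# 7 and 4, so the answer is the subtree rooted at 2.
#
#         3
#        / \
#       5   1
#      / \
#     6   2
#        / \
#       7   4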
|
jiadaizhao/LeetCode
|
0801-0900/0865-Smallest Subtree with all the Deepest Nodes/0865-Smallest Subtree with all the Deepest Nodes.py
|
Python
|
mit
| 610 | 0 |
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.utils import simplejson
from django.core.serializers.json import DateTimeAwareJSONEncoder
from django.utils.xmlutils import SimplerXMLGenerator
from django.utils.encoding import smart_unicode
EMITTERS = {}
def get_emitter(format):
try:
return EMITTERS[format]
except KeyError:
raise ValueError('No emitter registered for type %s' % format)
def register_emitter(name=None, content_type='text/plain'):
'''Decorator to register an emitter.
Parameters::
- ``name``: name of emitter ('json', 'xml', ...)
- ``content_type``: content type to serve response as
'''
def inner(func):
EMITTERS[name or func.__name__] = (func, content_type)
return inner
@register_emitter(content_type='application/json; charset=utf-8')
def json(request, data):
cb = request.GET.get('callback')
data = simplejson.dumps(data, cls=DateTimeAwareJSONEncoder,
ensure_ascii=False, indent=4)
return cb and ('%s(%s)' % (cb, data)) or data
@register_emitter(content_type='text/xml; charset=utf-8')
def xml(request, data):
stream = StringIO()
xml = SimplerXMLGenerator(stream, 'utf-8')
xml.startDocument()
xml.startElement('response', {})
to_xml(xml, data)
xml.endElement('response')
xml.endDocument()
return stream.getvalue()
def to_xml(xml, data):
if isinstance(data, (list, tuple)):
for item in data:
xml.startElement('resource', {})
to_xml(xml, item)
xml.endElement('resource')
elif isinstance(data, dict):
for key, value in data.iteritems():
xml.startElement(key, {})
to_xml(xml, value)
xml.endElement(key)
else:
xml.characters(smart_unicode(data))
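# Illustrative output (a sketch; element order follows dict iteration order):
#
#     xml(request, {'name': 'foo', 'tags': ['a', 'b']})
#     # -> '<?xml version="1.0" encoding="utf-8"?>\n'
#     #    '<response><name>foo</name>'
#     #    '<tags><resource>a</resource><resource>b</resource></tags></response>'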
|
j2a/django-simprest
|
simprest/emitters.py
|
Python
|
bsd-3-clause
| 1,874 | 0.001601 |
"""Virtual environment relocatable mixin."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
import shutil
class RelocateMixin(object):
"""Mixin which adds the ability to relocate a virtual environment."""
def relocate(self, destination):
"""Configure the virtual environment for another path.
Args:
destination (str): The target path of the virtual environment.
Note:
            This does not actually move the virtual environment. It only
            rewrites the metadata required to support a move.
"""
for activate in self.bin.activates:
activate.vpath = destination
for binfile in self.bin.files:
shebang = binfile.shebang
if shebang:
shebang = shebang.strip().split(os.linesep)
if len(shebang) == 1 and (
"python" in shebang[0] or "pypy" in shebang[0]
):
binfile.shebang = "#!{0}".format(
os.path.join(destination, "bin", "python")
)
elif len(shebang) == 3 and (
"python" in shebang[1] or "pypy" in shebang[1]
):
shebang[1] = "'''exec' {0} \"$0\" \"$@\"".format(
os.path.join(destination, "bin", "python")
)
binfile.shebang = os.linesep.join(shebang)
# Even though wheel is the official format, there are still several
# cases in the wild where eggs are being installed. Eggs come with the
# possibility of .pth files. Each .pth file contains the path to where
# a module can be found. To handle them we must recurse the entire
# venv file tree since they can be either at the root of the
# site-packages, bundled within an egg directory, or both.
original_path = self.path
original_abspath = self.abspath
dirs = [self]
while dirs:
current = dirs.pop()
dirs.extend(current.dirs)
for file_ in current.files:
if file_.abspath.endswith(".pth"):
content = ""
with open(file_.abspath, "r") as source:
# .pth files are almost always very small. Because of
# this we read the whole file as a convenience.
content = source.read()
# It's not certain whether the .pth will have a relative
# or absolute path so we replace both in order of most to
# least specific.
content = content.replace(original_abspath, destination)
content = content.replace(original_path, destination)
with open(file_.abspath, "w") as source:
source.write(content)
def move(self, destination):
"""Reconfigure and move the virtual environment to another path.
Args:
destination (str): The target path of the virtual environment.
Note:
            Unlike `relocate`, this method *will* move the virtual environment
            to the given path.
"""
self.relocate(destination)
shutil.move(self.path, destination)
self._path = destination
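# Illustrative sketch (``VenvDir`` and the paths are hypothetical; any class
# mixing in RelocateMixin and exposing ``bin``/``path``/``files`` would do):
#
#     venv = VenvDir('/opt/venvs/app')
#     venv.relocate('/srv/venvs/app')  # rewrite shebangs/activate scripts only
#     venv.move('/srv/venvs/app')      # rewrite the metadata *and* move the tree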
|
kevinconway/venvctrl
|
venvctrl/venv/relocate.py
|
Python
|
mit
| 3,455 | 0 |
from direct.distributed.ClockDelta import *
from direct.interval.IntervalGlobal import *
from pandac.PandaModules import *
from DistributedNPCToonBase import *
from toontown.chat.ChatGlobals import *
from toontown.estate import BankGUI, BankGlobals
from toontown.nametag.NametagGlobals import *
from toontown.toonbase import TTLocalizer
class DistributedNPCBanker(DistributedNPCToonBase):
def __init__(self, cr):
DistributedNPCToonBase.__init__(self, cr)
self.jellybeanJar = None
self.bankGUI = None
def disable(self):
self.ignoreAll()
taskMgr.remove(self.uniqueName('popupBankingGUI'))
taskMgr.remove(self.uniqueName('lerpCamera'))
if self.bankGUI:
self.bankGUI.destroy()
self.av = None
base.localAvatar.posCamera(0, 0)
DistributedNPCToonBase.disable(self)
def resetClerk(self):
self.ignoreAll()
taskMgr.remove(self.uniqueName('popupBankingGUI'))
taskMgr.remove(self.uniqueName('lerpCamera'))
if self.bankGUI:
self.bankGUI.destroy()
self.clearMat()
self.startLookAround()
self.detectAvatars()
def handleCollisionSphereEnter(self, collEntry):
self.sendAvatarEnter()
self.nametag3d.setDepthTest(0)
base.cr.playGame.getPlace().setState('purchase')
self.nametag3d.setBin('fixed', 0)
def sendAvatarEnter(self):
self.sendUpdate('avatarEnter')
def setMovie(self, mode, avId, timestamp):
isLocalToon = avId == base.localAvatar.doId
timeStamp = globalClockDelta.localElapsedTime(timestamp)
self.remain = 60 - timeStamp
self.resetClerk()
if mode == BankGlobals.BANK_MOVIE_CLEAR:
if not avId:
self.setChatAbsolute('', CFSpeech | CFTimeout)
if isLocalToon:
self.freeAvatar()
elif mode == BankGlobals.BANK_MOVIE_TIMEOUT:
if isLocalToon:
self.cleanupBankingGUI()
self.freeAvatar()
self.setChatAbsolute(TTLocalizer.STOREOWNER_TOOKTOOLONG,
CFSpeech | CFTimeout)
elif mode == BankGlobals.BANK_MOVIE_DEPOSIT:
if isLocalToon:
self.cleanupBankingGUI()
self.freeAvatar()
self.setChatAbsolute(TTLocalizer.STOREOWNER_GOODBYE,
CFSpeech | CFTimeout)
elif mode == BankGlobals.BANK_MOVIE_GUI:
av = base.cr.doId2do.get(avId)
if av:
self.setupAvatars(av)
if isLocalToon:
self.hideNametag2d()
base.camera.wrtReparentTo(render)
seq = Sequence((base.camera.posQuatInterval(1, Vec3(-5, 9, self.getHeight() - 0.5),
Vec3(-150, -2, 0), other=self, blendType='easeOut',
name=self.uniqueName('lerpCamera'))))
seq.start()
taskMgr.doMethodLater(2.0, self.popupBankingGUI,
self.uniqueName('popupBankingGUI'))
self.setChatAbsolute(TTLocalizer.STOREOWNER_BANKING,
CFSpeech | CFTimeout)
def __handleBankingDone(self, transactionAmount):
self.sendUpdate('transferMoney', [transactionAmount])
def popupBankingGUI(self, task):
self.accept('bankDone', self.__handleBankingDone)
self.bankGUI = BankGUI.BankGUI('bankDone')
return task.done
def cleanupBankingGUI(self):
if self.bankGUI:
self.bankGUI.destroy()
self.bankGUI = None
def freeAvatar(self):
base.localAvatar.posCamera(0, 0)
if base.cr.playGame.getPlace():
base.cr.playGame.getPlace().setState('walk')
self.showNametag2d()
|
linktlh/Toontown-journey
|
toontown/toon/DistributedNPCBanker.py
|
Python
|
apache-2.0
| 3,769 | 0.001592 |
from . import common
import hglib
class test_branches(common.basetest):
def test_empty(self):
self.assertEquals(self.client.branches(), [])
def test_basic(self):
self.append('a', 'a')
rev0 = self.client.commit('first', addremove=True)
self.client.branch('foo')
self.append('a', 'a')
rev1 = self.client.commit('second')
branches = self.client.branches()
expected = []
for r, n in (rev1, rev0):
r = self.client.log(r)[0]
expected.append((r.branch, int(r.rev), r.node[:12]))
self.assertEquals(branches, expected)
def test_active_closed(self):
pass
|
beckjake/python3-hglib
|
tests/test-branches.py
|
Python
|
mit
| 674 | 0.001484 |
from django.conf.urls import url
urlpatterns = [
url(r'^$', 'clientes.views.clientes', name='clientes'),
url(r'^edit/(\d+)$', 'clientes.views.clientes_edit', name='editCliente'),
url(r'^delete/(\d+)$', 'clientes.views.clientes_delete', name='deleteCliente'),
]
|
macs03/autoservicio
|
clientes/urls.py
|
Python
|
mit
| 293 | 0.006826 |
from miasm2.core.asmblock import disasmEngine
from miasm2.arch.msp430.arch import mn_msp430
class dis_msp430(disasmEngine):
def __init__(self, bs=None, **kwargs):
super(dis_msp430, self).__init__(mn_msp430, None, bs, **kwargs)
|
stephengroat/miasm
|
miasm2/arch/msp430/disasm.py
|
Python
|
gpl-2.0
| 242 | 0 |
# This file is part of Supysonic.
# Supysonic is a Python implementation of the Subsonic server API.
#
# Copyright (C) 2020 Alban 'spl0k' Féron
#
# Distributed under terms of the GNU AGPLv3 license.
from flask import request
from ..db import RadioStation
from . import get_entity, api_routing
from .exceptions import Forbidden, MissingParameter
@api_routing("/getInternetRadioStations")
def get_radio_stations():
query = RadioStation.select().sort_by(RadioStation.name)
return request.formatter(
"internetRadioStations",
{"internetRadioStation": [p.as_subsonic_station() for p in query]},
)
@api_routing("/createInternetRadioStation")
def create_radio_station():
if not request.user.admin:
raise Forbidden()
stream_url, name, homepage_url = map(
request.values.get, ("streamUrl", "name", "homepageUrl")
)
if stream_url and name:
RadioStation(stream_url=stream_url, name=name, homepage_url=homepage_url)
else:
raise MissingParameter("streamUrl or name")
return request.formatter.empty
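# Illustrative call (parameter names follow the Subsonic API; the values and
# the HTTP verb accepted depend on how api_routing registers the view):
#
#     /rest/createInternetRadioStation?streamUrl=http://example.com/stream&name=My+Radio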
@api_routing("/updateInternetRadioStation")
def update_radio_station():
if not request.user.admin:
raise Forbidden()
res = get_entity(RadioStation)
stream_url, name, homepage_url = map(
request.values.get, ("streamUrl", "name", "homepageUrl")
)
if stream_url and name:
res.stream_url = stream_url
res.name = name
if homepage_url:
res.homepage_url = homepage_url
else:
raise MissingParameter("streamUrl or name")
return request.formatter.empty
@api_routing("/deleteInternetRadioStation")
def delete_radio_station():
if not request.user.admin:
raise Forbidden()
res = get_entity(RadioStation)
res.delete()
return request.formatter.empty
|
spl0k/supysonic
|
supysonic/api/radio.py
|
Python
|
agpl-3.0
| 1,836 | 0.000545 |
# -*- coding: UTF-8 -*-
# Syntax definition automatically generated by hljs2xt.py
# source: sml.js
name = 'SML'
file_patterns = ['*.sml', '*.ml']
built_in = """
array bool char exn int list option order real ref string substring
vector unit word
""".split()
keyword = """
abstype and andalso as case datatype do else end eqtype exception fn
fun functor handle if in include infix infixr let local nonfix of op
open orelse raise rec sharing sig signature struct structure then
type val with withtype where while
""".split()
literal = ['true', 'false', 'NONE', 'SOME', 'LESS', 'EQUAL', 'GREATER', 'nil']
class comment:
default_text_color = DELIMITER
rules = [
# ignore {'begin': {'pattern': "\\b(a|an|the|are|I|I'm|isn't|don't|doesn't|won't|but|just|should|pretty|simply|enough|gonna|going|wtf|so|such|will|you|your|like)\\b", 'type': 'RegExp'}},
('doctag', [RE(r"(?:TODO|FIXME|NOTE|BUG|XXX):")]),
]
operator_escape = ('operator.escape', [RE(r"\\[\s\S]")])
class string:
default_text_color = DELIMITER
rules = [operator_escape]
number = [
RE(r"\b(?:0[xX][a-fA-F0-9_]+[Lln]?|0[oO][0-7_]+[Lln]?|0[bB][01_]+[Lln]?|[0-9][0-9_]*(?:[Lln]|(?:\.[0-9_]*)?(?:[eE][-+]?[0-9_]+)?)?)"),
]
rules = [
('built_in', built_in),
('keyword', keyword),
('literal', literal),
('literal', [RE(r"\[(?:\|\|)?\]|\(\)")]),
('comment', RE(r"\(\*"), [RE(r"\*\)")], comment),
('symbol', [RE(r"'[A-Za-z_](?!')[\w']*")]),
('type', [RE(r"`[A-Z][\w']*")]),
('type', [RE(r"\b[A-Z][\w']*")]),
# ignore {'begin': "[a-z_]\\w*'[\\w']*"},
('string', RE(r"'"), [RE(r"'")], string),
('string', RE(r"\""), [RE(r"\"")], string),
('number', number),
# ignore {'begin': {'pattern': '[-=]>', 'type': 'RegExp'}},
]
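# Illustrative: in ``val x = 0x2A (* answer *)`` the rules above classify
# ``val`` as a keyword, ``0x2A`` as a number and ``(* answer *)`` as a comment.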
|
editxt/editxt
|
resources/syntax/sml.syntax.py
|
Python
|
gpl-3.0
| 1,797 | 0.003339 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket, struct, time
from hashlib import md5
import sys
import os
import random
# CONFIG
server = '172.16.192.111'
username=''
password=''
CONTROLCHECKSTATUS = '\x00'
ADAPTERNUM = '\x00'
host_ip = '0.210.30.0'
IPDOG = '\x00'
host_name = 'DRCOMFUCKER'
PRIMARY_DNS = '0.0.0.0'
dhcp_server = '0.0.0.0'
AUTH_VERSION = '\x20\x1a'
mac = 0xb888e3051680
host_os = 'WINDIAOS'
KEEP_ALIVE_VERSION = '\xdc\x02'
# CONFIG_END
nic_name = '' # Indicate your nic, e.g. 'eth0.2'
bind_ip = '0.0.0.0'
class ChallengeException (Exception):
def __init__(self):
pass
class LoginException (Exception):
def __init__(self):
pass
def bind_nic():
    try:
        import fcntl
        def get_ip_address(ifname):
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            return socket.inet_ntoa(fcntl.ioctl(
                s.fileno(),
                0x8915, # SIOCGIFADDR
                struct.pack('256s', ifname[:15])
            )[20:24])
        return get_ip_address(nic_name)
    except ImportError:
        print('The bind-to-nic feature needs a Unix based system.')
        return '0.0.0.0'
    except IOError:
        print(nic_name + ' is unacceptable !')
        return '0.0.0.0'
if nic_name != '':
bind_ip = bind_nic()
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((bind_ip, 61440))
s.settimeout(3)
SALT = ''
IS_TEST = True
# specified fields based on version
CONF = "/etc/drcom.conf"
UNLIMITED_RETRY = True
EXCEPTION = False
DEBUG = False #log saves to file
LOG_PATH = '/var/log/drcom_client.log'
if IS_TEST:
DEBUG = True
LOG_PATH = 'drcom_client.log'
def log(*args, **kwargs):
s = ' '.join(args)
print s
if DEBUG:
with open(LOG_PATH,'a') as f:
f.write(s + '\n')
def challenge(svr,ran):
while True:
t = struct.pack("<H", int(ran)%(0xFFFF))
s.sendto("\x01\x02"+t+"\x09"+"\x00"*15, (svr, 61440))
try:
data, address = s.recvfrom(1024)
log('[challenge] recv',data.encode('hex'))
except:
log('[challenge] timeout, retrying...')
continue
if address == (svr, 61440):
break
else:
continue
log('[DEBUG] challenge:\n' + data.encode('hex'))
if data[0] != '\x02':
raise ChallengeException
log('[challenge] challenge packet sent.')
return data[4:8]
def md5sum(s):
m = md5()
m.update(s)
return m.digest()
def dump(n):
s = '%x' % n
if len(s) & 1:
s = '0' + s
return s.decode('hex')
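# e.g. dump(0xb888e3051680) == '\xb8\x88\xe3\x05\x16\x80' -- big-endian bytes,
# left-padded with '0' so the hex string has an even number of digits.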
# def ror(md5, pwd):
# ret = ''
# for i in range(len(pwd)):
# x = ord(md5[i]) ^ ord(pwd[i])
# ret += chr(((x<<3)&0xFF) + (x>>5))
# return ret
def keep_alive_package_builder(number,random,tail,type=1,first=False):
data = '\x07'+ chr(number) + '\x28\x00\x0b' + chr(type)
if first :
data += '\x0f\x27'
else:
data += KEEP_ALIVE_VERSION
data += '\x2f\x12' + '\x00' * 6
data += tail
data += '\x00' * 4
#data += struct.pack("!H",0xdc02)
if type == 3:
foo = ''.join([chr(int(i)) for i in host_ip.split('.')]) # host_ip
#CRC
# edited on 2014/5/12, filled zeros to checksum
# crc = packet_CRC(data+foo)
crc = '\x00' * 4
#data += struct.pack("!I",crc) + foo + '\x00' * 8
data += crc + foo + '\x00' * 8
else: #packet type = 1
data += '\x00' * 16
return data
# def packet_CRC(s):
# ret = 0
# for i in re.findall('..', s):
# ret ^= struct.unpack('>h', i)[0]
# ret &= 0xFFFF
# ret = ret * 0x2c7
# return ret
def keep_alive2(*args):
#first keep_alive:
#number = number (mod 7)
#status = 1: first packet user sended
# 2: first packet user recieved
# 3: 2nd packet user sended
# 4: 2nd packet user recieved
# Codes for test
tail = ''
packet = ''
svr = server
ran = random.randint(0,0xFFFF)
ran += random.randint(1,10)
    # 2014/10/15 added by latyas: the server may send back a file packet
svr_num = 0
packet = keep_alive_package_builder(svr_num,dump(ran),'\x00'*4,1,True)
while True:
log('[keep-alive2] send1',packet.encode('hex'))
s.sendto(packet, (svr, 61440))
data, address = s.recvfrom(1024)
log('[keep-alive2] recv1',data.encode('hex'))
if data.startswith('\x07\x00\x28\x00') or data.startswith('\x07' + chr(svr_num) + '\x28\x00'):
break
elif data[0] == '\x07' and data[2] == '\x10':
log('[keep-alive2] recv file, resending..')
svr_num = svr_num + 1
packet = keep_alive_package_builder(svr_num,dump(ran),'\x00'*4,1, False)
else:
log('[keep-alive2] recv1/unexpected',data.encode('hex'))
#log('[keep-alive2] recv1',data.encode('hex'))
ran += random.randint(1,10)
packet = keep_alive_package_builder(svr_num, dump(ran),'\x00'*4,1,False)
log('[keep-alive2] send2',packet.encode('hex'))
s.sendto(packet, (svr, 61440))
while True:
data, address = s.recvfrom(1024)
if data[0] == '\x07':
svr_num = svr_num + 1
break
else:
log('[keep-alive2] recv2/unexpected',data.encode('hex'))
log('[keep-alive2] recv2',data.encode('hex'))
tail = data[16:20]
ran += random.randint(1,10)
packet = keep_alive_package_builder(svr_num,dump(ran),tail,3,False)
log('[keep-alive2] send3',packet.encode('hex'))
s.sendto(packet, (svr, 61440))
while True:
data, address = s.recvfrom(1024)
if data[0] == '\x07':
svr_num = svr_num + 1
break
else:
log('[keep-alive2] recv3/unexpected',data.encode('hex'))
log('[keep-alive2] recv3',data.encode('hex'))
tail = data[16:20]
log("[keep-alive2] keep-alive2 loop was in daemon.")
i = svr_num
while True:
try:
ran += random.randint(1,10)
packet = keep_alive_package_builder(i,dump(ran),tail,1,False)
#log('DEBUG: keep_alive2,packet 4\n',packet.encode('hex'))
log('[keep_alive2] send',str(i),packet.encode('hex'))
s.sendto(packet, (svr, 61440))
data, address = s.recvfrom(1024)
log('[keep_alive2] recv',data.encode('hex'))
tail = data[16:20]
#log('DEBUG: keep_alive2,packet 4 return\n',data.encode('hex'))
ran += random.randint(1,10)
packet = keep_alive_package_builder(i+1,dump(ran),tail,3,False)
#log('DEBUG: keep_alive2,packet 5\n',packet.encode('hex'))
s.sendto(packet, (svr, 61440))
log('[keep_alive2] send',str(i+1),packet.encode('hex'))
data, address = s.recvfrom(1024)
log('[keep_alive2] recv',data.encode('hex'))
tail = data[16:20]
#log('DEBUG: keep_alive2,packet 5 return\n',data.encode('hex'))
i = (i+2) % 0xFF
time.sleep(20)
keep_alive1(*args)
        except Exception:
            pass # ignore transient socket errors so the keep-alive loop keeps running
import re
def checksum(s):
    # drcom packet checksum: xor the packet as little-endian 32-bit words, then scale.
    ret = 1234
for i in re.findall('....', s):
ret ^= int(i[::-1].encode('hex'), 16)
ret = (1968 * ret) & 0xffffffff
return struct.pack('<I', ret)
def mkpkt(salt, usr, pwd, mac):
data = '\x03\x01\x00'+chr(len(usr)+20)
data += md5sum('\x03\x01'+salt+pwd)
data += usr.ljust(36, '\x00')
data += CONTROLCHECKSTATUS
data += ADAPTERNUM
data += dump(int(data[4:10].encode('hex'),16)^mac).rjust(6,'\x00') #mac xor md51
data += md5sum("\x01" + pwd + salt + '\x00'*4) #md52
data += '\x01' # number of ip
#data += '\x0a\x1e\x16\x11' #your ip address1, 10.30.22.17
data += ''.join([chr(int(i)) for i in host_ip.split('.')]) #x.x.x.x ->
data += '\00'*4 #your ipaddress 2
data += '\00'*4 #your ipaddress 3
data += '\00'*4 #your ipaddress 4
data += md5sum(data + '\x14\x00\x07\x0b')[:8] #md53
data += IPDOG
data += '\x00'*4 #delimeter
data += host_name.ljust(32, '\x00')
data += ''.join([chr(int(i)) for i in PRIMARY_DNS.split('.')]) #primary dns
data += ''.join([chr(int(i)) for i in dhcp_server.split('.')]) #DHCP server
data += '\x00\x00\x00\x00' #secondary dns:0.0.0.0
data += '\x00' * 8 #delimeter
data += '\x94\x00\x00\x00' # unknow
data += '\x05\x00\x00\x00' # os major
data += '\x01\x00\x00\x00' # os minor
data += '\x28\x0a\x00\x00' # OS build
data += '\x02\x00\x00\x00' #os unknown
data += host_os.ljust(32,'\x00')
data += '\x00' * 96
#data += '\x01' + host_os.ljust(128, '\x00')
#data += '\x0a\x00\x00'+chr(len(pwd)) # \0x0a represents version of client, algorithm: DRCOM_VER + 100
#data += ror(md5sum('\x03\x01'+salt+pwd), pwd)
data += AUTH_VERSION
data += '\x02\x0c'
data += checksum(data+'\x01\x26\x07\x11\x00\x00'+dump(mac))
data += '\x00\x00' #delimeter
data += dump(mac)
data += '\x00' # auto logout / default: False
data += '\x00' # broadcast mode / default : False
data += '\xe9\x13' #unknown, filled numbers randomly =w=
log('[mkpkt]',data.encode('hex'))
return data
def login(usr, pwd, svr):
import random
global SALT
i = 0
while True:
salt = challenge(svr,time.time()+random.randint(0xF,0xFF))
SALT = salt
packet = mkpkt(salt, usr, pwd, mac)
log('[login] send',packet.encode('hex'))
s.sendto(packet, (svr, 61440))
data, address = s.recvfrom(1024)
log('[login] recv',data.encode('hex'))
log('[login] packet sent.')
if address == (svr, 61440):
if data[0] == '\x04':
log('[login] loged in')
break
else:
log('[login] login failed.')
if IS_TEST:
time.sleep(3)
else:
time.sleep(30)
continue
else:
if i >= 5 and UNLIMITED_RETRY == False :
log('[login] exception occured.')
sys.exit(1)
else:
continue
log('[login] login sent')
#0.8 changed:
return data[23:39]
#return data[-22:-6]
def keep_alive1(salt,tail,pwd,svr):
foo = struct.pack('!H',int(time.time())%0xFFFF)
data = '\xff' + md5sum('\x03\x01'+salt+pwd) + '\x00\x00\x00'
data += tail
data += foo + '\x00\x00\x00\x00'
log('[keep_alive1] send',data.encode('hex'))
s.sendto(data, (svr, 61440))
while True:
data, address = s.recvfrom(1024)
if data[0] == '\x07':
break
else:
log('[keep-alive1]recv/not expected',data.encode('hex'))
log('[keep-alive1] recv',data.encode('hex'))
def empty_socket_buffer():
#empty buffer for some fucking schools
log('starting to empty socket buffer')
try:
while True:
data, address = s.recvfrom(1024)
            log('received sth unexpected',data.encode('hex'))
            if data == '':
                break
except:
# get exception means it has done.
log('exception in empty_socket_buffer')
pass
log('emptyed')
def daemon():
with open('/var/run/jludrcom.pid','w') as f:
f.write(str(os.getpid()))
def main():
if not IS_TEST:
daemon()
execfile(CONF, globals())
log("auth svr:"+server+"\nusername:"+username+"\npassword:"+password+"\nmac:"+str(hex(mac)))
log(bind_ip)
while True:
try:
package_tail = login(username, password, server)
except LoginException:
continue
log('package_tail',package_tail.encode('hex'))
#keep_alive1 is fucking bullshit!
empty_socket_buffer()
keep_alive1(SALT,package_tail,password,server)
keep_alive2(SALT,package_tail,password,server)
if __name__ == "__main__":
main()
|
drcoms/drcom-generic
|
custom/drcom_for_dlnu_520(x).py
|
Python
|
agpl-3.0
| 12,107 | 0.016519 |
# Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
import webob.exc
from manila.api import extensions
from manila import db
from manila import exception
from manila import utils
LOG = log.getLogger(__name__)
authorize = extensions.extension_authorizer('share', 'services')
class ServiceController(object):
def index(self, req):
"""Return a list of all running services."""
context = req.environ['manila.context']
authorize(context)
all_services = db.service_get_all(context)
services = []
for service in all_services:
service = {
'id': service['id'],
'binary': service['binary'],
'host': service['host'],
'zone': service['availability_zone']['name'],
'status': 'disabled' if service['disabled'] else 'enabled',
'state': 'up' if utils.service_is_up(service) else 'down',
'updated_at': service['updated_at'],
}
services.append(service)
search_opts = [
'host',
'binary',
'zone',
'state',
'status',
]
for search_opt in search_opts:
value = ''
if search_opt in req.GET:
value = req.GET[search_opt]
services = [s for s in services if s[search_opt] == value]
if len(services) == 0:
break
return {'services': services}
def update(self, req, id, body):
"""Enable/Disable scheduling for a service."""
context = req.environ['manila.context']
authorize(context)
if id == "enable":
disabled = False
elif id == "disable":
disabled = True
else:
raise webob.exc.HTTPNotFound("Unknown action")
try:
host = body['host']
binary = body['binary']
except (TypeError, KeyError):
raise webob.exc.HTTPBadRequest()
try:
svc = db.service_get_by_args(context, host, binary)
if not svc:
raise webob.exc.HTTPNotFound('Unknown service')
db.service_update(context, svc['id'], {'disabled': disabled})
except exception.ServiceNotFound:
raise webob.exc.HTTPNotFound("service not found")
return {'host': host, 'binary': binary, 'disabled': disabled}
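# Example request body for PUT /os-services/disable (illustrative values):
#
#     {"host": "host1@generic", "binary": "manila-share"}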
class Services(extensions.ExtensionDescriptor):
"""Services support."""
name = "Services"
alias = "os-services"
updated = "2012-10-28T00:00:00-00:00"
def get_resources(self):
resources = []
resource = extensions.ResourceExtension('os-services',
ServiceController())
resources.append(resource)
return resources
|
jcsp/manila
|
manila/api/contrib/services.py
|
Python
|
apache-2.0
| 3,444 | 0 |
from __future__ import print_function
from psychopy import sound, monitors, core, visual, event, data, gui, logging, info
import numpy as np
from copy import deepcopy
from math import atan, cos, sin, pi, sqrt, pow
import time, sys, platform, os, StringIO
from pandas import DataFrame
from calcUnderOvercorrect import calcOverCorrected
dirOrLocalize = True
autopilot = False
quitFinder = False
if quitFinder:
applescript="\'tell application \"Finder\" to quit\'" #quit Finder.
shellCmd = 'osascript -e '+applescript
os.system(shellCmd)
respClock = core.Clock(); myClock = core.Clock();
afterimageDurClock = core.Clock()
refreshRate = 75
ballStdDev = 0.8
autoLogging = False
participant = 'M'
fullscr=True
infoFirst = {'Participant':participant, 'Check refresh etc':True, 'Fullscreen (timing errors if not)': fullscr, 'Screen refresh rate':refreshRate }
OK = gui.DlgFromDict(dictionary=infoFirst,
title='Szinte & Cavanagh spatiotopic apparent motion',
order=[ 'Participant','Check refresh etc', 'Fullscreen (timing errors if not)'],
tip={'Check refresh etc': 'To confirm refresh rate and that can keep up, at least when drawing a grating'},
#fixed=['Check refresh etc'])#this attribute can't be changed by the user
)
if not OK.OK:
print('User cancelled from dialog box'); core.quit()
participant = infoFirst['Participant']
checkRefreshEtc = infoFirst['Check refresh etc']
fullscr = infoFirst['Fullscreen (timing errors if not)']
refreshRate = infoFirst['Screen refresh rate']
quitFinder = False
if checkRefreshEtc:
quitFinder = True
if quitFinder:
import os
applescript="\'tell application \"Finder\" to quit\'"
shellCmd = 'osascript -e '+applescript
os.system(shellCmd)
demo=False
respDeadline = 100
if autopilot:
respDeadline = 0.1
timeAndDateStr = time.strftime("%d%b%Y_%H-%M", time.localtime())
if os.path.isdir('.'+os.sep+'data'):
dataDir='data'
else:
print('"data" directory does not exist, so saving data in present working directory')
dataDir='.'
fileName = os.path.join(dataDir, participant+'_spatiotopicMotion_'+timeAndDateStr)
dataFile = open(fileName+'.txt', 'w') # sys.stdout #StringIO.StringIO()
saveCodeCmd = 'cp \'' + sys.argv[0] + '\' '+ fileName + '.py'
os.system(saveCodeCmd) #save a copy of the code as it was when that subject was run
logFname = fileName+'.log'
ppLogF = logging.LogFile(logFname,
filemode='w',#if you set this to 'a' it will append instead of overwriting
level=logging.INFO)#errors, data and warnings will be sent to this logfile
scrn=1 #1 means second screen
widthPix =1024#1024 #monitor width in pixels
heightPix =768#768 #monitor height in pixels
monitorwidth = 40. #28.5 #monitor width in centimeters
viewdist = 50.; #cm
pixelperdegree = widthPix/ (atan(monitorwidth/viewdist) / np.pi*180)
bgColor = [0,0,0] #"gray background"
allowGUI = False
waitBlank = False
windowAndMouseUnits = 'deg'
monitorname = 'mitsubishi' #in psychopy Monitors Center #Holcombe lab monitor
mon = monitors.Monitor(monitorname,width=monitorwidth, distance=viewdist)#fetch the most recent calib for this monitor
mon.setSizePix( (widthPix,heightPix) )
def openMyStimWindow(): #make it a function because have to do it several times, want to be sure is identical each time
myWin = visual.Window(monitor=mon,size=(widthPix,heightPix),allowGUI=allowGUI,units=windowAndMouseUnits,color=bgColor,colorSpace='rgb',fullscr=fullscr,
screen=scrn,waitBlanking=waitBlank) #Holcombe lab monitor
return myWin
myWin = openMyStimWindow()
myWin.recordFrameIntervals = True #required by RunTimeInfo?
refreshMsg2 = ''
refreshRateWrong = False
if not checkRefreshEtc:
refreshMsg1 = 'REFRESH RATE WAS NOT CHECKED'
else: #checkRefreshEtc
try:
runInfo = info.RunTimeInfo(
# if you specify author and version here, it overrides the automatic detection of __author__ and __version__ in your script
#author='<your name goes here, plus whatever you like, e.g., your lab or contact info>',
#version="<your experiment version info>",
win=myWin, ## a psychopy.visual.Window() instance; None = default temp window used; False = no win, no win.flips()
refreshTest='grating', ## None, True, or 'grating' (eye-candy to avoid a blank screen)
verbose=False, ## True means report on everything
userProcsDetailed=False ## if verbose and userProcsDetailed, return (command, process-ID) of the user's processes
#seems to require internet access, probably for process lookup
)
#print(runInfo)
logging.info(runInfo)
print('Finished runInfo- which assesses the refresh and processes of this computer')
runInfo_failed = False
except:
runInfo_failed = True
refreshMsg1 = ' runInfo call FAILED so dont know refresh rate'
if not runInfo_failed:
refreshSDwarningLevel_ms = 3 ##ms
if runInfo["windowRefreshTimeSD_ms"] > refreshSDwarningLevel_ms:
print("\nThe variability of the refresh rate is high (SD > %.2f ms)." % (refreshSDwarningLevel_ms))
## and here you could prompt the user with suggestions, possibly based on other info:
if runInfo["windowIsFullScr"]:
print("Your window is full-screen, which is good for timing.")
print('Possible issues: internet / wireless? bluetooth? recent startup (not finished)?')
#if len(runInfo['systemUserProcFlagged']): #doesnt work if no internet
# print('other programs running? (command, process-ID):',info['systemUserProcFlagged'])
medianHz = 1000./runInfo['windowRefreshTimeMedian_ms']
refreshMsg1= 'Median frames per second ~='+ str( np.round(medianHz,1) )
refreshRateTolerancePct = 3
pctOff = abs( (medianHz-refreshRate) / refreshRate )
refreshRateWrong = pctOff > (refreshRateTolerancePct/100.)
if refreshRateWrong:
refreshMsg1 += ' BUT'
refreshMsg1 += ' program assumes ' + str(refreshRate)
refreshMsg2 = 'which is off by more than' + str(round(refreshRateTolerancePct,0)) + '%!!'
else:
refreshMsg1 += ', which is close enough to desired val of ' + str( round(refreshRate,1) )
myWinRes = myWin.size
myWin.allowGUI =True
myWin.close() #have to close window to show dialog box
myDlg = gui.Dlg(title="Screen check", pos=(200,400))
myDlg.addText(refreshMsg1, color='Black')
if refreshRateWrong:
myDlg.addText(refreshMsg2, color='Red')
if refreshRateWrong:
logging.error(refreshMsg1+refreshMsg2)
else: logging.info(refreshMsg1+refreshMsg2)
if checkRefreshEtc and (not demo) and (myWinRes != [widthPix,heightPix]).any():
msgWrongResolution = 'Screen apparently NOT the desired resolution of '+ str(widthPix)+'x'+str(heightPix)+ ' pixels!!'
myDlg.addText(msgWrongResolution, color='Red')
logging.error(msgWrongResolution)
print(msgWrongResolution)
myDlg.addText('Note: to abort press ESC at a trials response screen', color=[-1.,1.,-1.]) # color='DimGrey') color names stopped working along the way, for unknown reason
myDlg.show()
if myDlg.OK: #unpack information from dialogue box
pass
else:
print('User cancelled from dialog box.')
logging.flush()
core.quit()
if not demo:
allowGUI = False
myWin = openMyStimWindow()
targetDot = visual.ImageStim(myWin,mask='circle',colorSpace='rgb', color = (-.5, .3, -.5), size=ballStdDev,autoLog=autoLogging, contrast=1, opacity = 1.0)
foilDot = visual.ImageStim(myWin,mask='circle',colorSpace='rgb', color = (0, 0, 0 ),size=ballStdDev,autoLog=autoLogging, contrast=1, opacity = 1.0)
blackDot = visual.ImageStim(myWin,mask='circle',colorSpace='rgb', color = (-1,-1,-1),size=ballStdDev,autoLog=autoLogging, contrast=0.5, opacity = 1.0)
mouseLocationMarker = visual.Circle(myWin,units=windowAndMouseUnits,radius=ballStdDev/2.)#,autoLog=autoLogging)
mouseLocationMarker.setFillColor((-.5,-.5,-.5), colorSpace='rgb')
clickContinueArea = visual.Rect(myWin,units='norm',width=1,height=.6,fillColor=(-.6,-.6,0),autoLog=autoLogging)
clickContinueAreaX = 1; clickContinueAreaY = 1
mouseLocationMarker.setFillColor((-.5,-.5,-.5), colorSpace='rgb')
beforeFirstTrialText = visual.TextStim(myWin,pos=(0, .8),colorSpace='rgb',color = (-1,-1,-1),alignHoriz='center', alignVert='top', height = 0.05, units='norm',autoLog=autoLogging)
respPromptText = visual.TextStim(myWin,pos=(0, -.3),colorSpace='rgb',color = (-1,-1,-1),alignHoriz='center', alignVert='center', height = 0.07, units='norm',autoLog=autoLogging)
betweenTrialsText = visual.TextStim(myWin,pos=(0, -.4),colorSpace='rgb',color = (-1,-1,-1),alignHoriz='center', alignVert='center',height=.03,units='norm',autoLog=autoLogging)
nextRemindCountText = visual.TextStim(myWin,colorSpace='rgb',color= (1,1,1),alignHoriz='center', alignVert='top',height=.03,units='norm',autoLog=autoLogging)
nextRemindCountTextX = 0.8; nextRemindCountTextY = .8
locationOfProbe= np.array([[0,1.5]]) # np.array([[-10,1.5],[0,1.5],[10,1.5]]) #left, centre, right
#Potential other conditions:[-10,6.5],[0,6.5],[10,6.5],[-10,-3.5],[0,-3.5],[10,-3.5]
stimList=[]
for locus in locationOfProbe: #location of the probe for the trial
probeLocationY = locus[1]
for upDown in [False,True]: #switching between probe moving top to bottom; and bottom to top
for startLeft in [False,True]:
for infoRightSide in [False,True]: #text between trials and continue area on left side or right side
tilts = [0]
if dirOrLocalize:
tilts = [0]
for tilt in tilts: # [-2,0,2]: # [-0.875,0,0.875]: #adjusting whether the probe jump is vertical, or slanted. Tilt positive means second position to right
probeLocationX = locus[0]
stimList.append({'infoRightSide':infoRightSide,'probeX': probeLocationX, 'probeY':probeLocationY, 'startLeft':startLeft, 'upDown': upDown, 'tilt': tilt})
blockReps = 3
trials = data.TrialHandler(stimList, blockReps)
thisTrial = trials.next()
previewCycles = 0
normalCycles = 1
#durations in frames
initialDur = round(0.2*refreshRate) #target and foil dot without probe
probeFirstDisappearance = round(1.2*refreshRate) # probe disappears whilst target and foil dot remain the same
switchCues = round(1.1*refreshRate) # target and foil dots switch positions
probeSecondAppearance = 9999 # probe returns on the other side of the horizontal meridian for 400 ms
probeSecondDisappearance = 9999 # probe disappears
oneCycleFrames = int( round( 2*switchCues ) )
totFrames = oneCycleFrames*normalCycles
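# Sanity check (assuming the default 75 Hz): initialDur = 15 frames (0.2 s),
# probeFirstDisappearance = 90, switchCues = 83, so one cycle is 166 frames (~2.2 s).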
def oneFrameOfStim(n,nWhenAfterimage,finished,targetDotPos,foilDotPos,probePos1,probePos2): #trial stimulus function
targetDotPosThis = deepcopy(targetDotPos) #dont change starting value
foilDotPosThis = deepcopy(foilDotPos)
#previewFrames = previewCycles*oneCycleFrames #First the target dot left->right->left->right to get eye movements in swing of things
#cycleFrame = n % oneCycleFrames
#After he presses key, jump so move eyes
if nWhenAfterimage <= n: #target and foil in exchanged positions
targetDotPosThis *=-1
foilDotPosThis *= -1
targetDot.pos= (targetDotPosThis)
foilDot.pos= (foilDotPosThis)
if n> nWhenAfterimage+5:
finished = True #time to start waiting for mouse click to record response
if n <= initialDur: #show target and foil only, because first part of trial
pass #dont draw black dot, dont change positions
elif initialDur <= n < probeFirstDisappearance: #show first position of probe
#if n >= previewFrames: #dont draw probe for first two cycles
blackDot.pos = (probePos1)
blackDot.draw()
else:
if nWhenAfterimage == 9999: #need to wait until person presses key to indicate afterimage has built up
event.clearEvents() #clear keypresses and mouse clicks
waitingForPress = True
waitStartN = n
while waitingForPress and ( (n-waitStartN)/refreshRate < respDeadline ):
if n%4 <2:
blackDot.draw()
targetDot.draw()
foilDot.draw()
myWin.flip()
n+=1
keysPressed = event.getKeys()
if 'space' in keysPressed:
waitingForPress = False
nWhenAfterimage = n
afterimageDurClock.reset()
if autopilot:
nWhenAfterimage =0
# if probeSecondAppearance <= cycleFrame < probeSecondDisappearance: #probe in new location
# if n >= previewFrames: #dont draw probe for first two cycles
# blackDot.pos = (probePos2)
# blackDot.draw()
targetDot.draw()
foilDot.draw()
myWin.flip()
return n, nWhenAfterimage, finished
if dirOrLocalize:
myMouse = event.Mouse(visible = 'False',win=myWin)
header = 'trialnum\tsubject\tinfoRightside\tprobeX\tprobeY\tprobePos1X\tprobePos1Y\tstartLeft\tupDown\ttilt\tjitter\trespX\trespY\tdX\tdY\tafterimageGenesis\tafterimageDur'
else:
header = 'trialnum\tsubject\tprobeX\tprobeY\tprobePos1X\tprobePos1Y\tstartLeft\tupDown\ttilt\tjitter\trespLeftRight'
print(header, file=dataFile)
def collectResponse(expStop, dirOrLocalize, stuffToDrawOnRespScreen):
#if dirOrLocalize True, that means participant must click on a location, not report direction of motion
afterimageDur = -999
if dirOrLocalize: #collect mouse click
waitingForClick = True
mouseMovedYet = False
myMouse.getRel() #resets relative so can detect first time mouse moves, and only then draw the marker
while waitingForClick and respClock.getTime() < respDeadline:
if (myMouse.getRel()).any():
mouseMovedYet = True
m_x, m_y = myMouse.getPos() # in the same units as the Window
mouseLocationMarker.setPos((m_x, m_y)) #Because mouseLocationMarker is in same units as windowAndMouseUnits, and mouse returns windowAndMouseUnits, this has to work
mouse1, mouse2, mouse3 = myMouse.getPressed()
if mouse1 or mouse2 or mouse3:
waitingForClick = False
afterimageDur = afterimageDurClock.getTime()
#print('afterimageDur=',afterimageDur) #debugOFF
keysPressed = event.getKeys()
if 'escape' in keysPressed:
expStop = True
if mouseMovedYet:
mouseLocationMarker.draw() #dont draw marker until mouse moves for the first time
for x in stuffToDrawOnRespScreen:
x.draw()
myWin.flip()
if expStop or waitingForClick: #person never responded, but timed out. Presumably because of autopilot or hit escape
m_x = 0.0
m_y = 0.0
return (expStop, (m_x, m_y), afterimageDur)
else: #not dirOrLocalize, so report direction with arrow key
keysPressed = event.waitKeys(maxWait = respDeadline, keyList = ['left','right','escape'], timeStamped = False)
if keysPressed is None:
keysPressed = ['-99'] #because otherwise testing what's in it gives error
if autopilot and ('escape' not in keysPressed): #optionally person can press key, like esc to abort
keysPressed = ['right']
if 'escape' in keysPressed:
expStop=True
if 'left' in keysPressed: #recoding key presses as 0 (anticlockwise) or 1 (clockwise) for data analysis
respLeftRight = 0
else:
respLeftRight = 1
resp = respLeftRight
return (expStop, resp, afterimageDur)
if dirOrLocalize:
respPromptText.setText("")
instructns = ("During a trial, follow the green dot with your eyes."
"Always look at the green dot, but attend to the black dot that appears. "
"At the end of the trial please click on where you see the afterimage of the dot. ")
else:
instructns = ("During a trial, follow the green dot with your eyes. "
"Always look at the green dot, but attend to the black dot that will either move upwards or downwards during the "
"trial. At the end of the trial you are required to identify whether the black dot moved (slightly) to the left "
"or the right. Mostly it will have jumped vertically but with a slight left or right offset. "
"Press the left arrow for left, \n"
"or the right arrow for right ")
respPromptText.setText("<---- left right ---->")
beforeFirstTrialText.setText(instructns)
def waitBeforeTrial(nDone,respDeadline,expStop,stuffToDrawOnRespScreen):
#displayDraw is a function instance to call to draw what want to between trials
    waitingForClick = False #must exist in both modes: the SPACE-wait below tests it
    if dirOrLocalize:
        betweenTrialsText.setText('CLICK in blue area to continue')
        clickContinueArea.draw()
    else:
        betweenTrialsText.setText('Press SPACE to continue')
progressMsg = 'Completed ' + str(nDone) + ' of ' + str(trials.nTotal) + ' trials'
nextRemindCountText.setText(progressMsg)
event.clearEvents() #clear keypresses and mouse clicks
myClock.reset();
if dirOrLocalize:
betweenTrialsText.setText('CLICK in blue area to continue')
waitingForClick = True
        while waitingForClick and myClock.getTime() < respDeadline: #myClock was reset above, so this measures time on this screen
m_x, m_y = myMouse.getPos() # in the same units as the Window
            mouseLocationMarker.setPos((m_x, m_y)) #marker and mouse share the window's units, so positions map directly
mouse1, mouse2, mouse3 = myMouse.getPressed()
if myMouse.isPressedIn(clickContinueArea):
waitingForClick = False
event.clearEvents()
if waitingForClick and (mouse1 or mouse2 or mouse3):
                myWin.flip(); myWin.flip() #flicker the screen to signal the click registered but landed outside the blue area
keysPressed = event.getKeys()
if 'escape' in keysPressed:
expStop = True
for x in stuffToDrawOnRespScreen:
x.draw()
if nDone==0:
beforeFirstTrialText.draw()
clickContinueArea.draw()
mouseLocationMarker.draw()
betweenTrialsText.draw()
nextRemindCountText.draw()
myWin.flip()
    else: #not dirOrLocalize: wait for a spacebar press instead of a click
        waitingForPressBetweenTrials = True
        betweenTrialsText.setText('While looking at the green dot, press SPACE to continue')
while waitingForPressBetweenTrials and myClock.getTime() < respDeadline:
if nDone==0:
beforeFirstTrialText.draw()
respPromptText.draw()
betweenTrialsText.draw()
for x in stuffToDrawOnRespScreen:
x.draw()
myWin.flip()
for key in event.getKeys(): #check if pressed abort-type key
if key in ['escape']:
expStop = True; waitingForPressBetweenTrials=False
if key in ['space']:
waitingForPressBetweenTrials=False
return expStop
#end waiting between trials
expStop = False
nDone = 0
while nDone < trials.nTotal and not expStop:
clickContinueArea.setPos( (clickContinueAreaX*(2*thisTrial['infoRightSide']-1), clickContinueAreaY) ) #left or right side
nextRemindCountText.setPos( (nextRemindCountTextX*(2*thisTrial['infoRightSide']-1), nextRemindCountTextY) )
if thisTrial['startLeft']:
targetDotPos=np.array([-5,0]) #target of saccades starts on left.
foilDotPos =np.array([5,0])
else: #target starts on right
targetDotPos=np.array([5,0]) #position of the green and grey stimulus for second half of trials - right to left
foilDotPos =np.array([-5,0])
    yMultiplier = thisTrial['upDown'] if thisTrial['upDown'] else -1 #upDown is 1 or 0; map 0 to -1
jitter = np.random.uniform(-2,2)
probePos1= [ thisTrial['probeX']+jitter-thisTrial['tilt'], thisTrial['probeY']*yMultiplier ]
probePos2 =[ thisTrial['probeX']+jitter+thisTrial['tilt'], probePos1[1]*-1 ] #y of second location is simply vertical reflection of position 1
targetDot.setPos(targetDotPos)
foilDot.setPos(foilDotPos)
expStop = waitBeforeTrial(nDone, respDeadline, expStop, stuffToDrawOnRespScreen=(targetDot,foilDot)) #show first frame over and over
event.clearEvents() #clear keypresses and mouse clicks
    nWhenAfterimage = 9999 #sentinel; will be set to the frame number when the afterimage is first reported
finished = False
n=0
while not finished: #Loop for the trial STIMULUS
n, nWhenAfterimage, finished = oneFrameOfStim(n,nWhenAfterimage,finished, targetDotPos,foilDotPos,probePos1,probePos2)
n +=1
afterimageGenesis = (nWhenAfterimage - probeFirstDisappearance) / refreshRate
event.clearEvents() #clear keypresses and mouse clicks
    respPromptText.setPos([0,-.5]) #low down so it doesn't interfere with the apparent motion
respPromptText.draw()
targetDot.draw()
foilDot.draw()
myWin.flip()
#myMouse.setPos((-5,-2)) #setPos only works for pygame window, not pyglet that psychopy using now
#myMouse = event.Mouse(visible = 'False',win=myWin)
#myMouse.setVisible(True)
expStop,resp,afterimageDur = collectResponse(expStop, dirOrLocalize, stuffToDrawOnRespScreen=(targetDot,foilDot,respPromptText))
#myMouse = event.Mouse(visible = 'False',win=myWin)
#myMouse.setVisible(False)
if not expStop:
if nDone==0: #initiate results dataframe
df = DataFrame(thisTrial, index=[nDone],
columns = ['probeX','probeY','startLeft','tilt','upDown']) #columns included purely to specify their order
df['jitter'] = jitter
if dirOrLocalize:
df['respX'] = resp[0]
df['respY'] = resp[1]
df['dx'] = resp[0] - probePos1[0]
df['dy'] = resp[1] - probePos1[1]
df['afterimageGenesis'] = afterimageGenesis
df['afterimageDur'] = afterimageDur
else:
df['respLeftRight'] = resp
else: #Not first trial. Add this trial
            df = df.append( thisTrial, ignore_index=True ) #ignore_index because thisTrial carries no index (row name)
df['jitter'][nDone] = jitter
if dirOrLocalize:
print("resp= ",resp)
df['respX'][nDone] = resp[0]
df['respY'][nDone] = resp[1]
df['dx'][nDone] = resp[0] - probePos1[0]
df['dy'][nDone] = resp[1] - probePos1[1]
df['afterimageGenesis'][nDone] = afterimageGenesis
df['afterimageDur'][nDone] = afterimageDur
else:
df['respLeftRight'][nDone] = resp
        print( df.loc[nDone-1:nDone] ) #print this trial and the previous trial, since there's no way to print a single record in wide format
#Should be able to print from the dataFrame in csv format
# header = 'trialnum\tsubject\tinfoRightside\tprobeX\tprobeY\tprobePos1X\tprobePos1Y\tstartLeft\tupDown\ttilt\tjitter\trespX\trespY\tdX\tdY\tafterimageGenesis'
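        # A hedged alternative (not in the original): the row could be written
        # straight from the DataFrame, e.g.
        #   df.loc[[nDone]].to_csv(dataFile, header=False, sep='\t')
        # but the hand-built tab-joined string below keeps explicit control of
        # column order and number formatting.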
oneTrialOfData = (str(nDone)+'\t'+participant+'\t'+ "%r\t"%thisTrial['infoRightSide'] + "%2.2f\t"%thisTrial['probeX'] + "%2.2f\t"%thisTrial['probeY'] + "%2.2f\t"%probePos1[0] +
"%2.2f\t"%probePos1[1] + "%r\t"%thisTrial['startLeft'] +"%r\t"%thisTrial['upDown'] + "%r\t"%thisTrial['tilt'] + "%r\t"%jitter)
if dirOrLocalize:
oneTrialOfData += ("%.2f\t"%df['respX'][nDone] + "%.2f\t"%df['respY'][nDone] + "%.2f\t"%df['dx'][nDone] + "%.2f\t"%df['dy'][nDone] +
"%.2f\t"%afterimageGenesis + "%.2f"%afterimageDur)
else:
oneTrialOfData += "%r"%resp
print(oneTrialOfData, file= dataFile)
if nDone< trials.nTotal-1:
thisTrial=trials.next()
myWin.clearBuffer()
nDone+=1
dataFile.flush(); logging.flush()
myWin.close()
if expStop:
print("Experiment stopped because user stopped it.")
else:
print("Experiment finished")
if nDone >0:
#Use pandas to calculate proportion correct at each level
    #The df.dtypes here are "object" (mixed Python objects), which pandas cannot
    #average, so convert the columns to numeric first
    #print('df.dtypes=\n',df.dtypes)
    df = df.convert_objects(convert_numeric=True) #convert dtypes from object to numeric
#df['dist'] =
#analyze cases where tilt==0
tilt = df.loc[:,'tilt']
    neutralStimIdxs = (tilt==0) #boolean mask; the tilt==0 trials are the neutral ones
if len(neutralStimIdxs)>1:
if neutralStimIdxs.any(): #Calculate over/under-correction, which is only interpretable when tilt=0
df['overCorrected']= np.nan
if not dirOrLocalize:
forCalculatn = df.loc[neutralStimIdxs, ['tilt','startLeft','upDown','respLeftRight']]
overCorrected = calcOverCorrected( forCalculatn )
print('overCorrected=\n',overCorrected)
df.loc[neutralStimIdxs, 'overCorrected'] = overCorrected
#print('dataframe with answer added=\n',df) #debug
                #Summarise over/under-correction
print('For 0 tilt, overcorrection responses=', round( 100*df['overCorrected'].mean(), 2),
'% of ', df['overCorrected'].count(), ' trials', sep='')
#Calculate mean for each factor level
zeroTiltOnly = df.loc[neutralStimIdxs,:]
startLeft = zeroTiltOnly.groupby('startLeft')
print('Summary of startLeft\n',startLeft.mean())
upDown= zeroTiltOnly.groupby('upDown')
print('Summary of upDown\n',upDown.mean())
tiltGrp= df.groupby('tilt')
print('Summary of tilt\n',tiltGrp.mean())
|
alexholcombe/spatiotopic-motion
|
dotLocalize.py
|
Python
|
mit
| 26,405 | 0.023859 |
"""
Support for RFXtrx switches.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.rfxtrx/
"""
import logging
import homeassistant.components.rfxtrx as rfxtrx
from homeassistant.components.rfxtrx import (
ATTR_FIREEVENT, ATTR_NAME, ATTR_PACKETID, ATTR_STATE, EVENT_BUTTON_PRESSED)
from homeassistant.components.switch import SwitchDevice
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.util import slugify
DEPENDENCIES = ['rfxtrx']
SIGNAL_REPETITIONS = 1
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
"""Setup the RFXtrx platform."""
import RFXtrx as rfxtrxmod
# Add switch from config file
    switches = []
signal_repetitions = config.get('signal_repetitions', SIGNAL_REPETITIONS)
for device_id, entity_info in config.get('devices', {}).items():
if device_id in rfxtrx.RFX_DEVICES:
continue
_LOGGER.info("Add %s rfxtrx.switch", entity_info[ATTR_NAME])
        # Check whether this device should fire an event
fire_event = entity_info.get(ATTR_FIREEVENT, False)
datas = {ATTR_STATE: False, ATTR_FIREEVENT: fire_event}
rfxobject = rfxtrx.get_rfx_object(entity_info[ATTR_PACKETID])
newswitch = RfxtrxSwitch(
entity_info[ATTR_NAME], rfxobject, datas,
signal_repetitions)
rfxtrx.RFX_DEVICES[device_id] = newswitch
        switches.append(newswitch)
    add_devices_callback(switches)
def switch_update(event):
"""Callback for sensor updates from the RFXtrx gateway."""
if not isinstance(event.device, rfxtrxmod.LightingDevice) or \
event.device.known_to_be_dimmable:
return
# Add entity if not exist and the automatic_add is True
device_id = slugify(event.device.id_string.lower())
if device_id not in rfxtrx.RFX_DEVICES:
automatic_add = config.get('automatic_add', False)
if not automatic_add:
return
_LOGGER.info(
"Automatic add %s rfxtrx.switch (Class: %s Sub: %s)",
device_id,
event.device.__class__.__name__,
event.device.subtype
)
pkt_id = "".join("{0:02x}".format(x) for x in event.data)
entity_name = "%s : %s" % (device_id, pkt_id)
datas = {ATTR_STATE: False, ATTR_FIREEVENT: False}
signal_repetitions = config.get('signal_repetitions',
SIGNAL_REPETITIONS)
new_switch = RfxtrxSwitch(entity_name, event, datas,
signal_repetitions)
rfxtrx.RFX_DEVICES[device_id] = new_switch
add_devices_callback([new_switch])
# Check if entity exists or previously added automatically
if device_id in rfxtrx.RFX_DEVICES:
_LOGGER.debug(
"EntityID: %s switch_update. Command: %s",
device_id,
event.values['Command']
)
if event.values['Command'] == 'On'\
or event.values['Command'] == 'Off':
# Update the rfxtrx device state
is_on = event.values['Command'] == 'On'
# pylint: disable=protected-access
rfxtrx.RFX_DEVICES[device_id]._state = is_on
rfxtrx.RFX_DEVICES[device_id].update_ha_state()
# Fire event
if rfxtrx.RFX_DEVICES[device_id].should_fire_event:
rfxtrx.RFX_DEVICES[device_id].hass.bus.fire(
EVENT_BUTTON_PRESSED, {
ATTR_ENTITY_ID:
rfxtrx.RFX_DEVICES[device_id].entity_id,
ATTR_STATE: event.values['Command'].lower()
}
)
# Subscribe to main rfxtrx events
if switch_update not in rfxtrx.RECEIVED_EVT_SUBSCRIBERS:
rfxtrx.RECEIVED_EVT_SUBSCRIBERS.append(switch_update)
class RfxtrxSwitch(SwitchDevice):
"""Representation of a RFXtrx switch."""
def __init__(self, name, event, datas, signal_repetitions):
"""Initialize the switch."""
self._name = name
self._event = event
self._state = datas[ATTR_STATE]
self._should_fire_event = datas[ATTR_FIREEVENT]
self.signal_repetitions = signal_repetitions
@property
def should_poll(self):
"""No polling needed for a RFXtrx switch."""
return False
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def should_fire_event(self):
"""Return is the device must fire event."""
return self._should_fire_event
@property
def is_on(self):
"""Return true if light is on."""
return self._state
@property
def assumed_state(self):
"""Return true if unable to access real state of entity."""
return True
def turn_on(self, **kwargs):
"""Turn the device on."""
if not self._event:
return
for _ in range(self.signal_repetitions):
self._event.device.send_on(rfxtrx.RFXOBJECT.transport)
self._state = True
self.update_ha_state()
def turn_off(self, **kwargs):
"""Turn the device off."""
if not self._event:
return
for _ in range(self.signal_repetitions):
self._event.device.send_off(rfxtrx.RFXOBJECT.transport)
self._state = False
self.update_ha_state()
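# A hedged example configuration (YAML) for this platform, inferred from the
# keys this module reads ('devices', 'automatic_add', 'signal_repetitions' and
# the ATTR_* entity keys); the device id and packetid values are hypothetical:
#
#   switch:
#     platform: rfxtrx
#     automatic_add: True
#     signal_repetitions: 3
#     devices:
#       123efab1:
#         name: Living room light
#         packetid: 0b1100cd0213c7f210010f51
#         fire_event: True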
|
justyns/home-assistant
|
homeassistant/components/switch/rfxtrx.py
|
Python
|
mit
| 5,695 | 0 |
from __future__ import print_function
from scipy.ndimage import gaussian_filter
import numpy as np
from PIL import Image
img = np.asarray(Image.open('../Klimt/Klimt.ppm'))
img_gray = np.asarray(Image.open('../Klimt/Klimt.pgm'))
print('img:', img.shape)
sigmas = [0.5, 2, 5, 7]
for sigma in sigmas:
print('sigma:', sigma)
    # do not filter across the channel axis (sigma=0 on the last axis); see
    # https://github.com/scikit-image/scikit-image/blob/fca9f16da4bd7420245d05fa82ee51bb9677b039/skimage/filters/_gaussian.py#L12-L126
img_blur = Image.fromarray(gaussian_filter(img, sigma=(sigma, sigma, 0), mode = 'nearest'))
img_blur.save('Klimt_RGB_Gaussian_blur_sigma={:.1f}.png'.format(sigma))
img_blur = Image.fromarray(gaussian_filter(img_gray, sigma=sigma, mode = 'nearest'))
img_blur.save('Klimt_gray_Gaussian_blur_sigma={:.1f}.png'.format(sigma))
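# A minimal check (not in the original script): filtering each channel
# separately matches the 3-D call with sigma 0 on the channel axis.
per_channel = np.stack(
    [gaussian_filter(img[:, :, c], sigma=2, mode='nearest') for c in range(3)],
    axis=-1)
print('per-channel equals sigma=(2, 2, 0):',
      np.array_equal(per_channel,
                     gaussian_filter(img, sigma=(2, 2, 0), mode='nearest')))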
|
lagadic/ViSP-images
|
Gaussian-filter/Gaussian_filter.py
|
Python
|
gpl-2.0
| 838 | 0.008353 |
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import time
import MPR121
print('Adafruit MPR121 Capacitive Touch Sensor Test')
# Create MPR121 instance.
cap = MPR121.MPR121()
# Initialize communication with MPR121 using default I2C bus of device, and
# default I2C address (0x5A). On BeagleBone Black will default to I2C bus 0.
if not cap.begin():
print('Error initializing MPR121. Check your wiring!')
sys.exit(1)
#cap.set_thresholds(6, 12)
# Alternatively, specify a custom I2C address such as 0x5B (ADDR tied to 3.3V),
# 0x5C (ADDR tied to SDA), or 0x5D (ADDR tied to SCL).
#cap.begin(address=0x5B)
# Also you can specify an optional I2C bus with the bus keyword parameter.
#cap.begin(busnum=1)
# Main loop to print a message every time a pin is touched.
print('Press Ctrl-C to quit.')
while True:
current_touched = cap.touched()
print(current_touched)
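    # Not in the original: decode the 12-bit mask into touched pin numbers
    # (each pin is one bit, as the commented-out example below illustrates).
    print('touched pins:', [i for i in range(12) if current_touched & (1 << i)])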
# # Check each pin's last and current state to see if it was pressed or released.
# for i in range(12):
# # Each pin is represented by a bit in the touched value. A value of 1
# # means the pin is being touched, and 0 means it is not being touched.
# pin_bit = 1 << i
# # First check if transitioned from not touched to touched.
# if current_touched & pin_bit and not last_touched & pin_bit:
# print('{0} touched!'.format(i))
# # Next check if transitioned from touched to not touched.
# if not current_touched & pin_bit and last_touched & pin_bit:
# print('{0} released!'.format(i))
# # Update last state and wait a short period before repeating.
# last_touched = current_touched
# Alternatively, if you only care about checking one or a few pins you can
# call the is_touched method with a pin number to directly check that pin.
# This will be a little slower than the above code for checking a lot of pins.
# if cap.is_touched(2):
# print('Pin 2 is being touched!')
# elif cap.is_touched(7):
# print('Pin 7 is being touched!')
# elif cap.is_touched(8):
# print('Pin 8 is being touched!')
# else:
    # If you're curious or want to see debug info for each pin, uncomment the
    # following lines:
# print ('\t\t\t\t\t\t\t\t\t\t\t\t\t 0x{0:0X}'.format(cap.touched()))
# filtered = [cap.filtered_data(i) for i in range(12)]
# print('Filt:', '\t'.join(map(str, filtered)))
# base = [cap.baseline_data(i) for i in range(12)]
# print('Base:', '\t'.join(map(str, base)))
time.sleep(0.1)
|
eddieruano/Sentinel
|
assist/Adafruit_Python_MPR121/examples/simpletest.py
|
Python
|
apache-2.0
| 3,622 | 0.002485 |
# Copyright (C) 2014 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import logging
import gevent
import conpot.core as conpot_core
logger = logging.getLogger(__name__)
# Simulates power usage for a Kamstrup 382 meter
class UsageSimulator(object):
def __init__(self, *args):
# both highres, lowres will be calculated on the fly
self.energy_in = 0
self.energy_out = 0
# p1, p2, p3
self.voltage = [0, 0, 0]
self.current = [0, 0, 0]
self.power = [0, 0, 0]
gevent.spawn(self.initialize)
def usage_counter(self):
while True:
            # since this is gevent, this sleeps for _at least_ 1 second
            # TODO: measure the actual time elapsed since the last update and
            # scale the added energy accordingly
gevent.sleep(1)
for x in [0, 1, 2]:
self.energy_in += int(self.power[x] * 0.0036)
def initialize(self):
# we need the databus initialized before we can probe values
databus = conpot_core.get_databus()
databus.initialized.wait()
# accumulated counter
energy_in_register = 'register_13'
self.energy_in = databus.get_value(energy_in_register)
databus.set_value(energy_in_register, self._get_energy_in)
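        # The registers hold bound methods; the databus presumably calls any
        # callable value on read, so each register reports a live figure
        # (the same pattern repeats for every register below).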
databus.set_value('register_1', self._get_energy_in_lowres)
energy_out_register = 'register_14'
self.energy_out = databus.get_value(energy_out_register)
databus.set_value(energy_out_register, self._get_energy_out)
databus.set_value('register_2', self._get_energy_out_lowres)
volt_1_register = 'register_1054'
self.voltage[0] = databus.get_value(volt_1_register)
databus.set_value(volt_1_register, self._get_voltage_1)
volt_2_register = 'register_1055'
self.voltage[1] = databus.get_value(volt_2_register)
databus.set_value(volt_2_register, self._get_voltage_2)
volt_3_register = 'register_1056'
self.voltage[2] = databus.get_value(volt_3_register)
databus.set_value(volt_3_register, self._get_voltage_3)
current_1_register = 'register_1076'
self.current[0] = databus.get_value(current_1_register)
databus.set_value(current_1_register, self._get_current_1)
current_2_register = 'register_1077'
self.current[1] = databus.get_value(current_2_register)
databus.set_value(current_2_register, self._get_current_2)
current_3_register = 'register_1078'
self.current[2] = databus.get_value(current_3_register)
databus.set_value(current_3_register, self._get_current_3)
power_1_register = 'register_1080'
self.power[0] = databus.get_value(power_1_register)
databus.set_value(power_1_register, self._get_power_1)
power_2_register = 'register_1081'
self.power[1] = databus.get_value(power_2_register)
databus.set_value(power_2_register, self._get_power_2)
power_3_register = 'register_1082'
self.power[2] = databus.get_value(power_3_register)
databus.set_value(power_3_register, self._get_power_3)
gevent.spawn(self.usage_counter)
def _get_energy_in(self):
return self.energy_in
def _get_energy_out(self):
return self.energy_out
def _get_energy_in_lowres(self):
return self.energy_in / 1000
def _get_energy_out_lowres(self):
return self.energy_out / 1000
def _get_voltage_1(self):
return self.voltage[0]
def _get_voltage_2(self):
return self.voltage[1]
def _get_voltage_3(self):
return self.voltage[2]
def _get_current_1(self):
return self.current[0]
def _get_current_2(self):
return self.current[1]
def _get_current_3(self):
return self.current[2]
def _get_power_1(self):
return self.power[0]
def _get_power_2(self):
return self.power[1]
def _get_power_3(self):
return self.power[2]
|
soltraconpotprojectNLDA/SoltraConpot
|
conpot/protocols/kamstrup/usage_simulator.py
|
Python
|
gpl-2.0
| 4,667 | 0.000214 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
In this example, we connect a signal
of a QSlider to a slot
of a QLCDNumber.
"""
import sys
from PySide.QtGui import *
from PySide.QtCore import *
class Example(QWidget):
def __init__(self):
super(Example, self).__init__()
lcd = QLCDNumber()
sld = QSlider(Qt.Horizontal)
vbox = QVBoxLayout()
vbox.addWidget(lcd)
vbox.addWidget(sld)
sld.valueChanged.connect(lcd.display)
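        # valueChanged(int) carries the slider's new value directly into
        # QLCDNumber's display(int) slot; no handler function is needed.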
self.setLayout(vbox)
self.setGeometry(300, 300, 250, 150)
self.setWindowTitle('Signal & slot')
def main():
app = QApplication(sys.argv)
ex = Example()
ex.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
madoodia/codeLab
|
pyside/signal_slot_tests/test_signal_slot.py
|
Python
|
mit
| 752 | 0.00133 |
"""
Package for managing a ranked ladder, which is a special kind of ongoing League.
Package Requirements
--------------------
botbase
match
user
Dependencies
------------
cmd_ladder
botbase/
commandtype
ladder/
ratingsdb
match/
cmd_match
matchinfo
user/
userlib
ladder
util/
server
ladderadminchannel
botbase/
botchannel
cmd_seedgen
ladder/
cmd_ladder
race/
cmd_racestats
user/
cmd_user
rating
ratingsdb
database/
dbconnect
ladder/
rating
ratingutil
ratingutil
ladder/
rating
util/
console
"""
|
incnone/necrobot
|
necrobot/ladder/__init__.py
|
Python
|
mit
| 678 | 0.004425 |
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class ABCTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
global a, b, c
a = Table(
"a",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("adata", String(30)),
Column("type", String(30)),
)
b = Table(
"b",
metadata,
Column("id", Integer, ForeignKey("a.id"), primary_key=True),
Column("bdata", String(30)),
)
c = Table(
"c",
metadata,
Column("id", Integer, ForeignKey("b.id"), primary_key=True),
Column("cdata", String(30)),
)
@testing.combinations(("union",), ("none",))
def test_abc_poly_roundtrip(self, fetchtype):
class A(fixtures.ComparableEntity):
pass
class B(A):
pass
class C(B):
pass
if fetchtype == "union":
abc = a.outerjoin(b).outerjoin(c)
bc = a.join(b).outerjoin(c)
else:
abc = bc = None
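        # With "union", an explicit outer-join selectable drives polymorphic
        # loading in one SELECT; with None, the mapper derives the joined
        # selectable itself.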
self.mapper_registry.map_imperatively(
A,
a,
with_polymorphic=("*", abc),
polymorphic_on=a.c.type,
polymorphic_identity="a",
)
self.mapper_registry.map_imperatively(
B,
b,
with_polymorphic=("*", bc),
inherits=A,
polymorphic_identity="b",
)
self.mapper_registry.map_imperatively(
C, c, inherits=B, polymorphic_identity="c"
)
a1 = A(adata="a1")
b1 = B(bdata="b1", adata="b1")
b2 = B(bdata="b2", adata="b2")
b3 = B(bdata="b3", adata="b3")
c1 = C(cdata="c1", bdata="c1", adata="c1")
c2 = C(cdata="c2", bdata="c2", adata="c2")
c3 = C(cdata="c2", bdata="c2", adata="c2")
sess = fixture_session()
for x in (a1, b1, b2, b3, c1, c2, c3):
sess.add(x)
sess.flush()
sess.expunge_all()
# for obj in sess.query(A).all():
# print obj
eq_(
[
A(adata="a1"),
B(bdata="b1", adata="b1"),
B(bdata="b2", adata="b2"),
B(bdata="b3", adata="b3"),
C(cdata="c1", bdata="c1", adata="c1"),
C(cdata="c2", bdata="c2", adata="c2"),
C(cdata="c2", bdata="c2", adata="c2"),
],
sess.query(A).order_by(A.id).all(),
)
eq_(
[
B(bdata="b1", adata="b1"),
B(bdata="b2", adata="b2"),
B(bdata="b3", adata="b3"),
C(cdata="c1", bdata="c1", adata="c1"),
C(cdata="c2", bdata="c2", adata="c2"),
C(cdata="c2", bdata="c2", adata="c2"),
],
sess.query(B).order_by(A.id).all(),
)
eq_(
[
C(cdata="c1", bdata="c1", adata="c1"),
C(cdata="c2", bdata="c2", adata="c2"),
C(cdata="c2", bdata="c2", adata="c2"),
],
sess.query(C).order_by(A.id).all(),
)
|
monetate/sqlalchemy
|
test/orm/inheritance/test_abc_polymorphic.py
|
Python
|
mit
| 3,596 | 0 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
import uuid
from .. import models
class IntegrationAccountSchemasOperations(object):
"""IntegrationAccountSchemasOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
:ivar api_version: The API version. Constant value: "2015-08-01-preview".
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2015-08-01-preview"
self.config = config
def list(
self, resource_group_name, integration_account_name, top=None, filter=None, custom_headers=None, raw=False, **operation_config):
"""Gets a list of integration account schemas.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param integration_account_name: The integration account name.
:type integration_account_name: str
:param top: The number of items to be included in the result.
:type top: int
:param filter: The filter to apply on the operation.
:type filter: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`IntegrationAccountSchemaPaged
<azure.mgmt.logic.models.IntegrationAccountSchemaPaged>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/schemas'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'integrationAccountName': self._serialize.url("integration_account_name", integration_account_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
deserialized = models.IntegrationAccountSchemaPaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.IntegrationAccountSchemaPaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
def get(
self, resource_group_name, integration_account_name, schema_name, custom_headers=None, raw=False, **operation_config):
"""Gets an integration account schema.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param integration_account_name: The integration account name.
:type integration_account_name: str
:param schema_name: The integration account schema name.
:type schema_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`IntegrationAccountSchema
<azure.mgmt.logic.models.IntegrationAccountSchema>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/schemas/{schemaName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'integrationAccountName': self._serialize.url("integration_account_name", integration_account_name, 'str'),
'schemaName': self._serialize.url("schema_name", schema_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('IntegrationAccountSchema', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create_or_update(
self, resource_group_name, integration_account_name, schema_name, schema, custom_headers=None, raw=False, **operation_config):
"""Creates or updates an integration account schema.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param integration_account_name: The integration account name.
:type integration_account_name: str
:param schema_name: The integration account schema name.
:type schema_name: str
:param schema: The integration account schema.
:type schema: :class:`IntegrationAccountSchema
<azure.mgmt.logic.models.IntegrationAccountSchema>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: :class:`IntegrationAccountSchema
<azure.mgmt.logic.models.IntegrationAccountSchema>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/schemas/{schemaName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'integrationAccountName': self._serialize.url("integration_account_name", integration_account_name, 'str'),
'schemaName': self._serialize.url("schema_name", schema_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(schema, 'IntegrationAccountSchema')
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 201]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('IntegrationAccountSchema', response)
if response.status_code == 201:
deserialized = self._deserialize('IntegrationAccountSchema', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def delete(
self, resource_group_name, integration_account_name, schema_name, custom_headers=None, raw=False, **operation_config):
"""Deletes an integration account schema.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param integration_account_name: The integration account name.
:type integration_account_name: str
:param schema_name: The integration account schema name.
:type schema_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Logic/integrationAccounts/{integrationAccountName}/schemas/{schemaName}'
path_format_arguments = {
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'integrationAccountName': self._serialize.url("integration_account_name", integration_account_name, 'str'),
'schemaName': self._serialize.url("schema_name", schema_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 204]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
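# A hedged usage sketch (not part of this module; the client construction
# details are assumptions): iterating the paged result returned by list().
#
#   from azure.mgmt.logic import LogicManagementClient
#   client = LogicManagementClient(credentials, subscription_id)
#   for schema in client.integration_account_schemas.list('my-group', 'my-account'):
#       print(schema.name)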
|
rjschwei/azure-sdk-for-python
|
azure-mgmt-logic/azure/mgmt/logic/operations/integration_account_schemas_operations.py
|
Python
|
mit
| 14,610 | 0.002806 |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
METRIC_TYPE = 'compute.googleapis.com/instance/uptime'
METRIC_LABELS = {'instance_name': 'instance-1'}
RESOURCE_TYPE = 'gce_instance'
RESOURCE_LABELS = {
'project_id': 'my-project',
'zone': 'us-east1-a',
'instance_id': '1234567890123456789',
}
METRIC_KIND = 'DELTA'
VALUE_TYPE = 'DOUBLE'
TS0 = '2016-04-06T22:05:00.042Z'
TS1 = '2016-04-06T22:05:01.042Z'
TS2 = '2016-04-06T22:05:02.042Z'
class TestTimeSeries(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.monitoring.timeseries import TimeSeries
return TimeSeries
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
from gcloud.monitoring.metric import Metric
from gcloud.monitoring.resource import Resource
from gcloud.monitoring.timeseries import Point
VALUE = 60 # seconds
METRIC = Metric(type=METRIC_TYPE, labels=METRIC_LABELS)
RESOURCE = Resource(type=RESOURCE_TYPE, labels=RESOURCE_LABELS)
POINTS = [
Point(start_time=TS0, end_time=TS1, value=VALUE),
Point(start_time=TS1, end_time=TS2, value=VALUE),
]
series = self._makeOne(metric=METRIC,
resource=RESOURCE,
metric_kind=METRIC_KIND,
value_type=VALUE_TYPE,
points=POINTS)
self.assertEqual(series.metric, METRIC)
self.assertEqual(series.resource, RESOURCE)
self.assertEqual(series.metric_kind, METRIC_KIND)
self.assertEqual(series.value_type, VALUE_TYPE)
self.assertEqual(series.points, POINTS)
def test_from_dict(self):
VALUE = 60 # seconds
info = {
'metric': {'type': METRIC_TYPE, 'labels': METRIC_LABELS},
'resource': {'type': RESOURCE_TYPE, 'labels': RESOURCE_LABELS},
'metricKind': METRIC_KIND,
'valueType': VALUE_TYPE,
'points': [
{
'interval': {'startTime': TS0, 'endTime': TS1},
'value': {'doubleValue': VALUE},
},
{
'interval': {'startTime': TS1, 'endTime': TS2},
'value': {'doubleValue': VALUE},
},
],
}
series = self._getTargetClass()._from_dict(info)
self.assertEqual(series.metric.type, METRIC_TYPE)
self.assertEqual(series.metric.labels, METRIC_LABELS)
self.assertEqual(series.resource.type, RESOURCE_TYPE)
self.assertEqual(series.resource.labels, RESOURCE_LABELS)
self.assertEqual(series.metric_kind, METRIC_KIND)
self.assertEqual(series.value_type, VALUE_TYPE)
self.assertEqual(len(series.points), 2)
point1, point2 = series.points
self.assertEqual(point1.start_time, TS0)
self.assertEqual(point1.end_time, TS1)
self.assertEqual(point1.value, VALUE)
self.assertEqual(point2.start_time, TS1)
self.assertEqual(point2.end_time, TS2)
self.assertEqual(point2.value, VALUE)
def test_from_dict_no_points(self):
info = {
'metric': {'type': METRIC_TYPE, 'labels': METRIC_LABELS},
'resource': {'type': RESOURCE_TYPE, 'labels': RESOURCE_LABELS},
'metricKind': METRIC_KIND,
'valueType': VALUE_TYPE,
}
series = self._getTargetClass()._from_dict(info)
self.assertEqual(series.metric.type, METRIC_TYPE)
self.assertEqual(series.metric.labels, METRIC_LABELS)
self.assertEqual(series.resource.type, RESOURCE_TYPE)
self.assertEqual(series.resource.labels, RESOURCE_LABELS)
self.assertEqual(series.metric_kind, METRIC_KIND)
self.assertEqual(series.value_type, VALUE_TYPE)
self.assertEqual(series.points, [])
def test_labels(self):
info = {
'metric': {'type': METRIC_TYPE, 'labels': METRIC_LABELS},
'resource': {'type': RESOURCE_TYPE, 'labels': RESOURCE_LABELS},
'metricKind': METRIC_KIND,
'valueType': VALUE_TYPE,
}
series = self._getTargetClass()._from_dict(info)
labels = {'resource_type': RESOURCE_TYPE}
labels.update(RESOURCE_LABELS)
labels.update(METRIC_LABELS)
self.assertIsNone(series._labels)
self.assertEqual(series.labels, labels)
self.assertIsNotNone(series._labels)
self.assertEqual(series.labels, labels)
class TestPoint(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.monitoring.timeseries import Point
return Point
def _makeOne(self, *args, **kwargs):
return self._getTargetClass()(*args, **kwargs)
def test_constructor(self):
VALUE = 3.14
point = self._makeOne(start_time=TS0, end_time=TS1, value=VALUE)
self.assertEqual(point.start_time, TS0)
self.assertEqual(point.end_time, TS1)
self.assertEqual(point.value, VALUE)
def test_from_dict(self):
VALUE = 3.14
info = {
'interval': {'startTime': TS0, 'endTime': TS1},
'value': {'doubleValue': VALUE},
}
point = self._getTargetClass()._from_dict(info)
self.assertEqual(point.start_time, TS0)
self.assertEqual(point.end_time, TS1)
self.assertEqual(point.value, VALUE)
def test_from_dict_defaults(self):
VALUE = 3.14
info = {
'interval': {'endTime': TS1},
'value': {'doubleValue': VALUE},
}
point = self._getTargetClass()._from_dict(info)
self.assertIsNone(point.start_time)
self.assertEqual(point.end_time, TS1)
self.assertEqual(point.value, VALUE)
def test_from_dict_int64(self):
VALUE = 2 ** 63 - 1
info = {
'interval': {'endTime': TS1},
'value': {'int64Value': str(VALUE)},
}
point = self._getTargetClass()._from_dict(info)
self.assertIsNone(point.start_time)
self.assertEqual(point.end_time, TS1)
self.assertEqual(point.value, VALUE)
|
ininex/geofire-python
|
resource/lib/python2.7/site-packages/gcloud/monitoring/test_timeseries.py
|
Python
|
mit
| 6,799 | 0 |
from __future__ import unicode_literals
import cgi
import codecs
import logging
import sys
from io import BytesIO
from threading import Lock
import warnings
from django import http
from django.conf import settings
from django.core import signals
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
from django.utils import datastructures
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_str, force_text
from django.utils.functional import cached_property
from django.utils import six
# For backwards compatibility -- lots of code uses this in the wild!
from django.http.response import REASON_PHRASES as STATUS_CODE_TEXT # NOQA
logger = logging.getLogger('django.request')
# encode() and decode() expect the charset to be a native string.
ISO_8859_1, UTF_8 = str('iso-8859-1'), str('utf-8')
class LimitedStream(object):
'''
    LimitedStream wraps another stream so that reading cannot proceed past a
    specified number of bytes.
'''
def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
self.stream = stream
self.remaining = limit
self.buffer = b''
self.buf_size = buf_size
def _read_limited(self, size=None):
if size is None or size > self.remaining:
size = self.remaining
if size == 0:
return b''
result = self.stream.read(size)
self.remaining -= len(result)
return result
def read(self, size=None):
if size is None:
result = self.buffer + self._read_limited()
self.buffer = b''
elif size < len(self.buffer):
result = self.buffer[:size]
self.buffer = self.buffer[size:]
else: # size >= len(self.buffer)
result = self.buffer + self._read_limited(size - len(self.buffer))
self.buffer = b''
return result
def readline(self, size=None):
while b'\n' not in self.buffer and \
(size is None or len(self.buffer) < size):
if size:
# since size is not None here, len(self.buffer) < size
chunk = self._read_limited(size - len(self.buffer))
else:
chunk = self._read_limited()
if not chunk:
break
self.buffer += chunk
sio = BytesIO(self.buffer)
if size:
line = sio.readline(size)
else:
line = sio.readline()
self.buffer = sio.read()
return line
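# A minimal illustration (not part of Django) of LimitedStream's contract:
#
#   stream = LimitedStream(BytesIO(b'abcdefgh'), limit=4)
#   stream.read()   # b'abcd' -- nothing past the declared limit is consumed
#   stream.read()   # b''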
class WSGIRequest(http.HttpRequest):
def __init__(self, environ):
script_name = get_script_name(environ)
path_info = get_path_info(environ)
if not path_info:
# Sometimes PATH_INFO exists, but is empty (e.g. accessing
# the SCRIPT_NAME URL without a trailing slash). We really need to
# operate as if they'd requested '/'. Not amazingly nice to force
# the path like this, but should be harmless.
path_info = '/'
self.environ = environ
self.path_info = path_info
self.path = '%s/%s' % (script_name.rstrip('/'), path_info.lstrip('/'))
self.META = environ
self.META['PATH_INFO'] = path_info
self.META['SCRIPT_NAME'] = script_name
self.method = environ['REQUEST_METHOD'].upper()
_, content_params = cgi.parse_header(environ.get('CONTENT_TYPE', ''))
if 'charset' in content_params:
try:
codecs.lookup(content_params['charset'])
except LookupError:
pass
else:
self.encoding = content_params['charset']
self._post_parse_error = False
try:
content_length = int(environ.get('CONTENT_LENGTH'))
except (ValueError, TypeError):
content_length = 0
self._stream = LimitedStream(self.environ['wsgi.input'], content_length)
self._read_started = False
self.resolver_match = None
def _get_scheme(self):
return self.environ.get('wsgi.url_scheme')
def _get_request(self):
warnings.warn('`request.REQUEST` is deprecated, use `request.GET` or '
'`request.POST` instead.', RemovedInDjango19Warning, 2)
if not hasattr(self, '_request'):
self._request = datastructures.MergeDict(self.POST, self.GET)
return self._request
@cached_property
def GET(self):
# The WSGI spec says 'QUERY_STRING' may be absent.
raw_query_string = get_bytes_from_wsgi(self.environ, 'QUERY_STRING', '')
return http.QueryDict(raw_query_string, encoding=self._encoding)
def _get_post(self):
if not hasattr(self, '_post'):
self._load_post_and_files()
return self._post
def _set_post(self, post):
self._post = post
@cached_property
def COOKIES(self):
raw_cookie = get_str_from_wsgi(self.environ, 'HTTP_COOKIE', '')
return http.parse_cookie(raw_cookie)
def _get_files(self):
if not hasattr(self, '_files'):
self._load_post_and_files()
return self._files
POST = property(_get_post, _set_post)
FILES = property(_get_files)
REQUEST = property(_get_request)
class WSGIHandler(base.BaseHandler):
initLock = Lock()
request_class = WSGIRequest
def __call__(self, environ, start_response):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._request_middleware is None:
with self.initLock:
try:
# Check that middleware is still uninitialized.
if self._request_middleware is None:
self.load_middleware()
except:
# Unload whatever middleware we got
self._request_middleware = None
raise
set_script_prefix(get_script_name(environ))
signals.request_started.send(sender=self.__class__)
try:
request = self.request_class(environ)
except UnicodeDecodeError:
logger.warning('Bad Request (UnicodeDecodeError)',
exc_info=sys.exc_info(),
extra={
'status_code': 400,
}
)
response = http.HttpResponseBadRequest()
else:
response = self.get_response(request)
response._handler_class = self.__class__
status = '%s %s' % (response.status_code, response.reason_phrase)
response_headers = [(str(k), str(v)) for k, v in response.items()]
for c in response.cookies.values():
response_headers.append((str('Set-Cookie'), str(c.output(header=''))))
start_response(force_str(status), response_headers)
return response
def get_path_info(environ):
"""
Returns the HTTP request's PATH_INFO as a unicode string.
"""
path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '/')
# It'd be better to implement URI-to-IRI decoding, see #19508.
return path_info.decode(UTF_8)
def get_script_name(environ):
"""
Returns the equivalent of the HTTP request's SCRIPT_NAME environment
variable. If Apache mod_rewrite has been used, returns what would have been
the script name prior to any rewriting (so it's the script name as seen
from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
set (to anything).
"""
if settings.FORCE_SCRIPT_NAME is not None:
return force_text(settings.FORCE_SCRIPT_NAME)
# If Apache's mod_rewrite had a whack at the URL, Apache set either
# SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
# rewrites. Unfortunately not every Web server (lighttpd!) passes this
# information through all the time, so FORCE_SCRIPT_NAME, above, is still
# needed.
script_url = get_bytes_from_wsgi(environ, 'SCRIPT_URL', '')
if not script_url:
script_url = get_bytes_from_wsgi(environ, 'REDIRECT_URL', '')
if script_url:
path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '')
script_name = script_url[:-len(path_info)]
else:
script_name = get_bytes_from_wsgi(environ, 'SCRIPT_NAME', '')
# It'd be better to implement URI-to-IRI decoding, see #19508.
return script_name.decode(UTF_8)
def get_bytes_from_wsgi(environ, key, default):
"""
Get a value from the WSGI environ dictionary as bytes.
key and default should be str objects. Under Python 2 they may also be
unicode objects provided they only contain ASCII characters.
"""
value = environ.get(str(key), str(default))
# Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
# decoded with ISO-8859-1. This is wrong for Django websites where UTF-8
# is the default. Re-encode to recover the original bytestring.
return value if six.PY2 else value.encode(ISO_8859_1)
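# Illustration (Python 3): for a UTF-8 request path '/café' the server stores
# environ['PATH_INFO'] == '/café'.encode('utf-8').decode('iso-8859-1'), and
# get_bytes_from_wsgi() recovers the original bytes b'/caf\xc3\xa9'.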
def get_str_from_wsgi(environ, key, default):
"""
    Get a value from the WSGI environ dictionary as str.
key and default should be str objects. Under Python 2 they may also be
unicode objects provided they only contain ASCII characters.
"""
value = environ.get(str(key), str(default))
# Same comment as above
return value if six.PY2 else value.encode(ISO_8859_1).decode(UTF_8, errors='replace')
|
rooshilp/CMPUT410Lab6
|
virt_env/virt1/lib/python2.7/site-packages/django/core/handlers/wsgi.py
|
Python
|
apache-2.0
| 9,514 | 0.000736 |
import mysql.connector
from model.group import Group
from model.contact import Contact
class DbFixture:
def __init__(self, host, dbname, username, password):
self.host = host
self.dbname = dbname
self.username = username
self.password = password
self.connection = mysql.connector.connect(host=host, database=dbname, user=username, password=password)
self.connection.autocommit = True
def get_group_list(self):
list = []
cursor = self.connection.cursor()
try:
cursor.execute("select group_id, group_name, group_header, group_footer from group_list")
for row in cursor:
(id, name, header, footer) = row
list.append(Group(id=str(id), name=name, header=header, footer=footer))
finally:
cursor.close()
return list
def get_contact_list(self):
list = []
cursor = self.connection.cursor()
try:
cursor.execute("select id, firstname, lastname from addressbook where deprecated = '0000-00-00 00:00:00'")
for row in cursor:
(id, firstname, lastname) = row
list.append(Contact(id=str(id), firstname=firstname, lastname=lastname))
finally:
cursor.close()
return list
def destroy(self):
self.connection.close()
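# A hedged usage sketch (host and credentials are placeholder assumptions):
#
#   db = DbFixture(host="127.0.0.1", dbname="addressbook",
#                  username="root", password="")
#   try:
#       for group in db.get_group_list():
#           print(group)
#   finally:
#       db.destroy()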
|
0verchenko/Addressbook
|
fixture/db.py
|
Python
|
apache-2.0
| 1,388 | 0.005043 |