# -*-python-*-
# GemRB - Infinity Engine Emulator
# Copyright (C) 2011 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# a library of functions for spell(book) management
import GemRB
import CommonTables
import GameCheck
from GUIDefines import *
from ie_stats import *
from ie_action import ACT_LEFT, ACT_RIGHT
from ie_spells import *
from ie_restype import RES_2DA
#################################################################
# this is in the operator module of the standard python lib
def itemgetter(*items):
if len(items) == 1:
item = items[0]
def g(obj):
return obj[item]
else:
def g(obj):
return tuple(obj[item] for item in items)
return g
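# Illustrative use (hypothetical keys and values, mirroring SortUsableSpells below):
#   spells = [{"SpellLevel": 2, "SpellName": "Web"}, {"SpellLevel": 1, "SpellName": "Sleep"}]
#   sorted (spells, key=itemgetter("SpellLevel", "SpellName"))
#   # -> sorted by level first, then by name within a level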
#################################################################
# routines for the actionbar spell access code
def GetUsableMemorizedSpells(actor, BookType):
memorizedSpells = []
spellResRefs = []
for level in range (20): # Saradas NPC teaches you a level 14 special ...
spellCount = GemRB.GetMemorizedSpellsCount (actor, BookType, level, False)
for i in range (spellCount):
Spell0 = GemRB.GetMemorizedSpell (actor, BookType, level, i)
if not Spell0["Flags"]:
# depleted, so skip
continue
if Spell0["SpellResRef"] in spellResRefs:
# add another one, so we can get the count more cheaply later
spellResRefs.append (Spell0["SpellResRef"])
continue
spellResRefs.append (Spell0["SpellResRef"])
Spell = GemRB.GetSpell(Spell0["SpellResRef"])
Spell['BookType'] = BookType # just another sorting key
Spell['SpellIndex'] = GemRB.GetSpelldataIndex (actor, Spell["SpellResRef"], 1<<BookType) # crucial!
if Spell['SpellIndex'] == -1:
print "Error, memorized spell not found!", Spell["SpellResRef"], 1<<BookType
Spell['SpellIndex'] += 1000 * 1<<BookType
memorizedSpells.append (Spell)
if not len(memorizedSpells):
return []
# count and remove the duplicates
memorizedSpells2 = []
for spell in memorizedSpells:
if spell["SpellResRef"] in spellResRefs:
spell['MemoCount'] = spellResRefs.count(spell["SpellResRef"])
while spell["SpellResRef"] in spellResRefs:
spellResRefs.remove(spell["SpellResRef"])
memorizedSpells2.append(spell)
return memorizedSpells2
def GetKnownSpells(actor, BookType):
knownSpells = []
spellResRefs = []
for level in range (9):
spellCount = GemRB.GetKnownSpellsCount (actor, BookType, level)
for i in range (spellCount):
Spell0 = GemRB.GetKnownSpell (actor, BookType, level, i)
if Spell0["SpellResRef"] in spellResRefs:
continue
spellResRefs.append (Spell0["SpellResRef"])
Spell = GemRB.GetSpell(Spell0["SpellResRef"])
Spell['BookType'] = BookType # just another sorting key
Spell['MemoCount'] = 0
Spell['SpellIndex'] = 1000 * 1<<BookType # this gets assigned properly later
knownSpells.append (Spell)
return knownSpells
def GetKnownSpellsLevel(actor, BookType, level):
knownSpells = []
spellResRefs = []
spellCount = GemRB.GetKnownSpellsCount (actor, BookType, level)
for i in range (spellCount):
Spell0 = GemRB.GetKnownSpell (actor, BookType, level, i)
if Spell0["SpellResRef"] in spellResRefs:
continue
spellResRefs.append (Spell0["SpellResRef"])
Spell = GemRB.GetSpell(Spell0["SpellResRef"])
Spell['BookType'] = BookType # just another sorting key
knownSpells.append (Spell)
return knownSpells
def index (list, value):
for i in range(len(list)):
if list[i]==value:
return i
return -1
def GetMemorizedSpells(actor, BookType, level):
memoSpells = []
spellResRefs = []
spellCount = GemRB.GetMemorizedSpellsCount (actor, BookType, level, False)
for i in range (spellCount):
Spell0 = GemRB.GetMemorizedSpell (actor, BookType, level, i)
pos = index(spellResRefs,Spell0["SpellResRef"])
if pos!=-1:
memoSpells[pos]['KnownCount']+=1
memoSpells[pos]['MemoCount']+=Spell0["Flags"]
continue
spellResRefs.append (Spell0["SpellResRef"])
Spell = GemRB.GetSpell(Spell0["SpellResRef"])
Spell['KnownCount'] = 1
Spell['MemoCount'] = Spell0["Flags"]
memoSpells.append (Spell)
return memoSpells
# direct access to the spellinfo struct
# SpellIndex is the index of the spell in the struct, but we add a thousandfold of the spell type for later use in SpellPressed
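# worked example (values illustrative): with BookType 2, a memorized spell at
# spellinfo index 3 is stored as 1000 * (1 << 2) + 3 == 4003 by the getters above,
# while GetSpellinfoSpells below always spoofs the type as 255, giving 255000 + i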
def GetSpellinfoSpells(actor, BookType):
memorizedSpells = []
spellResRefs = GemRB.GetSpelldata (actor)
i = 0
for resref in spellResRefs:
Spell = GemRB.GetSpell(resref)
Spell['BookType'] = BookType # just another sorting key
Spell['SpellIndex'] = i + 1000 * 255 # spoofing the type, so any table would work
Spell['MemoCount'] = 1
memorizedSpells.append (Spell)
i += 1
return memorizedSpells
def SortUsableSpells(memorizedSpells):
# sort it by using the spldisp.2da table
layout = CommonTables.SpellDisplay.GetValue ("USE_ROW", "ROWS")
layout = CommonTables.SpellDisplay.GetRowName (layout)
order = CommonTables.SpellDisplay.GetValue ("DESCENDING", "ROWS")
key1 = CommonTables.SpellDisplay.GetValue (layout, "KEY1")
key2 = CommonTables.SpellDisplay.GetValue (layout, "KEY2")
key3 = CommonTables.SpellDisplay.GetValue (layout, "KEY3")
if key1:
if key3 and key2:
memorizedSpells = sorted(memorizedSpells, key=itemgetter(key1, key2, key3), reverse=order)
elif key2:
memorizedSpells = sorted(memorizedSpells, key=itemgetter(key1, key2), reverse=order)
else:
memorizedSpells = sorted(memorizedSpells, key=itemgetter(key1), reverse=order)
return memorizedSpells
# Sets up all the (12) action buttons for a player character with different spell or innate icons.
# It also sets up the scroll buttons left and right if needed.
# If Start is supplied, it will skip the first few items (used when scrolling through the list)
# BookType is a spellbook type bitfield (1-mage, 2-priest, 4-innate and others in iwd2)
# Offset is a control ID offset here for iwd2 purposes
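# Hypothetical call from an action bar script (window variable and values are
# illustrative only, not taken from a specific GUI script):
#   SetupSpellIcons (ActionsWindow, 2, Start=0, Offset=0)   # first page of priest spells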
def SetupSpellIcons(Window, BookType, Start=0, Offset=0):
actor = GemRB.GameGetFirstSelectedActor ()
# check if we're dealing with a temporary spellbook
if GemRB.GetVar("ActionLevel") == 11:
allSpells = GetSpellinfoSpells (actor, BookType)
else:
# construct the spellbook of usable (not depleted) memorized spells
# the getters expect the BookType as: 0 priest, 1 mage, 2 innate
if BookType == -1:
# Nahal's reckless dweomer can use any known spell
allSpells = GetKnownSpells (actor, IE_SPELL_TYPE_WIZARD)
else:
allSpells = []
for i in range(16):
if BookType & (1<<i):
allSpells += GetUsableMemorizedSpells (actor, i)
if not len(allSpells):
raise AttributeError ("Error, unknown BookType passed to SetupSpellIcons: %d! Bailing out!" %(BookType))
return
if BookType == -1:
memorizedSpells = allSpells
# reset Type, so we can choose the surge spell instead of just getting a redraw of the action bar
GemRB.SetVar("Type", 3)
else:
memorizedSpells = SortUsableSpells(allSpells)
# start creating the controls
import GUICommonWindows
# TODO: ASCOL, ROWS
#AsCol = CommonTables.SpellDisplay.GetValue (layout, "AS_COL")
#Rows = CommonTables.SpellDisplay.GetValue (layout, "ROWS")
More = len(memorizedSpells) > 12 or Start > 0
# scroll left button
if More:
Button = Window.GetControl (Offset)
Button.SetText ("")
if Start:
GUICommonWindows.SetActionIconWorkaround (Button, ACT_LEFT, 0)
Button.SetState (IE_GUI_BUTTON_UNPRESSED)
else:
Button.SetFlags (IE_GUI_BUTTON_NO_IMAGE, OP_SET)
Button.SetTooltip ("")
Button.SetState (IE_GUI_BUTTON_DISABLED)
# disable all spells if fx_disable_spellcasting was run with the same type
# but only if there are any spells of that type to disable
disabled_spellcasting = GemRB.GetPlayerStat(actor, IE_CASTING, 0)
actionLevel = GemRB.GetVar ("ActionLevel")
#order is: mage, cleric, innate, class, song, (defaults to 1, item)
spellSections = [2, 4, 8, 16, 16]
# create the spell icon buttons
buttonCount = 12 - More * 2 # GUIBT_COUNT in PCStatsStruct
for i in range (buttonCount):
Button = Window.GetControl (i+Offset+More)
Button.SetEvent (IE_GUI_BUTTON_ON_RIGHT_PRESS, None)
if i+Start >= len(memorizedSpells):
Button.SetState (IE_GUI_BUTTON_DISABLED)
Button.SetFlags (IE_GUI_BUTTON_NO_IMAGE, OP_SET)
Button.SetText ("")
Button.SetTooltip ("")
continue
Spell = memorizedSpells[i+Start]
spellType = Spell['SpellType']
if spellType > 4:
spellType = 1
else:
spellType = spellSections[spellType]
if BookType == -1:
Button.SetVarAssoc ("Spell", Spell['SpellIndex']+i+Start)
else:
Button.SetVarAssoc ("Spell", Spell['SpellIndex'])
# disable spells that should be cast from the inventory or can't be cast while silenced or ...
# see splspec.2da for all the reasons; silence is handled elsewhere
specialSpell = GemRB.CheckSpecialSpell(actor, Spell['SpellResRef'])
specialSpell = (specialSpell & SP_IDENTIFY) or ((specialSpell & SP_SURGE) and actionLevel == 5)
if specialSpell & SP_SILENCE and Spell['HeaderFlags'] & 0x20000: # SF_IGNORES_SILENCE
specialSpell ^= SP_SILENCE
if specialSpell or (disabled_spellcasting&spellType):
Button.SetState (IE_GUI_BUTTON_DISABLED)
Button.EnableBorder(1, 0)
else:
Button.SetState (IE_GUI_BUTTON_UNPRESSED)
Button.SetEvent (IE_GUI_BUTTON_ON_PRESS, GUICommonWindows.SpellPressed)
Button.SetEvent (IE_GUI_BUTTON_ON_SHIFT_PRESS, GUICommonWindows.SpellShiftPressed)
if Spell['SpellResRef']:
Button.SetSprites ("guibtbut", 0, 0,1,2,3)
Button.SetSpellIcon (Spell['SpellResRef'], 1)
Button.SetFlags (IE_GUI_BUTTON_PICTURE|IE_GUI_BUTTON_ALIGN_BOTTOM|IE_GUI_BUTTON_ALIGN_RIGHT, OP_SET)
Button.SetTooltip (Spell['SpellName'])
if Spell['MemoCount'] > 0 and BookType != -1:
Button.SetText (str(Spell['MemoCount']))
else:
Button.SetText ("")
# scroll right button
if More:
Button = Window.GetControl (Offset+buttonCount+1)
GUICommonWindows.SetActionIconWorkaround (Button, ACT_RIGHT, buttonCount)
Button.SetText ("")
if len(memorizedSpells) - Start > 10:
Button.SetState (IE_GUI_BUTTON_UNPRESSED)
else:
Button.SetState (IE_GUI_BUTTON_DISABLED)
Button.SetFlags (IE_GUI_BUTTON_NO_IMAGE, OP_SET)
Button.SetTooltip ("")
#################################################################
# routines used during character generation and levelup
#################################################################
def GetMageSpells (Kit, Alignment, Level):
MageSpells = []
SpellType = 99
v = CommonTables.Aligns.FindValue (3, Alignment)
Usability = Kit | CommonTables.Aligns.GetValue(v, 5)
HokeyPokey = "MAGE"
WildMages = True
BadSchools = 0
if GameCheck.IsIWD2():
HokeyPokey = "WIZARD"
WildMages = False
# iwd2 has only per-kit exclusion, spells can't override it
ExclusionTable = GemRB.LoadTable ("magesch")
KitRow = ExclusionTable.FindValue ("KIT", Kit)
KitRow = ExclusionTable.GetRowName (KitRow)
BadSchools = ExclusionTable.GetValue (KitRow, "EXCLUSION")
if BadSchools == -1:
BadSchools = 0
SpellsTable = GemRB.LoadTable ("spells")
for i in range(SpellsTable.GetValue (HokeyPokey, str(Level), GTV_INT)):
SpellName = "SPWI%d%02d"%(Level,i+1)
ms = GemRB.GetSpell (SpellName, 1)
if ms == None:
continue
if Usability & ms['SpellExclusion']:
SpellType = 0
elif BadSchools & (1<<ms['SpellSchool']+5):
SpellType = 0
else:
SpellType = 1
if Kit & (1 << ms['SpellSchool']+5): # of matching specialist school
SpellType = 2
# Wild mage spells are of normal schools, so we have to find them
# separately. Generalists can learn any spell but the wild ones, so
# we check if the mage is wild and if a generalist wouldn't be able
# to learn the spell.
if WildMages and Kit == 0x8000 and (0x4000 & ms['SpellExclusion']):
SpellType = 2
MageSpells.append ([SpellName, SpellType])
return MageSpells
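# GetMageSpells returns [resref, type] pairs: type 0 means excluded, 1 learnable,
# 2 a specialist-school or wild-mage match; GetLearnableMageSpells below simply
# keeps every entry with a non-zero type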
def GetLearnableMageSpells (Kit, Alignment, Level):
Learnable = []
for Spell in GetMageSpells (Kit, Alignment, Level):
if Spell[1]:
Learnable.append (Spell[0])
return Learnable
def GetLearnableDomainSpells (pc, Level):
import GUICommon
import GUICommonWindows
Learnable =[]
# only clerics have domains due to listdom.2da restrictions
# no need to double check, as we only call this for IE_IWD2_SPELL_CLERIC
BaseClassName = GUICommon.GetClassRowName (pc)
BaseClassIndex = CommonTables.Classes.GetRowIndex (BaseClassName)
# columns correspond to kits in the same order
KitIndex = GUICommonWindows.GetKitIndex (pc, BaseClassIndex)
if KitIndex == -1:
print "GetLearnableDomainSpells: couldn't determine the kit, bailing out!"
return Learnable
# calculate the offset from the first cleric kit
KitIndex -= CommonTables.Classes.FindValue ("CLASS", BaseClassIndex+1)
DomainSpellTable = GemRB.LoadTable ("listdomn")
# check everything in case someone wants to mod the spell amount
for i in range(DomainSpellTable.GetRowCount ()):
if DomainSpellTable.GetValue (i, KitIndex) == Level:
SpellName = DomainSpellTable.GetRowName (i)
SpellName = DomainSpellTable.GetValue (SpellName, "DOMAIN_RESREF")
Learnable.append (SpellName)
return Learnable
def GetLearnablePriestSpells (Class, Alignment, Level, booktype=0):
Learnable =[]
v = CommonTables.Aligns.FindValue(3, Alignment)
#usability is the bitset we look for
Usability = CommonTables.Aligns.GetValue(v, 5)
HolyMoly = "PRIEST"
SpellListTable = None
if GameCheck.IsIWD2():
HolyMoly = "CLERIC"
SpellListTable = GemRB.LoadTable ("listspll")
SpellsTable = GemRB.LoadTable ("spells")
for i in range(SpellsTable.GetValue (HolyMoly, str (Level), GTV_INT)):
SpellName = "SPPR%d%02d"%(Level,i+1)
ms = GemRB.GetSpell(SpellName, 1)
if ms == None:
continue
if Class & ms['SpellDivine']:
continue
if Usability & ms['SpellExclusion']:
continue
if SpellListTable:
idx = SpellListTable.FindValue ("SPELL_RES_REF", SpellName)
# columns are in the same order as booktypes
if SpellListTable.GetValue (idx, booktype) <= 0:
continue
Learnable.append (SpellName)
return Learnable
# there is no separate druid spell table in the originals
#FIXME: try to do this in a non-hardcoded way?
def GetPriestSpellTable(tablename):
if GameCheck.IsIWD2():
return tablename # no need for this folly
if not GemRB.HasResource (tablename, RES_2DA):
if tablename == "MXSPLDRU":
return "MXSPLPRS"
return tablename
def SetupSpellLevels (pc, TableName, Type, Level):
#don't die on a missing reference
tmp = GetPriestSpellTable(TableName)
if tmp != TableName:
SetupSpellLevels (pc, tmp, Type, Level)
return
Table = GemRB.LoadTable (TableName)
kit = GemRB.GetPlayerStat (pc, IE_KIT)
for i in range(Table.GetColumnCount ()):
# do a string lookup since some tables don't have entries for all levels
value = Table.GetValue (str(Level), str(i+1), GTV_INT)
# specialist mages get an extra spell if they already know that level
# FIXME: get a general routine to find specialists
school = GemRB.GetVar("MAGESCHOOL")
if (Type == IE_SPELL_TYPE_WIZARD and school != 0) or \
(GameCheck.IsIWD2() and Type == IE_IWD2_SPELL_WIZARD and not (kit&0x4000)):
if value > 0:
value += 1
elif Type == IE_IWD2_SPELL_DOMAIN:
if value > 0:
value = 1 # since we're reusing the main cleric table
GemRB.SetMemorizableSpellsCount (pc, value, Type, i)
return
def UnsetupSpellLevels (pc, TableName, Type, Level):
#don't die on a missing reference
tmp = GetPriestSpellTable(TableName)
if tmp != TableName:
UnsetupSpellLevels (pc, tmp, Type, Level)
return
Table = GemRB.LoadTable (TableName)
for i in range(Table.GetColumnCount ()):
GemRB.SetMemorizableSpellsCount (pc, 0, Type, i)
return
# Returns -1 if not found; otherwise, the index of the spell
def HasSpell (Actor, SpellType, Level, Ref):
# loop through each spell in the spell level and check for a matching ref
for i in range (GemRB.GetKnownSpellsCount (Actor, SpellType, Level)):
Spell = GemRB.GetKnownSpell(Actor, SpellType, Level, i)
if Spell["SpellResRef"].upper() == Ref.upper(): # ensure case is the same
return i
# not found
return -1
def HasSorcererBook (pc):
import GUICommon
ClassName = GUICommon.GetClassRowName (pc)
SorcererBook = CommonTables.ClassSkills.GetValue (ClassName, "BOOKTYPE") & 2
return SorcererBook
def CannotLearnSlotSpell ():
pc = GemRB.GameGetSelectedPCSingle ()
# disqualify sorcerers immediately
if HasSorcererBook (pc):
return LSR_STAT
booktype = IE_SPELL_TYPE_WIZARD
if GameCheck.IsIWD2():
booktype = IE_IWD2_SPELL_WIZARD
if GameCheck.IsPST():
import GUIINV
slot, slot_item = GUIINV.ItemHash[GemRB.GetVar ('ItemButton')]
else:
slot_item = GemRB.GetSlotItem (pc, GemRB.GetVar ("ItemButton"))
spell_ref = GemRB.GetItem (slot_item['ItemResRef'], pc)['Spell']
spell = GemRB.GetSpell (spell_ref)
level = spell['SpellLevel']
# school conflicts are handled before this is called from inventory
# add them here if a need arises
# maybe she already knows this spell
if HasSpell (pc, booktype, level-1, spell_ref) != -1:
return LSR_KNOWN
# level check (needs enough intelligence for this level of spell)
dumbness = GemRB.GetPlayerStat (pc, IE_INT)
if level > GemRB.GetAbilityBonus (IE_INT, 1, dumbness):
return LSR_LEVEL
spell_count = GemRB.GetKnownSpellsCount (pc, booktype, level-1)
if spell_count > GemRB.GetAbilityBonus (IE_INT, 2, dumbness):
return LSR_FULL
return 0
def LearnPriestSpells (pc, level, mask):
"""Learns all the priest spells through the given spell level.
Mask distinguishes clerical and druidic spells."""
# make sure we don't have too high a level
booktype = IE_SPELL_TYPE_PRIEST
if GameCheck.IsIWD2():
level = min(9, level)
booktype = mask
mask = 0 # no classflags restrictions like in others (differentiating cleric/rangers)
else:
level = min(7, level)
# go through each level
alignment = GemRB.GetPlayerStat (pc, IE_ALIGNMENT)
for i in range (level):
if booktype == IE_IWD2_SPELL_DOMAIN:
learnable = GetLearnableDomainSpells (pc, i+1)
else:
learnable = GetLearnablePriestSpells (mask, alignment, i+1, booktype)
for spell in learnable:
# if the spell isn't learned, learn it
if HasSpell (pc, booktype, i, spell) < 0:
if GameCheck.IsIWD2() and booktype == IE_IWD2_SPELL_DOMAIN:
GemRB.LearnSpell (pc, spell, 1<<booktype, i)
else:
# perhaps forcing would be fine here too, but it's untested in other games
# and iwd2 cleric schools grant certain spells at different levels
GemRB.LearnSpell (pc, spell)
return
def RemoveKnownSpells (pc, type, level1=1, level2=1, noslots=0, kit=0):
"""Removes all known spells of a given type between two spell levels.
If noslots is true, all memorization counts are set to 0.
Kit is used to identify the priest spell mask of the spells to be removed;
this is only used when removing spells in a dualclass."""
# choose the correct limit based upon class type
if type == IE_SPELL_TYPE_WIZARD or GameCheck.IsIWD2():
limit = 9
elif type == IE_SPELL_TYPE_PRIEST:
limit = 7
# make sure that we get the original kit, if we have one
if kit:
# GetKitIndex and GUICommon live in other GUIScripts modules; import them locally
# (as the rest of this file does) to avoid a NameError when this branch runs
import GUICommon
from GUICommonWindows import GetKitIndex
originalkit = GetKitIndex (pc)
if originalkit: # kitted; find the class value
originalkit = CommonTables.KitList.GetValue (originalkit, 7)
else: # just get the class value
originalkit = GemRB.GetPlayerStat (pc, IE_CLASS)
originalkit = GUICommon.GetClassRowName (originalkit, "class")
# this is specifically for dual-classes and will not work to remove only one
# spell type from a ranger/cleric multi-class
if CommonTables.ClassSkills.GetValue (originalkit, "DRUIDSPELL", GTV_STR) != "*": # knows druid spells
originalkit = 0x8000
elif CommonTables.ClassSkills.GetValue (originalkit, "CLERICSPELL", GTV_STR) != "*": # knows cleric spells
originalkit = 0x4000
else: # don't know any other spells
originalkit = 0
# don't know how this would happen, but better to be safe
if originalkit == kit:
originalkit = 0
elif type == IE_SPELL_TYPE_INNATE:
limit = 1
else: # can't do anything if an improper spell type is sent
return 0
if GameCheck.IsIWD2():
kit = 0 # just skip the dualclass logic
# make sure we're within parameters
if level1 < 1 or level2 > limit or level1 > level2:
return 0
# remove all spells for each level
for level in range (level1-1, level2):
# we need the count because we remove each spell in reverse order
count = GemRB.GetKnownSpellsCount (pc, type, level)
mod = count-1
for spell in range (count):
# see if we need to check for kit
if type == IE_SPELL_TYPE_PRIEST and kit:
# get the spell's ref data
ref = GemRB.GetKnownSpell (pc, type, level, mod-spell)
ref = GemRB.GetSpell (ref['SpellResRef'], 1)
# we have to look at the originalkit as well specifically for ranger/cleric dual-classes
# we wouldn't want to remove all cleric spells and druid spells if we lost our cleric class
# only the cleric ones
if kit&ref['SpellDivine'] or (originalkit and not originalkit&ref['SpellDivine']):
continue
# remove the spell
GemRB.RemoveSpell (pc, type, level, mod-spell)
# remove memorization counts if desired
if noslots:
GemRB.SetMemorizableSpellsCount (pc, 0, type, level)
# return success
return 1
# learning/memorization wrapper for when you want to give more than 1 instance
# learn a spell if we don't know it yet, otherwise just increase the memo count
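# e.g. LearnSpell (pc, "SPPR103", booktype, 0, 3) learns the spell if it is not
# yet known and then memorizes it twice more (resref and counts illustrative only)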
def LearnSpell(pc, spellref, booktype, level, count, flags=0):
SpellIndex = HasSpell (pc, booktype, level, spellref)
if SpellIndex < 0:
ret = GemRB.LearnSpell (pc, spellref, flags)
if ret != LSR_OK and ret != LSR_KNOWN:
raise RuntimeError, "Failed learning spell: %s !" %(spellref)
return
SpellIndex = HasSpell (pc, booktype, level, spellref)
count -= 1
if count <= 0:
return
if SpellIndex == -1:
# should never happen
raise RuntimeError, "LearnSpell: Severe spellbook problems: %s !" %(spellref)
return
for j in range(count):
GemRB.MemorizeSpell (pc, booktype, level, SpellIndex, flags&LS_MEMO)
repo_name: flaing/gemrb | path: gemrb/GUIScripts/Spellbook.py | language: Python | license: gpl-2.0 | size: 22,628 | score: 0.033543
# Refer to the following link for help:
# http://docs.gunicorn.org/en/latest/settings.html
command = '/home/lucas/www/reddit.lucasou.com/reddit-env/bin/gunicorn'
pythonpath = '/home/lucas/www/reddit.lucasou.com/reddit-env/flask_reddit'
bind = '127.0.0.1:8040'
workers = 1
user = 'lucas'
accesslog = '/home/lucas/logs/reddit.lucasou.com/gunicorn-access.log'
errorlog = '/home/lucas/logs/reddit.lucasou.com/gunicorn-error.log'
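# Assumed usage, not taken from the repo: a config module like this is normally
# passed to gunicorn on the command line, e.g.
#   gunicorn -c server/gunicorn_config.py flask_reddit:app
# where the WSGI entry point name is only a guess.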
repo_name: codelucas/flask_reddit | path: server/gunicorn_config.py | language: Python | license: mit | size: 425 | score: 0
import unittest
from katas.kyu_7.guess_my_number import guess_my_number
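# Judging from the expected values below, guess_my_number() takes the digits typed
# so far (always beginning with a leading '0'), drops that leading zero, and stamps
# the remaining digits into the '###-###-####' mask starting at positions 0 and 5.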
class GuessMyNumberTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(guess_my_number('0'), '###-###-####')
def test_equals_2(self):
self.assertEqual(guess_my_number('01'), '1##-##1-####')
def test_equals_3(self):
self.assertEqual(guess_my_number('012'), '12#-##1-2###')
def test_equals_4(self):
self.assertEqual(guess_my_number('0123'), '123-##1-23##')
def test_equals_5(self):
self.assertEqual(guess_my_number('01234'), '123-4#1-234#')
def test_equals_6(self):
self.assertEqual(guess_my_number('012345'), '123-451-2345')
repo_name: the-zebulan/CodeWars | path: tests/kyu_7_tests/test_guess_my_number.py | language: Python | license: mit | size: 693 | score: 0
# -*- coding: utf-8 -*-
#***********************************************************************
#
# Image Analysis
# ----------------------------------------------------------------------
# QGIS Image Analysis plugin for image segmentation and classification
#
# Vitor Hirota (vitor.hirota [at] gmail.com), INPE 2013
#
# This source is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This code is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# A copy of the GNU General Public License is available on the World
# Wide Web at <http://www.gnu.org/copyleft/gpl.html>. You can also
# obtain it by writing to the Free Software Foundation, Inc., 59 Temple
# Place - Suite 330, Boston, MA 02111-1307, USA.
#
#***********************************************************************
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import QMessageBox, QWidget
from qgis.core import QgsMessageLog, QgsMapLayerRegistry
from qgis.gui import QgsMessageBar
from analysis_ui import Ui_AnalysisWidget as Ui_Widget
import util
import classifier
import segmenter
import statistics
# from QgsMapLayer
VectorLayer = 0
RasterLayer = 1
# MESSAGE_LEVEL = util.AttrDict({
# 'INFO': 0,
# 'WARNING': 1,
# 'CRITICAL': 2
# })
class AnalysisWidget(QWidget, Ui_Widget):
def __init__(self, iface):
QWidget.__init__(self)
self.setupUi(self)
self.iface = iface
self.layer_registry = QgsMapLayerRegistry.instance()
self.layers = self.iface.legendInterface().layers()
self.task = None
self.iface.mapCanvas().layersChanged.connect(self.layers_changed)
self.tabs = ['segm', 'stats', 'clf']
self.tab_ipts = {
'segm': [self.segm_raster_ipt, self.segm_clusters_ipt],
'stats': [self.stats_raster_ipt, self.stats_segm_ipt],
'clf': [self.class_segm_ipt, self.class_roi_ipt,
self.class_roi_field, self.svm_kernel_ipt, self.svm_c_ipt,
self.svm_kgamma_ipt, self.svm_kdegree_ipt,
self.svm_kcoeff_ipt],
}
self.modules = {
'segm': segmenter,
'stats': statistics,
'clf': classifier
}
self.ok_btn.pressed.connect(self.run)
self.tabWidget.currentChanged['int'].connect(self.update_tab_focus)
self.tabWidgetClf.currentChanged['int'].connect(self.update_subfocus_clf)
self.update_tab_focus(self.tabWidget.currentIndex())
self.class_roi_ipt.currentIndexChanged['QString'].connect(self.update_roi_field)
self.svm_kernel_ipt.currentIndexChanged.connect(self.update_svm_attr)
def log(self, msg, level='info'):
level_dict = {
'info': QgsMessageLog.INFO,
'warn': QgsMessageLog.WARNING,
'crit': QgsMessageLog.CRITICAL,
}
QgsMessageLog.logMessage(str(msg), level=level_dict[level])
def layers_changed(self):
layers = self.iface.legendInterface().layers()
if self.layers != layers:
self.layers = layers
self.update_tab_focus(self.tabWidget.currentIndex())
def get_layers(self, ltype):
return [l for l in self.layers if l.type() == ltype]
def get_layer(self, ltype, name):
return [l for l in self.get_layers(ltype) if l.name() == name][0]
def update_combo_box(self, ltype, ipt):
ipt.clear()
ipt.addItems([u'',] + [l.name() for l in self.get_layers(ltype)])
def update_tab_order(self, inputs):
ipts = [self.tabWidget, self.ok_btn]
ipts[1:1] = inputs
for i in range(len(ipts)-1):
self.setTabOrder(ipts[i], ipts[i+1])
def update_tab_focus(self, index):
getattr(self, 'update_focus_%s' % self.tabs[index])()
self.tabWidget.setFocus()
def update_focus_segm(self):
# update combo boxes
self.update_combo_box(RasterLayer, self.segm_raster_ipt)
# tab order
self.update_tab_order(self.tab_ipts['segm'])
def update_focus_stats(self):
self.update_combo_box(RasterLayer, self.stats_raster_ipt)
self.update_combo_box(VectorLayer, self.stats_segm_ipt)
# tab order
self.update_tab_order(self.tab_ipts['stats'])
def update_focus_clf(self):
self.update_combo_box(0, self.class_segm_ipt)
self.update_combo_box(0, self.class_roi_ipt)
self.update_subfocus_clf()
def update_subfocus_clf(self):
idx = self.tabWidgetClf.currentIndex() and [3, None] or [None, 3]
ipts = self.tab_ipts['clf'][slice(*idx)] + [self.tabWidgetClf]
self.update_tab_order(ipts)
def update_roi_field(self, layer_name):
self.class_roi_field.clear()
if layer_name:
layer = self.get_layer(VectorLayer, layer_name)
fields = layer.dataProvider().fieldNameMap().keys()
self.class_roi_field.addItems(fields)
def update_svm_attr(self, item_index):
kernel = self.svm_kernel_ipt.currentText().lower()
ipts = self.tab_ipts['clf'][5:]
attr_list = {
'linear': [],
'poly': ipts[1:],
'rbf': ipts[0:1],
'sigmoid': ipts[2:3],
}
for ipt in ipts:
ipt.setEnabled(ipt in attr_list[kernel])
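# e.g. choosing the 'rbf' kernel leaves only the gamma input enabled, 'poly'
# enables the degree and coefficient inputs, and 'linear' disables all three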
def get_text(self, ipt):
try:
return ipt.currentText()
except AttributeError:
return ipt.cleanText()
def run(self):
# create a new task instance
tab_name = self.tabs[self.tabWidget.currentIndex()]
self.log('starting %s' % tab_name)
# set task up
args = [self.get_text(ipt)
for ipt in self.tab_ipts[tab_name]]
task = self.modules[tab_name].Task(self, *args)
# validate
if not task.is_valid():
QMessageBox.critical(self.iface.mainWindow(), 'Error',
task.invalid)
return
# update gui
self.ok_btn.setEnabled(False)
self.cancel_btn.pressed.connect(task.kill)
self.progressBar.setValue(0)
# configure QgsMessageBar
action = self.tabWidget.tabText(self.tabWidget.currentIndex())
messageBar = self.iface.messageBar().createMessage(action, '')
msgProgressBar = QtGui.QProgressBar()
msgProgressBar.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
cancelButton = QtGui.QPushButton()
cancelButton.setText('Cancel')
cancelButton.clicked.connect(self.cancel_btn.click)
messageBar.layout().addWidget(msgProgressBar)
messageBar.layout().addWidget(cancelButton)
self.iface.messageBar().pushWidget(messageBar, QgsMessageBar.INFO)
# hold objects
self.messageBar = messageBar
self.action = action
# fire task
task.run()
# self.ok_btn.setEnabled(False)
# self.progressBar.setValue(0)
# self.thread = QtCore.QThread()
# # create a new worker instance
# tab_name = self.tabs[self.tabWidget.currentIndex()]
# self.log('starting %s' % tab_name)
# # set worker up
# args = [self.get_text(ipt) for ipt in self.tab_ipts[tab_name]]
# worker = self.modules[tab_name].Worker(self)
# self.worker = worker
# # worker = util.Worker()
# self.post_run = worker.post_run
# worker.setup(*args)
# # validate
# if not worker.is_valid():
# QMessageBox.critical(self.iface.mainWindow(), 'Error',
# worker.invalid)
# self.kill()
# return
# # configure QgsMessageBar
# action = self.tabWidget.tabText(self.tabWidget.currentIndex())
# messageBar = self.iface.messageBar().createMessage(action, '')
# progressBar = QtGui.QProgressBar()
# progressBar.setAlignment(QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
# cancelButton = QtGui.QPushButton()
# cancelButton.setText('Cancel')
# cancelButton.clicked.connect(self.cancel_btn.click)
# messageBar.layout().addWidget(progressBar)
# messageBar.layout().addWidget(cancelButton)
# self.iface.messageBar().pushWidget(messageBar, QgsMessageBar.INFO)
# # start the worker in a new thread
# worker.moveToThread(self.thread)
# self.thread.started.connect(worker.run)
# # setup signals
# worker.log.connect(self.log)
# worker.status.connect(self.status)
# worker.progress.connect(progressBar.setValue)
# worker.progress.connect(self.progressBar.setValue)
# worker.error.connect(self.error)
# worker.finished.connect(self.finish)
# # hold objects
# self.messageBar = messageBar
# self.action = action
# # fire thread
# self.thread.start(priority=5)
# self.thread.exec_()
if __name__ == "__main__":
pass
repo_name: vitorhirota/QgisImageAnalysis | path: analysis_widget.py | language: Python | license: gpl-2.0 | size: 9,304 | score: 0.00129
#!/usr/bin/env python
#
# GrovePi example: use the Grove Light Sensor and the LED together to turn the LED on and off when the background light is greater than a threshold.
# Modules:
# http://www.seeedstudio.com/wiki/Grove_-_Light_Sensor
# http://www.seeedstudio.com/wiki/Grove_-_LED_Socket_Kit
#
# The GrovePi connects the Raspberry Pi and Grove sensors. You can learn more about GrovePi here: http://www.dexterindustries.com/GrovePi
#
# Have a question about this example? Ask on the forums here: http://www.dexterindustries.com/forum/?forum=grovepi
#
'''
## License
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2015 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import time
import grovepi
# Connect the Grove Light Sensor to analog port A0
# SIG,NC,VCC,GND
light_sensor = 0
# Connect the LED to digital port D4
# SIG,NC,VCC,GND
led = 4
# Turn on LED once sensor exceeds threshold resistance
threshold = 10
grovepi.pinMode(light_sensor,"INPUT")
grovepi.pinMode(led,"OUTPUT")
while True:
try:
# Get sensor value
sensor_value = grovepi.analogRead(light_sensor)
# Calculate resistance of sensor in K
resistance = (float)(1023 - sensor_value) * 10 / sensor_value
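# e.g. a reading of 512 gives (1023 - 512) * 10 / 512 ~= 9.98 K, just under
# the default threshold of 10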
if resistance > threshold:
# Send HIGH to switch on LED
grovepi.digitalWrite(led,1)
else:
# Send LOW to switch off LED
grovepi.digitalWrite(led,0)
print("sensor_value = %d resistance =%.2f" %(sensor_value, resistance))
time.sleep(.5)
except IOError:
print ("Error")
repo_name: penoud/GrovePi | path: Software/Python/grove_light_sensor.py | language: Python | license: mit | size: 2,674 | score: 0.004114
#!/usr/bin/python
import logging
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
import procgame.game, sys, os
import procgame.config
import random
import procgame.sound
sys.path.insert(0,os.path.pardir)
import bingo_emulator.common.units as units
import bingo_emulator.common.functions as functions
from bingo_emulator.graphics import methods as graphics
from bingo_emulator.graphics.spelling_bee import *
class MulticardBingo(procgame.game.Mode):
def __init__(self, game):
super(MulticardBingo, self).__init__(game=game, priority=5)
self.holes = []
self.startup()
self.game.sound.register_music('motor', "audio/woodrail_motor.wav")
self.game.sound.register_music('search1', "audio/automatic_search_one_ball.wav")
self.game.sound.register_music('search2', "audio/automatic_search_two_ball.wav")
self.game.sound.register_music('search3', "audio/automatic_search_three_ball.wav")
self.game.sound.register_music('search4', "audio/automatic_search_four_ball.wav")
self.game.sound.register_music('search5', "audio/automatic_search_five_ball.wav")
self.game.sound.register_music('search6', "audio/automatic_search_six_ball.wav")
self.game.sound.register_music('search7', "audio/automatic_search_seven_ball.wav")
self.game.sound.register_music('search8', "audio/automatic_search_eight_ball.wav")
self.game.sound.register_sound('add', "audio/woodrail_coin.wav")
self.game.sound.register_sound('tilt', "audio/tilt.wav")
self.game.sound.register_sound('step', "audio/step.wav")
def sw_coin_active(self, sw):
self.game.tilt.disengage()
self.regular_play()
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
def sw_startButton_active(self, sw):
if self.game.replays > 0 or self.game.switches.freeplay.is_active():
self.game.tilt.disengage()
self.regular_play()
def sw_trough4_active_for_1s(self, sw):
if self.game.ball_count.position >= 4:
self.timeout_actions()
def timeout_actions(self):
if (self.game.timer.position < 39):
self.game.timer.step()
self.delay(name="timeout", delay=5.0, handler=self.timeout_actions)
else:
self.game.timer.step()
self.tilt_actions()
def sw_trough8_closed(self, sw):
if self.game.start.status == False:
self.game.ball_count.position -= 1
self.game.returned = True
self.check_lifter_status()
else:
self.check_lifter_status()
def sw_enter_active(self, sw):
if self.game.switches.left.is_active() and self.game.switches.right.is_active():
self.game.end_run_loop()
os.system("/home/nbaldridge/proc/bingo_emulator/start_game.sh spelling_bee")
def check_shutter(self, start=0):
if start == 1:
if self.game.switches.smRunout.is_active():
if self.game.switches.shutter.is_active():
self.game.coils.shutter.disable()
else:
if self.game.switches.shutter.is_inactive():
if self.game.switches.smRunout.is_active():
self.game.coils.shutter.disable()
def regular_play(self):
self.holes = []
self.cancel_delayed(name="search")
self.cancel_delayed(name="card1_replay_step_up")
self.cancel_delayed(name="card2_replay_step_up")
self.cancel_delayed(name="card3_replay_step_up")
self.cancel_delayed(name="card4_replay_step_up")
self.cancel_delayed(name="timeout")
self.game.search_index.disengage()
self.game.coils.counter.pulse()
self.game.returned = False
self.game.sound.stop('add')
self.game.sound.play('add')
if self.game.start.status == True:
if self.game.selector.position <= 3:
self.game.selector.step()
if self.game.switches.shutter.is_inactive():
self.game.coils.shutter.enable()
self.replay_step_down()
self.check_lifter_status()
else:
self.game.start.engage(self.game)
self.game.card1_replay_counter.reset()
self.game.card2_replay_counter.reset()
self.game.card3_replay_counter.reset()
self.game.card4_replay_counter.reset()
self.game.average.disengage()
self.game.good.disengage()
self.game.expert.disengage()
self.game.average.engage(self.game)
self.game.selector.reset()
self.game.ball_count.reset()
self.game.timer.reset()
self.game.sound.play_music('motor', -1)
self.regular_play()
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
self.game.tilt.disengage()
def check_lifter_status(self):
if self.game.tilt.status == False:
if self.game.switches.trough8.is_closed() and self.game.switches.trough5.is_open() and self.game.switches.trough4.is_open() and self.game.switches.trough3.is_closed() and self.game.switches.trough2.is_closed():
if self.game.switches.shooter.is_open():
self.game.coils.lifter.enable()
self.game.returned = False
else:
if self.game.start.status == False:
if self.game.switches.trough4.is_open():
if self.game.switches.shooter.is_open():
if self.game.switches.gate.is_closed():
self.game.coils.lifter.enable()
else:
if self.game.returned == True and self.game.ball_count.position == 4:
if self.game.switches.shooter.is_open():
self.game.coils.lifter.enable()
self.game.returned = False
def sw_smRunout_active_for_1ms(self, sw):
if self.game.start.status == True:
self.check_shutter(1)
else:
self.check_shutter()
def sw_trough1_closed(self, sw):
if self.game.switches.shooter.is_closed():
self.game.coils.lifter.disable()
def sw_ballLift_active_for_500ms(self, sw):
if self.game.tilt.status == False:
if self.game.switches.shooter.is_open():
if self.game.ball_count.position < 5:
self.game.coils.lifter.enable()
def sw_gate_inactive_for_1ms(self, sw):
self.game.start.disengage()
if self.game.switches.shutter.is_active():
self.game.coils.shutter.enable()
self.game.ball_count.step()
if self.game.ball_count.position >= 4:
if self.game.search_index.status == False:
self.search()
if self.game.ball_count.position <= 4:
self.check_lifter_status()
# This is really nasty, but it is how we render graphics for each individual hole.
# numbers are added to (or removed from) a list. In this way, I can re-use the same
# routine even for games where there are ball return functions like Surf Club.
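# e.g. a ball dropping into hole 7 appends 7 to self.holes; games with ball
# return features (like Surf Club, mentioned above) would also remove entries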
def sw_hole1_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(1)
if self.game.good.status == True:
self.game.average.disengage()
self.game.good.disengage()
self.game.expert.engage(self.game)
else:
self.game.average.disengage()
self.game.good.engage(self.game)
if self.game.ball_count.position >= 4:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
def sw_hole2_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(2)
if self.game.ball_count.position >= 4:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
def sw_hole3_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(3)
if self.game.ball_count.position >= 4:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
def sw_hole4_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(4)
if self.game.ball_count.position >= 4:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
def sw_hole5_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(5)
if self.game.ball_count.position >= 4:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
def sw_hole6_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(6)
if self.game.good.status == True:
self.game.average.disengage()
self.game.good.disengage()
self.game.expert.engage(self.game)
else:
self.game.average.disengage()
self.game.good.engage(self.game)
if self.game.ball_count.position >= 4:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
def sw_hole7_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(7)
if self.game.ball_count.position >= 4:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
def sw_hole8_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(8)
if self.game.ball_count.position >= 4:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
def sw_hole9_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(9)
if self.game.ball_count.position >= 4:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
def sw_hole10_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(10)
if self.game.ball_count.position >= 4:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
def sw_hole11_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(11)
if self.game.ball_count.position >= 4:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
def sw_hole12_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(12)
if self.game.ball_count.position >= 4:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
def sw_hole13_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(13)
if self.game.ball_count.position >= 4:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
def sw_hole14_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(14)
if self.game.ball_count.position >= 4:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
def sw_hole15_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(15)
if self.game.ball_count.position >= 4:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
def sw_hole16_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(16)
if self.game.ball_count.position >= 4:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
def sw_hole17_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(17)
if self.game.ball_count.position >= 4:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
def sw_hole18_active_for_40ms(self, sw):
if self.game.tilt.status == False and self.game.start.status == False:
self.holes.append(18)
if self.game.ball_count.position >= 4:
if self.game.search_index.status == False:
self.search()
self.search_sounds()
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
def sw_replayReset_active(self, sw):
self.game.anti_cheat.disengage()
self.holes = []
graphics.spelling_bee.display(self)
self.tilt_actions()
self.replay_step_down(self.game.replays)
def tilt_actions(self):
self.game.start.disengage()
self.cancel_delayed(name="replay_reset")
self.cancel_delayed(name="card1_replay_step_up")
self.cancel_delayed(name="card2_replay_step_up")
self.cancel_delayed(name="card3_replay_step_up")
self.cancel_delayed(name="card4_replay_step_up")
self.cancel_delayed(name="timeout")
self.game.search_index.disengage()
if self.game.ball_count.position == 0:
if self.game.switches.shutter.is_active():
self.game.coils.shutter.enable()
self.holes = []
self.game.selector.reset()
self.game.ball_count.reset()
self.game.anti_cheat.engage(game)
self.game.tilt.engage(self.game)
self.game.average.disengage()
self.game.good.disengage()
self.game.expert.disengage()
self.game.sound.stop_music()
self.game.sound.play('tilt')
# displays "Tilt" on the backglass, you have to recoin.
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
def search_sounds(self):
self.game.sound.stop_music()
if self.game.ball_count.position == 1:
self.game.sound.play_music('search1', -1)
if self.game.ball_count.position == 2:
self.game.sound.play_music('search2', -1)
if self.game.ball_count.position == 3:
self.game.sound.play_music('search3', -1)
if self.game.ball_count.position == 4:
self.game.sound.play_music('search4', -1)
if self.game.ball_count.position == 5:
self.game.sound.play_music('search5', -1)
if self.game.ball_count.position == 6:
self.game.sound.play_music('search6', -1)
if self.game.ball_count.position == 7:
self.game.sound.play_music('search7', -1)
if self.game.ball_count.position == 8:
self.game.sound.play_music('search8', -1)
def sw_tilt_active(self, sw):
if self.game.tilt.status == False:
self.tilt_actions()
def replay_step_down(self, number=0):
if number > 0:
if number > 1:
self.game.replays -= 1
graphics.replay_step_down(self.game.replays, graphics.spelling_bee.reel1, graphics.spelling_bee.reel10, graphics.spelling_bee.reel100)
self.game.coils.registerDown.pulse()
number -= 1
graphics.spelling_bee.display(self)
self.delay(name="replay_reset", delay=0.13, handler=self.replay_step_down, param=number)
elif number == 1:
self.game.replays -= 1
graphics.replay_step_down(self.game.replays, graphics.spelling_bee.reel1, graphics.spelling_bee.reel10, graphics.spelling_bee.reel100)
self.game.coils.registerDown.pulse()
number -= 1
graphics.spelling_bee.display(self)
self.cancel_delayed(name="replay_reset")
else:
if self.game.replays > 0:
self.game.replays -= 1
graphics.replay_step_down(self.game.replays, graphics.spelling_bee.reel1, graphics.spelling_bee.reel10, graphics.spelling_bee.reel100)
self.delay(name="display", delay=0.1, handler=graphics.spelling_bee.display, param=self)
self.game.coils.registerDown.pulse()
def replay_step_up(self):
if self.game.replays < 99:
self.game.replays += 1
graphics.replay_step_up(self.game.replays, graphics.spelling_bee.reel1, graphics.spelling_bee.reel10, graphics.spelling_bee.reel100)
self.game.coils.registerUp.pulse()
graphics.spelling_bee.display(self)
def search(self):
# The search workflow/logic will determine if you actually have a winner, but it is a bit tricky.
# if the ball is in a particular hole, the search relays need to click and/or clack, and
# when you have at least three going at once, it should latch on the search index and score.
# This scoring is tempered by the selector disc. You have to have the card enabled that you're
# winning on. This whole process will have to happen on a rotational basis. The search should really
# begin immediately upon the first ball landing in the hole.
# I suspect that the best, fastest way to complete the search is actually to reimplement the mechanical
# search activity. For each revolution of the search disc (which happens about every 5-7 seconds), the
# game will activate() each search relay for each 'hot' rivet on the search disc. This can be on a different
# wiper finger for each set of rivets on the search disc.
# Replay counters also need to be implemented to prevent the supplemental searches from scoring.
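# Worked example (hypothetical ball positions): with self.holes == [2, 3, 7],
# the wiper data for disc position 1 is {2:1, 3:2, 7:3} on card 1, so search
# relays 1, 2 and 3 all match; assuming functions.count_seq() reports the longest
# consecutive run, s == 3 and find_winner(3, 1, 0) fires, provided card 1 is
# enabled on the selector.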
for i in range(0, 100):
if i <= 50:
self.r = self.closed_search_relays(self.game.searchdisc.position)
self.game.searchdisc.spin()
if i >= 51:
self.r = self.closed_search_relays(self.game.searchdisc2.position + 50)
self.game.searchdisc2.spin()
self.wipers = self.r[0]
self.card = self.r[1]
self.four = self.r[2]
# From here, I need to determine based on the value of r, whether to latch the search index and score. For Bright Lights,
# I need to determine the best winner on each card. To do this, I must compare the position of the replay counter before
# determining the winner. Reminder that my replay counters are a 1:1 representation.
self.match = []
for key in self.wipers:
for number in self.holes:
if number == key:
self.match.append(self.wipers[key])
relays = sorted(set(self.match))
#TODO Play sound for each relay closure.
s = functions.count_seq(relays)
if self.game.selector.position >= self.card:
if s >= 3:
self.find_winner(s, self.card, self.four)
break
def find_winner(self, relays, card, four):
if self.game.search_index.status == False and self.game.replays < 99:
if card == 1:
if relays == 3 and not four:
amount = 2
if self.game.good.status == True:
amount = 3
if self.game.expert.status == True:
amount = 16
if self.game.card1_replay_counter.position < amount:
self.game.search_index.engage(self.game)
self.card1_replay_step_up(amount - self.game.card1_replay_counter.position)
if relays == 4:
amount = 8
if self.game.good.status == True:
amount = 12
if self.game.card1_replay_counter.position < amount:
self.game.search_index.engage(self.game)
self.card1_replay_step_up(amount - self.game.card1_replay_counter.position)
if card == 2:
if relays == 3 and not four:
amount = 2
if self.game.good.status == True:
amount = 3
if self.game.expert.status == True:
amount = 16
if self.game.card2_replay_counter.position < amount:
self.game.search_index.engage(self.game)
self.card2_replay_step_up(amount - self.game.card2_replay_counter.position)
if relays == 4:
amount = 8
if self.game.good.status == True:
amount = 12
if self.game.card2_replay_counter.position < amount:
self.game.search_index.engage(self.game)
self.card2_replay_step_up(amount - self.game.card2_replay_counter.position)
if card == 3:
if relays == 3 and not four:
amount = 2
if self.game.good.status == True:
amount = 3
if self.game.expert.status == True:
amount = 16
if self.game.card3_replay_counter.position < amount:
self.game.search_index.engage(self.game)
self.card3_replay_step_up(amount - self.game.card3_replay_counter.position)
if relays == 4:
amount = 8
if self.game.good.status == True:
amount = 12
if self.game.card3_replay_counter.position < amount:
self.game.search_index.engage(self.game)
self.card3_replay_step_up(amount - self.game.card3_replay_counter.position)
if card == 4:
if relays == 3 and not four:
amount = 2
if self.game.good.status == True:
amount = 3
if self.game.expert.status == True:
amount = 16
if self.game.card4_replay_counter.position < amount:
self.game.search_index.engage(self.game)
self.card4_replay_step_up(amount - self.game.card4_replay_counter.position)
if relays == 4:
amount = 8
if self.game.good.status == True:
amount = 12
if self.game.card4_replay_counter.position < amount:
self.game.search_index.engage(self.game)
self.card4_replay_step_up(amount - self.game.card4_replay_counter.position)
def card1_replay_step_up(self, number):
self.game.sound.stop_music()
if number >= 1:
self.game.card1_replay_counter.step()
number -= 1
self.replay_step_up()
if self.game.replays == 99:
number = 0
self.delay(name="card1_replay_step_up", delay=0.1, handler=self.card1_replay_step_up, param=number)
else:
self.game.search_index.disengage()
self.cancel_delayed(name="card1_replay_step_up")
self.search_sounds()
self.search()
def card2_replay_step_up(self, number):
self.game.sound.stop_music()
if number >= 1:
self.game.card2_replay_counter.step()
number -= 1
self.replay_step_up()
if self.game.replays == 99:
number = 0
self.delay(name="card2_replay_step_up", delay=0.1, handler=self.card2_replay_step_up, param=number)
else:
self.game.search_index.disengage()
self.cancel_delayed(name="card2_replay_step_up")
self.search_sounds()
self.search()
def card3_replay_step_up(self, number):
self.game.sound.stop_music()
if number >= 1:
self.game.card3_replay_counter.step()
number -= 1
self.replay_step_up()
if self.game.replays == 99:
number = 0
self.delay(name="card3_replay_step_up", delay=0.1, handler=self.card3_replay_step_up, param=number)
else:
self.game.search_index.disengage()
self.cancel_delayed(name="card3_replay_step_up")
self.search_sounds()
self.search()
def card4_replay_step_up(self, number):
self.game.sound.stop_music()
if number >= 1:
self.game.card4_replay_counter.step()
number -= 1
self.replay_step_up()
if self.game.replays == 99:
number = 0
self.delay(name="card4_replay_step_up", delay=0.1, handler=self.card4_replay_step_up, param=number)
else:
self.game.search_index.disengage()
self.cancel_delayed(name="card4_replay_step_up")
self.search_sounds()
self.search()
def closed_search_relays(self, rivets):
# This function is critical, as it will determine which card is returned, etc. I need to check both the position of the
# replay counter for the card, as well as the selector unit to ensure that the card is selected. We will get a row back
# that has the numbers on the position which will return the search relay connected. When three out of the five relays
# are connected, we get a winner!
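# For instance, self.pos[1] = {2:1, 3:2, 7:3} below means: with the disc at rivet
# position 1, balls in holes 2, 3 and 7 close search relays 1, 2 and 3 on card 1.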
self.pos = {}
# Card 1
self.pos[0] = {}
self.pos[1] = {2:1, 3:2, 7:3}
self.pos[2] = {18:1, 2:2, 15:3}
self.pos[3] = {2:1, 10:2, 5:3}
self.pos[4] = {17:1, 2:2, 3:3}
self.pos[5] = {8:1, 2:2, 15:3, 11:4}
self.pos[6] = {9:1, 2:2, 16:3, 14:4}
self.pos[7] = {3:1, 13:2, 2:3, 15:4}
self.pos[8] = {17:1, 2:2, 14:3, 7:4}
self.pos[9] = {8:1, 2:2, 17:3, 7:4}
self.pos[10] = {16:1, 13:2, 18:3, 12:4}
self.pos[11] = {4:1, 13:2, 14:3, 7:4}
self.pos[12] = {3:1, 2:2, 17:3, 7:4}
self.pos[13] = {}
self.pos[14] = {}
self.pos[15] = {}
self.pos[16] = {}
self.pos[17] = {}
# There are five blank positions in between cards. Early games have fewer positions to search!
# Card 2
self.pos[18] = {3:1, 10:2, 12:3}
self.pos[19] = {13:1, 15:2, 7:3}
self.pos[20] = {16:1, 2:2, 18:3}
self.pos[21] = {8:1, 10:2, 15:3}
self.pos[22] = {17:1, 9:2, 7:3, 12:4}
self.pos[23] = {15:1, 13:2, 5:3, 7:4}
self.pos[24] = {16:1, 10:2, 15:3, 7:4}
self.pos[25] = {11:1, 2:2, 5:3, 7:4}
self.pos[26] = {4:1, 2:2, 15:3, 17:4}
self.pos[27] = {9:1, 10:2, 5:3, 7:4}
self.pos[28] = {15:1, 10:2, 5:3, 7:4}
self.pos[29] = {14:1, 2:2, 12:3, 17:4}
self.pos[30] = {}
self.pos[31] = {}
self.pos[32] = {}
self.pos[33] = {}
self.pos[34] = {}
# Another five blank positions. Can you believe it?
# Card 3
self.pos[35] = {9:1, 10:2, 5:3}
self.pos[36] = {14:1, 2:2, 12:3}
self.pos[37] = {4:1, 2:2, 3:3}
self.pos[38] = {3:1, 2:2, 5:3}
self.pos[39] = {17:1, 13:2, 18:3, 12:4}
self.pos[40] = {16:1, 14:2, 2:3, 15:4}
self.pos[41] = {9:1, 2:2, 17:3, 7:4}
self.pos[42] = {3:1, 13:2, 2:3, 17:4}
self.pos[43] = {17:1, 9:2, 7:3, 11:4}
self.pos[44] = {18:1, 2:2, 16:3, 9:4}
self.pos[45] = {8:1, 2:2, 15:3, 7:4}
self.pos[46] = {11:1, 2:2, 17:3, 7:4}
self.pos[47] = {}
self.pos[48] = {}
self.pos[49] = {}
self.pos[50] = {}
# Start of the second search disc modeled as part
# of the same array for simplicity. Parent function
# calls this subset.
# Card #4
self.pos[51] = {9:1, 10:2, 11:3}
self.pos[52] = {7:1, 2:2, 15:3}
self.pos[53] = {16:1, 10:2, 15:3}
self.pos[54] = {8:1, 13:2, 15:3}
self.pos[55] = {3:1, 2:2, 15:3, 7:4}
self.pos[56] = {5:1, 13:2, 12:3, 7:4}
self.pos[57] = {3:1, 2:2, 5:3, 7:4}
self.pos[58] = {17:1, 7:2, 12:3, 5:4}
self.pos[59] = {15:1, 2:2, 12:3, 17:4}
self.pos[60] = {4:1, 13:2, 5:3, 7:4}
self.pos[61] = {14:1, 2:2, 15:3, 17:4}
self.pos[62] = {17:1, 18:2, 10:3, 12:4}
self.pos[63] = {}
self.pos[64] = {}
self.pos[65] = {}
self.pos[66] = {}
self.pos[67] = {}
# Card #5
self.pos[68] = {}
self.pos[69] = {}
self.pos[70] = {}
self.pos[71] = {}
self.pos[72] = {}
self.pos[73] = {}
self.pos[74] = {}
self.pos[75] = {}
self.pos[76] = {}
self.pos[77] = {}
self.pos[78] = {}
self.pos[79] = {}
self.pos[80] = {}
self.pos[81] = {}
self.pos[82] = {}
self.pos[83] = {}
self.pos[84] = {}
# Card #6
self.pos[85] = {}
self.pos[86] = {}
self.pos[87] = {}
self.pos[88] = {}
self.pos[89] = {}
self.pos[90] = {}
self.pos[91] = {}
self.pos[92] = {}
self.pos[93] = {}
self.pos[94] = {}
self.pos[95] = {}
self.pos[96] = {}
self.pos[97] = {}
self.pos[98] = {}
self.pos[99] = {}
self.pos[100] = {}
four = 0
if rivets in range(0,18):
card = 1
if rivets in range(5,13):
four = 0
if rivets in range(18,35):
card = 2
if rivets in range(22,30):
four = 0
if rivets in range(35,50):
card = 3
if rivets in range(39,47):
four = 0
if rivets in range(50,100):
card = 4
if rivets in range(55,62):
four = 0
return (self.pos[rivets], card, four)
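# Illustrative reading of the table above (not in the original source): a search-disc
# position of 7 falls in card 1's range, so closed_search_relays(7) would return
# ({3: 1, 13: 2, 2: 3, 15: 4}, 1, 0) -- holes 3, 13, 2 and 15 drive search relays 1-4
# for that row, and 'four' stays 0.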
# Define reset as the knock-off, anti-cheat relay disabled, and replay reset enabled. Motors turn while credits are knocked off.
# When meter reaches zero and the zero limit switch is hit, turn off motor sound and leave backglass gi on, but with tilt displayed.
def startup(self):
# Every bingo requires the meter to register '0'
# before allowing coin entry --
# also needs to show a plain 'off' backglass.
self.eb = False
self.tilt_actions()
class SpellingBee(procgame.game.BasicGame):
""" Spelling Bee was a re-run of Crosswords """
def __init__(self, machine_type):
super(SpellingBee, self).__init__(machine_type)
pygame.mixer.pre_init(44100,-16,2,512)
self.sound = procgame.sound.SoundController(self)
self.sound.set_volume(1.0)
# NOTE: trough_count only counts the number of switches present in the trough. It does _not_ count
# the number of balls present. In this game, there should be 8 balls.
self.trough_count = 6
# Subclass my units unique to this game - modifications must be made to set up mixers and steppers unique to the game
# NOTE: 'top' positions are indexed using a 0 index, so the top on a 24 position unit is actually 23.
self.searchdisc = units.Search("searchdisc", 49)
self.searchdisc2 = units.Search("searchdisc2", 49)
#Search relays
self.s1 = units.Relay("s1")
self.s2 = units.Relay("s2")
self.s3 = units.Relay("s3")
self.s4 = units.Relay("s4")
self.s5 = units.Relay("s5")
self.search_index = units.Relay("search_index")
#Replay Counter
self.card1_replay_counter = units.Stepper("card1_replay_counter", 100)
self.card2_replay_counter = units.Stepper("card2_replay_counter", 100)
self.card3_replay_counter = units.Stepper("card3_replay_counter", 100)
self.card4_replay_counter = units.Stepper("card4_replay_counter", 100)
#Hole in One uses a specialized scoring system, tracking your shots.
self.average = units.Relay("average")
self.good = units.Relay("good")
self.expert = units.Relay("expert")
#Initialize stepper units used to keep track of features or timing.
self.selector = units.Stepper("selector", 4)
self.timer = units.Stepper("timer", 40)
self.ball_count = units.Stepper("ball_count", 5)
#When engage()d, light 6v circuit, and enable game features, scoring,
#etc. Disengage()d means that the machine is 'soft' tilted.
self.anti_cheat = units.Relay("anti_cheat")
#When engage()d, spin.
self.start = units.Relay("start")
#Tilt is separate from anti-cheat in that the trip will move the shutter
#when the game is tilted with 1st ball in the lane. Also prevents you
#from picking back up by killing the anti-cheat. Can be engaged by
#tilt bob, slam tilt switches, or timer at 39th step.
#Immediately kills motors.
self.tilt = units.Relay("tilt")
self.replays = 0
self.returned = False
def reset(self):
super(SpellingBee, self).reset()
self.logger = logging.getLogger('game')
self.load_config('bingo.yaml')
main_mode = MulticardBingo(self)
self.modes.add(main_mode)
game = SpellingBee(machine_type='pdb')
game.reset()
game.run_loop()
| bingopodcast/bingos | bingo_emulator/spelling_bee/game.py | Python | gpl-3.0 | 36,799 | 0.010489 |
# encoding: utf-8
#
# Copyright (C) 2013 midnightBITS/Marcin Zdun
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
'''
Created on 09-05-2017
@author: Marcin Zdun
'''
def getAnchor(index, defndx, *points):
if index < 0: return points[defndx]
index %= len(points)
return points[index]
def getAnchorDiag(diag, index, defndx, *points):
if index < 0: return points[defndx]
index %= len(points)
return points[index]
def boxAnchor(index, defndx, x1, y1, x2, y2):
w2 = float(x2 - x1)/2
h2 = float(y2 - y1)/2
w4 = w2/2
h4 = h2/2
return getAnchor(index, defndx,
(x1, y1), (x1 + w4, y1), (x1 + w2, y1), (x1 + w2 + w4, y1),
(x2, y1), (x2, y1 + h4), (x2, y1 + h2), (x2, y1 + h2 + h4),
(x2, y2), (x1 + w2 + w4, y2), (x1 + w2, y2), (x1 + w4, y2),
(x1, y2), (x1, y1 + h2 + h4), (x1, y1 + h2), (x1, y1 + h4)
)
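# Quick illustration (hypothetical values, not part of the original module): for a box
# spanning (0, 0)-(100, 50), boxAnchor(4, 0, 0, 0, 100, 50) returns the (x2, y1) corner
# (100, 0), while a negative index such as boxAnchor(-1, 0, 0, 0, 100, 50) falls back to
# the default point, here points[0] == (0, 0).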
| mzdun/uml-seq | src/uml/sequence/_anchor.py | Python | mit | 1,902 | 0.005783 |
# Spectral smoothing functionality
# To do:
# 3) add extra zero-padding for FFT algorithms so they don't go funky at the
# edges?
import numpy as np
from numpy.fft import fft, ifft, fftfreq, rfftfreq
__all__ = ["smoothspec", "smooth_wave", "smooth_vel", "smooth_lsf",
"smooth_wave_fft", "smooth_vel_fft", "smooth_fft", "smooth_lsf_fft",
"mask_wave", "resample_wave"]
ckms = 2.998e5
sigma_to_fwhm = 2.355  # FWHM = 2 * sqrt(2 * ln 2) * sigma ~= 2.3548 * sigma
def smoothspec(wave, spec, resolution=None, outwave=None,
smoothtype="vel", fftsmooth=True,
min_wave_smooth=0, max_wave_smooth=np.inf, **kwargs):
"""
Parameters
----------
wave : ndarray of shape ``(N_pix,)``
The wavelength vector of the input spectrum. Assumed Angstroms.
spec : ndarray of shape ``(N_pix,)``
The flux vector of the input spectrum.
resolution : float
The smoothing parameter. Units depend on ``smoothtype``.
outwave : ``None`` or ndarray of shape ``(N_pix_out,)``
The output wavelength vector. If ``None`` then the input wavelength
vector will be assumed, though if ``min_wave_smooth`` or
``max_wave_smooth`` are also specified, then the output spectrum may
have different length than ``spec`` or ``wave``, or the convolution may
be strange outside of ``min_wave_smooth`` and ``max_wave_smooth``.
Basically, always set ``outwave`` to be safe.
smoothtype : string, optional, default: "vel"
The type of smoothing to perform. One of:
+ ``"vel"`` - velocity smoothing, ``resolution`` units are in km/s
(dispersion not FWHM)
+ ``"R"`` - resolution smoothing, ``resolution`` is in units of
:math:`\lambda/ \sigma_\lambda` (where :math:`\sigma_\lambda` is
dispersion, not FWHM)
+ ``"lambda"`` - wavelength smoothing. ``resolution`` is in units of
Angstroms
+ ``"lsf"`` - line-spread function. Use an aribitrary line spread
function, which can be given as a vector the same length as ``wave``
that gives the dispersion (in AA) at each wavelength. Alternatively,
if ``resolution`` is ``None`` then a line-spread function must be
present as an additional ``lsf`` keyword. In this case all additional
keywords as well as the ``wave`` vector will be passed to this ``lsf``
function.
fftsmooth : bool, optional, default: True
Switch to use FFTs to do the smoothing, usually resulting in massive
speedups of all algorithms. However, edge effects may be present.
min_wave_smooth : float, optional default: 0
The minimum wavelength of the input vector to consider when smoothing
the spectrum. If ``None`` then it is determined from the output
wavelength vector and padded by some multiple of the desired resolution.
max_wave_smooth : float, optional, default: inf
The maximum wavelength of the input vector to consider when smoothing
the spectrum. If None then it is determined from the output wavelength
vector and padded by some multiple of the desired resolution.
inres : float, optional
If given, this parameter specifies the resolution of the input. This
resolution is subtracted in quadrature from the target output resolution
before the kernel is formed.
In certain cases this can be used to properly switch from resolution
that is constant in velocity to one that is constant in wavelength,
taking into account the wavelength dependence of the input resolution
when defined in terms of lambda. This is possible iff:
* ``fftsmooth`` is False
* ``smoothtype`` is ``"lambda"``
* The optional ``in_vel`` parameter is supplied and True.
The units of ``inres`` should be the same as the units of
``resolution``, except in the case of switching from velocity to
wavelength resolution, in which case the units of ``inres`` should be
in units of lambda/sigma_lambda.
in_vel : float (optional)
If supplied and True, the ``inres`` parameter is assumed to be in units
of lambda/sigma_lambda. This parameter is ignored **unless** the
``smoothtype`` is ``"lambda"`` and ``fftsmooth`` is False.
Returns
-------
flux : ndarray of shape ``(N_pix_out,)``
The smoothed spectrum on the `outwave` grid, ndarray.
"""
if smoothtype == 'vel':
linear = False
units = 'km/s'
sigma = resolution
fwhm = sigma * sigma_to_fwhm
Rsigma = ckms / sigma
R = ckms / fwhm
width = Rsigma
assert np.size(sigma) == 1, "`resolution` must be scalar for `smoothtype`='vel'"
elif smoothtype == 'R':
linear = False
units = 'km/s'
Rsigma = resolution
sigma = ckms / Rsigma
fwhm = sigma * sigma_to_fwhm
R = ckms / fwhm
width = Rsigma
assert np.size(sigma) == 1, "`resolution` must be scalar for `smoothtype`='R'"
# convert inres from Rsigma to sigma (km/s)
try:
kwargs['inres'] = ckms / kwargs['inres']
except(KeyError):
pass
elif smoothtype == 'lambda':
linear = True
units = 'AA'
sigma = resolution
fwhm = sigma * sigma_to_fwhm
Rsigma = None
R = None
width = sigma
assert np.size(sigma) == 1, "`resolution` must be scalar for `smoothtype`='lambda'"
elif smoothtype == 'lsf':
linear = True
width = 100
sigma = resolution
else:
raise ValueError("smoothtype {} is not valid".format(smoothtype))
# Mask the input spectrum depending on outwave or the wave_smooth kwargs
mask = mask_wave(wave, width=width, outwave=outwave, linear=linear,
wlo=min_wave_smooth, whi=max_wave_smooth, **kwargs)
w = wave[mask]
s = spec[mask]
if outwave is None:
outwave = wave
# Choose the smoothing method
if smoothtype == 'lsf':
if fftsmooth:
smooth_method = smooth_lsf_fft
if sigma is not None:
# mask the resolution vector
sigma = resolution[mask]
else:
smooth_method = smooth_lsf
if sigma is not None:
# convert to resolution on the output wavelength grid
sigma = np.interp(outwave, wave, resolution)
elif linear:
if fftsmooth:
smooth_method = smooth_wave_fft
else:
smooth_method = smooth_wave
else:
if fftsmooth:
smooth_method = smooth_vel_fft
else:
smooth_method = smooth_vel
# Actually do the smoothing and return
return smooth_method(w, s, outwave, sigma, **kwargs)
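# A minimal usage sketch (not part of the original module). The toy wavelength grid and
# flux below are assumptions made purely for illustration: smooth a synthetic spectrum
# to a velocity dispersion of 100 km/s using the FFT path.
def _example_smoothspec():
    wave = np.linspace(3000., 9000., 4096)  # assumed wavelength grid, Angstroms
    flux = 1.0 + np.exp(-0.5 * ((wave - 6563.0) / 2.0)**2)  # flat continuum plus a narrow line
    return smoothspec(wave, flux, resolution=100.0, smoothtype="vel",
                      fftsmooth=True, outwave=wave)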
def smooth_vel(wave, spec, outwave, sigma, nsigma=10, inres=0, **extras):
"""Smooth a spectrum in velocity space. This is insanely slow, but general
and correct.
:param wave:
Wavelength vector of the input spectrum.
:param spec:
Flux vector of the input spectrum.
:param outwave:
Desired output wavelength vector.
:param sigma:
Desired velocity resolution (km/s), *not* FWHM.
:param nsigma:
Number of sigma away from the output wavelength to consider in the
integral. If less than zero, all wavelengths are used. Setting this
to some positive number decreases the scaling constant in the O(N_out *
N_in) algorithm used here.
:param inres:
The velocity resolution of the input spectrum (km/s), *not* FWHM.
"""
sigma_eff_sq = sigma**2 - inres**2
if np.any(sigma_eff_sq) < 0.0:
raise ValueError("Desired velocity resolution smaller than the value"
"possible for this input spectrum.".format(inres))
# sigma_eff is in units of sigma_lambda / lambda
sigma_eff = np.sqrt(sigma_eff_sq) / ckms
lnwave = np.log(wave)
flux = np.zeros(len(outwave))
for i, w in enumerate(outwave):
x = (np.log(w) - lnwave) / sigma_eff
if nsigma > 0:
good = np.abs(x) < nsigma
x = x[good]
_spec = spec[good]
else:
_spec = spec
f = np.exp(-0.5 * x**2)
flux[i] = np.trapz(f * _spec, x) / np.trapz(f, x)
return flux
def smooth_vel_fft(wavelength, spectrum, outwave, sigma_out, inres=0.0,
**extras):
"""Smooth a spectrum in velocity space, using FFTs. This is fast, but makes
some assumptions about the form of the input spectrum and can have some
issues at the ends of the spectrum depending on how it is padded.
:param wavelength:
Wavelength vector of the input spectrum. An assertion error will result
if this is not a regular grid in wavelength.
:param spectrum:
Flux vector of the input spectrum.
:param outwave:
Desired output wavelength vector.
:param sigma_out:
Desired velocity resolution (km/s), *not* FWHM. Scalar or length 1 array.
:param inres:
The velocity resolution of the input spectrum (km/s), dispersion *not*
FWHM.
"""
# The kernel width for the convolution.
sigma_eff_sq = sigma_out**2 - inres**2
if sigma_eff_sq <= 0:
    # The input is already at (or below) the target resolution; just interpolate.
    return np.interp(outwave, wavelength, spectrum)
sigma = np.sqrt(sigma_eff_sq)
# make length of spectrum a power of 2 by resampling
wave, spec = resample_wave(wavelength, spectrum)
# get grid resolution (*not* the resolution of the input spectrum) and make
# sure it's nearly constant. It should be, by design (see resample_wave)
invRgrid = np.diff(np.log(wave))
assert invRgrid.max() / invRgrid.min() < 1.05
dv = ckms * np.median(invRgrid)
# Do the convolution
spec_conv = smooth_fft(dv, spec, sigma)
# interpolate onto output grid
if outwave is not None:
spec_conv = np.interp(outwave, wave, spec_conv)
return spec_conv
def smooth_wave(wave, spec, outwave, sigma, nsigma=10, inres=0, in_vel=False,
**extras):
"""Smooth a spectrum in wavelength space. This is insanely slow, but
general and correct (except for the treatment of the input resolution if it
is velocity)
:param wave:
Wavelength vector of the input spectrum.
:param spec:
Flux vector of the input spectrum.
:param outwave:
Desired output wavelength vector.
:param sigma:
Desired resolution (*not* FWHM) in wavelength units. This can be a
vector of same length as ``wave``, in which case a wavelength dependent
broadening is calculated
:param nsigma: (optional, default=10)
Number of sigma away from the output wavelength to consider in the
integral. If less than zero, all wavelengths are used. Setting this
to some positive number decreases the scaling constant in the O(N_out *
N_in) algorithm used here.
:param inres: (optional, default: 0.0)
Resolution of the input, in either wavelength units or
lambda/dlambda (c/v). Ignored if <= 0.
:param in_vel: (optional, default: False)
If True, the input spectrum has been smoothed in velocity
space, and ``inres`` is assumed to be in lambda/dlambda.
:returns flux:
The output smoothed flux vector, same length as ``outwave``.
"""
# sigma_eff is in angstroms
if inres <= 0:
sigma_eff_sq = sigma**2
elif in_vel:
# Make an approximate correction for the intrinsic wavelength
# dependent dispersion. This sort of maybe works.
sigma_eff_sq = sigma**2 - (wave / inres)**2
else:
sigma_eff_sq = sigma**2 - inres**2
if np.any(sigma_eff_sq < 0):
raise ValueError("Desired wavelength sigma is lower than the value "
"possible for this input spectrum.")
sigma_eff = np.sqrt(sigma_eff_sq)
flux = np.zeros(len(outwave))
for i, w in enumerate(outwave):
x = (wave - w) / sigma_eff
if nsigma > 0:
good = np.abs(x) < nsigma
x = x[good]
_spec = spec[good]
else:
_spec = spec
f = np.exp(-0.5 * x**2)
flux[i] = np.trapz(f * _spec, x) / np.trapz(f, x)
return flux
def smooth_wave_fft(wavelength, spectrum, outwave, sigma_out=1.0,
inres=0.0, **extras):
"""Smooth a spectrum in wavelength space, using FFTs. This is fast, but
makes some assumptions about the input spectrum, and can have some
issues at the ends of the spectrum depending on how it is padded.
:param wavelength:
Wavelength vector of the input spectrum.
:param spectrum:
Flux vector of the input spectrum.
:param outwave:
Desired output wavelength vector.
:param sigma:
Desired resolution (*not* FWHM) in wavelength units.
:param inres:
Resolution of the input, in wavelength units (dispersion not FWHM).
:returns flux:
The output smoothed flux vector, same length as ``outwave``.
"""
# restrict wavelength range (for speed)
# should also make nearest power of 2
wave, spec = resample_wave(wavelength, spectrum, linear=True)
# The kernel width for the convolution.
sigma_eff_sq = sigma_out**2 - inres**2
if sigma_eff_sq <= 0:
    # The input is already at (or below) the target resolution; just interpolate.
    return np.interp(outwave, wavelength, spectrum)
sigma = np.sqrt(sigma_eff_sq)
# get grid resolution (*not* the resolution of the input spectrum) and make
# sure it's nearly constant. Should be by design (see resample_wave)
Rgrid = np.diff(wave)
assert Rgrid.max() / Rgrid.min() < 1.05
dw = np.median(Rgrid)
# Do the convolution
spec_conv = smooth_fft(dw, spec, sigma)
# interpolate onto output grid
if outwave is not None:
spec_conv = np.interp(outwave, wave, spec_conv)
return spec_conv
def smooth_lsf(wave, spec, outwave, sigma=None, lsf=None, return_kernel=False,
**kwargs):
"""Broaden a spectrum using a wavelength dependent line spread function.
This function is only approximate because it doesn't actually do the
integration over pixels, so for sparsely sampled points you'll have
problems. This function needs to be checked and possibly rewritten.
:param wave:
Input wavelengths. ndarray of shape (nin,)
:param spec:
Input spectrum. ndarray of same shape as ``wave``.
:param outwave:
Output wavelengths, ndarray of shape (nout,)
:param sigma: (optional, default: None)
The dispersion (not FWHM) as a function of wavelength that you want to
apply to the input spectrum. ``None`` or ndarray of same length as
``outwave``. If ``None`` then the wavelength dependent dispersion will be
calculated from the function supplied with the ``lsf`` keyword.
:param lsf:
A function that returns the gaussian dispersion at each wavelength.
This is assumed to be in sigma, not FWHM.
:param kwargs:
Passed to the function supplied in the ``lsf`` keyword.
:param return_kernel: (optional, default: False)
If True, return the kernel used to broaden the spectrum as ndarray of
shape (nout, nin).
:returns newspec:
The broadened spectrum, same length as ``outwave``.
"""
if (lsf is None) and (sigma is None):
return np.interp(outwave, wave, spec)
dw = np.gradient(wave)
if sigma is None:
sigma = lsf(outwave, **kwargs)
kernel = outwave[:, None] - wave[None, :]
kernel = (1 / (sigma * np.sqrt(np.pi * 2))[:, None] *
np.exp(-kernel**2 / (2 * sigma[:, None]**2)) *
dw[None, :])
# should this be axis=0 or axis=1?
kernel = kernel / kernel.sum(axis=1)[:, None]
newspec = np.dot(kernel, spec)
# kernel /= np.trapz(kernel, wave, axis=1)[:, None]
# newspec = np.trapz(kernel * spec[None, :], wave, axis=1)
if return_kernel:
return newspec, kernel
return newspec
def smooth_lsf_fft(wave, spec, outwave, sigma=None, lsf=None, pix_per_sigma=2,
eps=0.25, preserve_all_input_frequencies=False, **kwargs):
"""Smooth a spectrum by a wavelength dependent line-spread function, using
FFTs.
:param wave:
Wavelength vector of the input spectrum.
:param spectrum:
Flux vector of the input spectrum.
:param outwave:
Desired output wavelength vector.
:param sigma: (optional)
Dispersion (in same units as ``wave``) as a function `wave`. ndarray
of same length as ``wave``. If not given, sigma will be computed from
the function provided by the ``lsf`` keyword.
:param lsf: (optional)
Function used to calculate the dispersion as a function of wavelength.
Must be able to take as an argument the ``wave`` vector and any extra
keyword arguments and return the dispersion (in the same units as the
input wavelength vector) at every value of ``wave``. If not provided
then ``sigma`` must be specified.
:param pix_per_sigma: (optional, default: 2)
Number of pixels per sigma of the smoothed spectrum to use in
intermediate interpolation and FFT steps. Increasing this number will
increase the accuracy of the output (to a point), and the run-time, by
preserving all high-frequency information in the input spectrum.
:param preserve_all_input_frequencies: (default: False)
This is a switch to use a very dense sampling of the input spectrum
that preserves all input frequencies. It can significantly increase
the call time for often modest gains...
:param eps: (optional)
Deprecated.
:param **kwargs:
All additional keywords are passed to the function supplied to the
``lsf`` keyword, if present.
:returns flux:
The input spectrum smoothed by the wavelength dependent line-spread
function. Same length as ``outwave``.
"""
# This is sigma vs lambda
if sigma is None:
sigma = lsf(wave, **kwargs)
# Now we need the CDF of 1/sigma, which provides the relationship between x and lambda
# does dw go in numerator or denominator?
# I think numerator but should be tested
dw = np.gradient(wave)
cdf = np.cumsum(dw / sigma)
cdf /= cdf.max()
# Now we create an evenly sampled grid in the x coordinate on the interval [0,1]
# and convert that to lambda using the cdf.
# This should result in some power of two x points, for FFT efficiency
# Furthermore, the number of points should be high enough that the
# resolution is critically sampled. And we want to know what the
# resolution is in this new coordinate.
# There are two possible ways to do this
# 1) Choose a point ~halfway in the spectrum
# half = len(wave) / 2
# Now get the x coordinates of a point eps*sigma redder and bluer
# wave_eps = eps * np.array([-1, 1]) * sigma[halpha]
# x_h_eps = np.interp(wave[half] + wave_eps, wave, cdf)
# Take the differences to get dx and dsigma and ratio to get x per sigma
# x_per_sigma = np.diff(x_h_eps) / (2.0 * eps) #x_h_epsilon - x_h
# 2) Get for all points (slower?):
sigma_per_pixel = (dw / sigma)
x_per_pixel = np.gradient(cdf)
x_per_sigma = np.nanmedian(x_per_pixel / sigma_per_pixel)
N = pix_per_sigma / x_per_sigma
# Alternatively, just use the smallest dx of the input, divided by two for safety
# Assumes the input spectrum is critically sampled.
# And does not actually give x_per_sigma, so that has to be determined anyway
if preserve_all_input_frequencies:
# preserve more information in the input spectrum, even when way higher
# frequency than the resolution of the output. Leads to slightly more
# accurate output, but with a substantial time hit
N = max(N, 1.0 / np.nanmin(x_per_pixel))
# Now find the smallest power of two that divides the interval (0, 1) into
# segments that are smaller than dx
nx = int(2**np.ceil(np.log2(N)))
# now evenly sample in the x coordinate
x = np.linspace(0, 1, nx)
dx = 1.0 / nx
# And now we get the spectrum at the lambda coordinates of the even grid in x
lam = np.interp(x, cdf, wave)
newspec = np.interp(lam, wave, spec)
# And now we convolve.
# If we did not know sigma in terms of x we could estimate it here
# from the resulting sigma(lamda(x)) / dlambda(x):
# dlam = np.gradient(lam)
# sigma_x = np.median(lsf(lam, **kwargs) / dlam)
# But the following just uses the fact that we know x_per_sigma (duh).
spec_conv = smooth_fft(dx, newspec, x_per_sigma)
# and interpolate back to the output wavelength grid.
return np.interp(outwave, lam, spec_conv)
def smooth_fft(dx, spec, sigma):
"""Basic math for FFT convolution with a gaussian kernel.
:param dx:
The wavelength or velocity spacing, same units as sigma
:param sigma:
The width of the gaussian kernel, same units as dx
:param spec:
The spectrum flux vector
"""
# The Fourier coordinate
ss = rfftfreq(len(spec), d=dx)
# Make the fourier space taper; just the analytical fft of a gaussian
taper = np.exp(-2 * (np.pi ** 2) * (sigma ** 2) * (ss ** 2))
ss[0] = 0.01 # hack
# Fourier transform the spectrum
spec_ff = np.fft.rfft(spec)
# Multiply in fourier space
ff_tapered = spec_ff * taper
# Fourier transform back
spec_conv = np.fft.irfft(ff_tapered)
return spec_conv
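# A tiny sanity-check sketch (not part of the original module): convolving a single
# spike on a uniform 1 AA grid with sigma = 5 AA should return an (approximately)
# gaussian profile of that width. The array size is an assumption, chosen as a power
# of two for FFT efficiency.
def _example_smooth_fft():
    spike = np.zeros(4096)
    spike[2048] = 1.0
    return smooth_fft(1.0, spike, 5.0)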
def mask_wave(wavelength, width=1, wlo=0, whi=np.inf, outwave=None,
nsigma_pad=20.0, linear=False, **extras):
"""Restrict wavelength range (for speed) but include some padding based on
the desired resolution.
"""
# Base wavelength limits
if outwave is not None:
wlim = np.array([outwave.min(), outwave.max()])
else:
wlim = np.squeeze(np.array([wlo, whi]))
# Pad by nsigma * sigma_wave
if linear:
wlim += nsigma_pad * width * np.array([-1, 1])
else:
wlim *= (1 + nsigma_pad / width * np.array([-1, 1]))
mask = (wavelength > wlim[0]) & (wavelength < wlim[1])
return mask
def resample_wave(wavelength, spectrum, linear=False):
"""Resample spectrum, so that the number of elements is the next highest
power of two. This uses np.interp. Note that if the input wavelength grid
did not critically sample the spectrum then there is no guarantee the
output wavelength grid will.
"""
wmin, wmax = wavelength.min(), wavelength.max()
nw = len(wavelength)
nnew = int(2.0**(np.ceil(np.log2(nw))))
if linear:
Rgrid = np.diff(wavelength) # in same units as ``wavelength``
w = np.linspace(wmin, wmax, nnew)
else:
Rgrid = np.diff(np.log(wavelength)) # actually 1/R
lnlam = np.linspace(np.log(wmin), np.log(wmax), nnew)
w = np.exp(lnlam)
# Make sure the resolution really is nearly constant
#assert Rgrid.max() / Rgrid.min() < 1.05
s = np.interp(w, wavelength, spectrum)
return w, s
def subtract_input_resolution(res_in, res_target, smoothtype_in, smoothtype_target, wave=None):
"""Subtract the input resolution (in quadrature) from a target output
resolution to get the width of the kernel that will convolve the input to
the output. Assumes all convolutions are with gaussians.
"""
if smoothtype_in == "R":
width_in = 1.0 / res_in
else:
width_in = res_in
if smoothtype_target == "R":
width_target = 1.0 / res_target
else:
width_target = res_target
if smoothtype_in == smoothtype_target:
dwidth_sq = width_target**2 - width_in**2
elif (smoothtype_in == "vel") & (smoothype_target == "lambda"):
dwidth_sq = width_target**2 - (wave * width_in / ckms)**2
elif (smoothtype_in == "R") & (smoothype_target == "lambda"):
dwidth_sq = width_target**2 - (wave * width_in)**2
elif (smoothtype_in == "lambda") & (smoothtype_target == "vel"):
dwidth_sq = width_target**2 - (ckms * width_in / wave)**2
elif (smoothtype_in == "lambda") & (smoothtype_target == "R"):
dwidth_sq = width_target**2 - (width_in / wave)**2
elif (smoothtype_in == "R") & (smoothtype_target == "vel"):
print("srsly?")
return None
elif (smoothtype_in == "vel") & (smoothtype_target == "R"):
print("srsly?")
return None
if np.any(dwidth_sq <= 0):
print("Warning: Desired resolution is better than input resolution")
dwidth_sq = np.clip(dwidth_sq, 0, np.inf)
if smoothtype_target == "R":
return 1.0 / np.sqrt(dwidth_sq)
else:
return np.sqrt(dwidth_sq)
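# Worked example with illustrative numbers (not from the original source): going from an
# input dispersion of 1.0 AA to a target of 2.0 AA, both of smoothtype "lambda", gives a
# kernel width of sqrt(2.0**2 - 1.0**2) ~= 1.73 AA:
#   subtract_input_resolution(1.0, 2.0, "lambda", "lambda")  # -> ~1.732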
| bd-j/prospector | prospect/utils/smoothing.py | Python | mit | 24,897 | 0.000843 |
# -*- coding: utf-8 -*-
'''Python command line tool to manage a local cache of content from DataONE.
Output is a folder with structure:
cache/
meta.json: Basic metadata about the content in the cache
index.json: An index to entries in the cache. Downloads are renamed using a
hash of the identifier, as the identifier is not file-system safe
0/
.
.
f/
Note that this process runs as a single thread and so will take quite a while
to complete.
Note also that the libraries used emit error messages that may be more
appropriately handled in logic. As a result the output from this script is
quite verbose, though it seems to work effectively.
Dependencies:
pip install -U dataone.libclient
# should install downstream dependencies
Use:
python d1_local_copy.py
'''
import logging
from d1_local_copy.local_copy_manager import LocalCopyManager
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# name of a folder that will contain the downloaded content,
cache_folder="cache"
# hostname of coordinating node to use
host="cn.dataone.org"
# Query to retrieve all METADATA entries that are not obsoleted
q = "formatType:METADATA AND -obsoletedBy:[* TO *]"
manager = LocalCopyManager(host=host)
#populate the cache, limiting the total downloads to max_records
manager.populate(q, max_records=1000)
| DataONEorg/d1LocalCopy | d1_local_copy.py | Python | apache-2.0 | 1,400 | 0.015714 |
def passChecker(password):
    """Return True if the password is at least 8 characters long and contains
    an uppercase letter, a lowercase letter, and a digit."""
    import re
    passlength = 8
    uppercaseRegex = re.compile(r'[A-Z]')
    lowercaseRegex = re.compile(r'[a-z]')
    numberRegex = re.compile(r'[0-9]')
    if (uppercaseRegex.search(password) is None or
            lowercaseRegex.search(password) is None or
            numberRegex.search(password) is None or
            len(password) < passlength):
        return False
    return True
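# A few illustrative checks (not part of the original script):
#   passChecker('Password1')     -> True   (upper, lower, digit, length >= 8)
#   passChecker('short1A')       -> False  (only 7 characters)
#   passChecker('alllowercase1') -> False  (no uppercase letter)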
| Bennson/Projects | Automate the boring stuff/Chapter 7/strongPasswordDetection.py | Python | gpl-2.0 | 410 | 0.012195 |
import win32ras
stateStrings = {
win32ras.RASCS_OpenPort : "OpenPort",
win32ras.RASCS_PortOpened : "PortOpened",
win32ras.RASCS_ConnectDevice : "ConnectDevice",
win32ras.RASCS_DeviceConnected : "DeviceConnected",
win32ras.RASCS_AllDevicesConnected : "AllDevicesConnected",
win32ras.RASCS_Authenticate : "Authenticate",
win32ras.RASCS_AuthNotify : "AuthNotify",
win32ras.RASCS_AuthRetry : "AuthRetry",
win32ras.RASCS_AuthCallback : "AuthCallback",
win32ras.RASCS_AuthChangePassword : "AuthChangePassword",
win32ras.RASCS_AuthProject : "AuthProject",
win32ras.RASCS_AuthLinkSpeed : "AuthLinkSpeed",
win32ras.RASCS_AuthAck : "AuthAck",
win32ras.RASCS_ReAuthenticate : "ReAuthenticate",
win32ras.RASCS_Authenticated : "Authenticated",
win32ras.RASCS_PrepareForCallback : "PrepareForCallback",
win32ras.RASCS_WaitForModemReset : "WaitForModemReset",
win32ras.RASCS_WaitForCallback : "WaitForCallback",
win32ras.RASCS_Projected : "Projected",
win32ras.RASCS_StartAuthentication : "StartAuthentication",
win32ras.RASCS_CallbackComplete : "CallbackComplete",
win32ras.RASCS_LogonNetwork : "LogonNetwork",
win32ras.RASCS_Interactive : "Interactive",
win32ras.RASCS_RetryAuthentication : "RetryAuthentication",
win32ras.RASCS_CallbackSetByCaller : "CallbackSetByCaller",
win32ras.RASCS_PasswordExpired : "PasswordExpired",
win32ras.RASCS_Connected : "Connected",
win32ras.RASCS_Disconnected : "Disconnected"
}
def TestCallback( hras, msg, state, error, exterror):
print "Callback called with ", hras, msg, stateStrings[state], error, exterror
def test(rasName = "_ Divert Off"):
return win32ras.Dial(None, None, (rasName,),TestCallback)
| JulienMcJay/eclock | windows/Python27/Lib/site-packages/pywin32-218-py2.7-win32.egg/rasutil.py | Python | gpl-2.0 | 1,737 | 0.039724 |
"""
Logs various pieces of information for each host
"""
from novaclient import client as novaclient
from ceilometerclient import client as ceiloclient
import os
from os import environ as env
import time
def start(hosts, sleep_sec, base_dir):
print 'You must be admin to use this script'
# start logger
time_dir = get_cur_formatted_time()
root_path = os.path.join(base_dir, time_dir)
keystone = {}
keystone['username'] = env['OS_USERNAME']
keystone['password'] = env['OS_PASSWORD']
keystone['auth_url'] = env['OS_AUTH_URL']
keystone['tenant_name'] = env['OS_TENANT_NAME']
nova = (novaclient.Client(3, keystone['username'], keystone['password'], keystone['tenant_name'],
keystone['auth_url'], service_type='compute'))
ceilo = (ceiloclient.get_client(2, username=keystone['username'], password=keystone['password'],
tenant_name=keystone['tenant_name'], auth_url=keystone['auth_url']))
flavor_list = nova.flavors.list()
flavor_dict = dict((flavor.id, flavor.name) for flavor in flavor_list)
while True:
for host in hosts:
host_id = '_'.join([host, host]) #host_node: computeX_computeX
log_info(nova, ceilo, host, host_id, root_path, flavor_dict)
time.sleep(sleep_sec)
def log_info(nova, ceilo, host, host_id, root_path, flavor_dict):
# log info every interval
path = os.path.join(root_path, host)
if not os.path.exists(path):
os.makedirs(path)
print path
log_meter_host_cpu_util(ceilo, host_id, path)
log_meter_host_mem_util(ceilo, host_id, path)
log_meter_host_cpu_mem(ceilo, host_id, path)
log_vms_host(nova, host, path, flavor_dict)
log_alarm_host_cpu_mem(ceilo, host_id, path)
def log_meter_host_cpu_util(ceilo, host_id, path):
# sample of cpu util in percentage
host_cpu_util = ceilo.samples.list(meter_name='host.cpu.util',
limit=1,
q=[{'field':'resource_id',
'op':'eq',
'value':host_id}])
host_cpu_util = (host_cpu_util[0].counter_volume)/100
content = get_string_to_write(str(host_cpu_util))
path_file = get_path_to_file(path, "meter_host_cpu_util")
write_file(path_file, content)
def log_meter_host_mem_util(ceilo, host_id, path):
# sample of ram usage in percentage
host_mem_usage = ceilo.samples.list(meter_name='host.memory.usage',
limit=1,
q=[{'field':'resource_id',
'op':'eq',
'value':host_id}])
host_mem_usage = (host_mem_usage[0].counter_volume)/100
content = get_string_to_write(str(host_mem_usage))
path_file = get_path_to_file(path, "meter_host_mem_util")
write_file(path_file, content)
def log_meter_host_cpu_mem(ceilo, host_id, path):
# sample of cpu-ram combined meter
host_cpu_mem_combo = ceilo.samples.list(meter_name='host.cpu.util.memory.usage',
limit=1,
q=[{'field':'resource_id',
'op':'eq',
'value':host_id}])
content = get_string_to_write(str(host_cpu_mem_combo[0].counter_volume))
path_file = get_path_to_file(path, "meter_host_cpu_mem")
write_file(path_file, content)
def log_alarm_host_cpu_mem(ceilo, host_id, path):
# overload and underload alarms
alarms = ceilo.alarms.list(q=[{'field':'meter',
'op':'eq',
'value':'host.cpu.util.memory.usage'}])
hostname = [x.strip() for x in host_id.split('_')][0]
for alarm in alarms:
name = alarm.name
state = alarm.state
#print hostname
#print name
if hostname in name:
name_state = ''
if state == 'ok':
name_state = name + ': ' + '0'
elif state == 'alarm':
name_state = name + ': ' + '1'
else:
name_state = name + ': ' + '2'
content = get_string_to_write(name_state)
if 'overload' in name:
path_file = get_path_to_file(path, "alarm_host_cpu_mem_overload")
write_file(path_file, content)
if 'underload' in name:
path_file = get_path_to_file(path, "alarm_host_cpu_mem_underload")
write_file(path_file, content)
path_file = get_path_to_file(path, "alarm_host_cpu_mem")
write_file(path_file, content)
content = get_string_to_write("**********")
path_file = get_path_to_file(path, "alarm_host_cpu_mem")
write_file(path_file, content)
def log_vms_host(nova, host, path, flavor_dict):
# vms in host
search_opts = {'host': host, 'all_tenants': True}
vms = nova.servers.list(search_opts=search_opts)
path_file = get_path_to_file(path, "vms")
id_flavor = [(vm.id, flavor_dict[vm.flavor['id']]) for vm in vms]
num_vms = len(vms)
content = get_string_to_write(str(num_vms) + ' , ' + str(id_flavor))
write_file(path_file, content)
def write_file(path_file, content):
out_file = open(path_file,"a")
out_file.write(str(content) + os.linesep)
out_file.close()
def get_path_to_file(path, filename):
return os.path.join(path, filename)
def get_string_to_write(content):
return ", ".join([get_cur_formatted_time(), content])
def get_cur_formatted_time():
cur_time = time.time()
formatted_time = time.strftime('%Y-%m-%dT%H:%M:%S',
time.localtime(cur_time))
return formatted_time
compute_hosts = ['compute02', 'compute03', 'compute04']
sleep_sec = 150
base_dir = "log"
start(compute_hosts, sleep_sec, base_dir)
| MisterPup/OpenStack-Neat-Ceilometer | alarm_test/info_logger.py | Python | apache-2.0 | 6,034 | 0.009115 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
# import functions from another directory
import os.path
path_to_script = os.path.dirname(os.path.abspath(__file__))
# sys.path.append(os.path.join(path_to_script, "../extern/pyseg_base/src/"))
import unittest
import numpy as np
import os
from imtools import qmisc
from imtools import misc
#
class QmiscTest(unittest.TestCase):
interactivetTest = False
# interactivetTest = True
# @unittest.skip("waiting for implementation")
def test_suggest_filename(self):
"""
Test suggested filenames. Recursion in filenames is not tested, i.e. the situation
where file0, file1 and file2 already exist and the input file is 'file'.
"""
filename = "mujsoubor"
# import ipdb; ipdb.set_trace() # BREAKPOINT
new_filename = misc.suggest_filename(filename, exists=True)
# self.assertTrue(new_filename == "mujsoubor2")
self.assertEqual(new_filename, "mujsoubor_2")
filename = "mujsoubor_112"
new_filename = misc.suggest_filename(filename, exists=True)
self.assertTrue(new_filename == "mujsoubor_113")
filename = "mujsoubor_2.txt"
new_filename = misc.suggest_filename(filename, exists=True)
self.assertTrue(new_filename == "mujsoubor_3.txt")
filename = "mujsoubor27.txt"
new_filename = misc.suggest_filename(filename, exists=True)
self.assertTrue(new_filename == "mujsoubor27_2.txt")
filename = "mujsoubor-a24.txt"
new_filename = misc.suggest_filename(filename, exists=False)
self.assertEqual(new_filename, "mujsoubor-a24.txt", "Rewrite")
@unittest.skip("getVersionString is not used anymore")
def test_getVersionString(self):
"""
getVersionString is not used anymore
"""
vfn = "../__VERSION__"
existed = False
if not os.path.exists(vfn):
with open(vfn, 'a') as the_file:
the_file.write('1.1.1\n')
existed = False
verstr = qmisc.getVersionString()
self.assertTrue(type(verstr) == str)
if existed:
os.remove(vfn)
def test_obj_to_and_from_file_yaml(self):
testdata = np.random.random([4, 4, 3])
test_object = {'a': 1, 'data': testdata}
filename = 'test_obj_to_and_from_file.yaml'
misc.obj_to_file(test_object, filename, 'yaml')
saved_object = misc.obj_from_file(filename, 'yaml')
self.assertTrue(saved_object['a'] == 1)
self.assertTrue(saved_object['data'][1, 1, 1] == testdata[1, 1, 1])
os.remove(filename)
def test_obj_to_and_from_file_pickle(self):
testdata = np.random.random([4, 4, 3])
test_object = {'a': 1, 'data': testdata}
filename = 'test_obj_to_and_from_file.pkl'
misc.obj_to_file(test_object, filename, 'pickle')
saved_object = misc.obj_from_file(filename, 'pickle')
self.assertTrue(saved_object['a'] == 1)
self.assertTrue(saved_object['data'][1, 1, 1] == testdata[1, 1, 1])
os.remove(filename)
# def test_obj_to_and_from_file_exeption(self):
# test_object = [1]
# filename = 'test_obj_to_and_from_file_exeption'
# self.assertRaises(misc.obj_to_file(test_object, filename ,'yaml'))
def test_obj_to_and_from_file_with_directories(self):
import shutil
testdata = np.random.random([4, 4, 3])
test_object = {'a': 1, 'data': testdata}
dirname = '__test_write_and_read'
filename = '__test_write_and_read/test_obj_to_and_from_file.pkl'
misc.obj_to_file(test_object, filename, 'pickle')
saved_object = misc.obj_from_file(filename, 'pickle')
self.assertTrue(saved_object['a'] == 1)
self.assertTrue(saved_object['data'][1, 1, 1] == testdata[1, 1, 1])
shutil.rmtree(dirname)
if __name__ == "__main__":
unittest.main()
| mjirik/imtools | tests/qmisc_test.py | Python | mit | 3,908 | 0.001281 |
# coding: utf-8
from .graphviz_wrapper import board, add_digraph, add_digraph_node, add_digraph_edge
from .jupyter_helper import jupyter_pan_and_zoom, jupyter_show_as_svg
__author__ = "akimach"
__version__ = "0.0.7"
__license__ = "MIT"
| akimach/tfgraphviz | tfgraphviz/__init__.py | Python | mit | 239 | 0.008368 |
import sys
import os
import socket
import tempfile
import subprocess
import shutil
import email
import threading
import gobject
import struct
import time
import re
sys.path.insert(0, "..")
if __name__=="__main__":
import gettext
gettext.install("D-RATS")
from d_rats import version
from d_rats import platform
from d_rats import formgui
from d_rats import utils
from d_rats import signals
from d_rats.ddt2 import calc_checksum
from d_rats.ui import main_events
from d_rats import agw
FBB_BLOCK_HDR = 1
FBB_BLOCK_DAT = 2
FBB_BLOCK_EOF = 4
FBB_BLOCK_TYPES = { FBB_BLOCK_HDR : "header",
FBB_BLOCK_DAT : "data",
FBB_BLOCK_EOF : "eof",
}
def escaped(string):
return string.replace("\n", r"\n").replace("\r", r"\r")
def run_lzhuf(cmd, data):
p = platform.get_platform()
cwd = tempfile.mkdtemp()
f = file(os.path.join(cwd, "input"), "wb")
f.write(data)
f.close()
kwargs = {}
if subprocess.mswindows:
su = subprocess.STARTUPINFO()
su.dwFlags |= subprocess.STARTF_USESHOWWINDOW
su.wShowWindow = subprocess.SW_HIDE
kwargs["startupinfo"] = su
if os.name == "nt":
lzhuf = "LZHUF_1.EXE"
elif sys.platform == "darwin":  # os.name is 'posix' on macOS, so check sys.platform instead
raise Exception("Not supported on MacOS")
else:
lzhuf = "lzhuf"
lzhuf_path = os.path.abspath(os.path.join(p.source_dir(), "libexec", lzhuf))
shutil.copy(os.path.abspath(lzhuf_path), cwd)
run = [lzhuf_path, cmd, "input", "output"]
print "Running %s in %s" % (run, cwd)
ret = subprocess.call(run, cwd=cwd, **kwargs)
print "LZHUF returned %s" % ret
if ret:
return None
f = file(os.path.join(cwd, "output"), "rb")
data = f.read()
f.close()
return data
def run_lzhuf_decode(data):
return run_lzhuf("d", data[2:])
def run_lzhuf_encode(data):
lzh = run_lzhuf("e", data)
lzh = struct.pack("<H", calc_checksum(lzh)) + lzh
return lzh
class WinLinkMessage:
def __init__(self, header=None):
self.__name = ""
self.__content = ""
self.__usize = self.__csize = 0
self.__id = ""
self.__type = "P"
if header:
fc, self.__type, self.__id, us, cs, off = header.split()
self.__usize = int(us)
self.__csize = int(cs)
if int(off) != 0:
raise Exception("Offset support not implemented")
def __decode_lzhuf(self, data):
return run_lzhuf_decode(data)
def __encode_lzhuf(self, data):
return run_lzhuf_encode(data)
def recv_exactly(self, s, l):
data = ""
while len(data) < l:
data += s.recv(l - len(data))
return data
def read_from_socket(self, s):
data = ""
i = 0
while True:
print "Reading at %i" % i
t = ord(self.recv_exactly(s, 1))
if chr(t) == "*":
msg = s.recv(1024)
raise Exception("Error getting message: %s" % msg)
if t not in FBB_BLOCK_TYPES.keys():
i += 1
print "Got %x (%c) while reading %i" % (t, chr(t), i)
continue
print "Found %s at %i" % (FBB_BLOCK_TYPES.get(t, "unknown"), i)
size = ord(self.recv_exactly(s, 1))
i += 2 # Account for the type and size
if t == FBB_BLOCK_HDR:
header = self.recv_exactly(s, size)
self.__name, offset, foo = header.split("\0")
print "Name is `%s' offset %s\n" % (self.__name, offset)
i += size
elif t == FBB_BLOCK_DAT:
print "Reading data block %i bytes" % size
data += self.recv_exactly(s, size)
i += size
elif t == FBB_BLOCK_EOF:
cs = size
for i in data:
cs += ord(i)
if (cs % 256) != 0:
print "Ack! %i left from cs %i" % (cs, size)
break
print "Got data: %i bytes" % len(data)
self.__content = self.__decode_lzhuf(data)
if self.__content is None:
raise Exception("Failed to decode compressed message")
if len(data) != self.__csize:
print "Compressed size %i != %i" % (len(data), self.__csize)
if len(self.__content) != self.__usize:
print "Uncompressed size %i != %i" % (len(self.__content),
self.__usize)
def send_to_socket(self, s):
data = self.__lzh_content
# filename \0 length(0) \0
header = self.__name + "\x00" + chr(len(data) & 0xFF) + "\x00"
s.send(struct.pack("BB", FBB_BLOCK_HDR, len(header)) + header)
sum = 0
while data:
chunk = data[:128]
data = data[128:]
for i in chunk:
sum += ord(i)
s.send(struct.pack("BB", FBB_BLOCK_DAT, len(chunk)) + chunk)
# Checksum, mod 256, two's complement
sum = (~sum & 0xFF) + 1
s.send(struct.pack("BB", FBB_BLOCK_EOF, sum))
def get_content(self):
return self.__content
def set_content(self, content, name="message"):
self.__name = name
self.__content = content
self.__lzh_content = self.__encode_lzhuf(content)
self.__usize = len(self.__content)
self.__csize = len(self.__lzh_content)
def get_id(self):
return self.__id
def set_id(self, id):
self.__id = id
def get_proposal(self):
return "FC %s %s %i %i 0" % (self.__type, self.__id,
self.__usize, self.__csize)
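# Illustrative proposal line (hypothetical numbers): a private ("P") message with id
# "1234_KK7DS", uncompressed size 320 and compressed size 210 would produce
# "FC P 1234_KK7DS 320 210 0".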
class WinLinkCMS:
def __init__(self, callsign):
self._callsign = callsign
self.__messages = []
self._conn = None
def __ssid(self):
return "[DRATS-%s-B2FHIM$]" % version.DRATS_VERSION
def _send(self, string):
print " -> %s" % string
self._conn.send(string + "\r")
def __recv(self):
resp = ""
while not resp.endswith("\r"):
resp += self._conn.recv(1)
print " <- %s" % escaped(resp)
return resp
def _recv(self):
r = ";"
while r.startswith(";"):
r = self.__recv()
return r;
def _send_ssid(self, recv_ssid):
try:
sw, ver, caps = recv_ssid[1:-1].split("-")
except Exception:
raise Exception("Conversation error (unparsable SSID `%s')" % resp)
self._send(self.__ssid())
prompt = self._recv().strip()
if not prompt.endswith(">"):
raise Exception("Conversation error (never got prompt)")
def __get_list(self):
self._send("FF")
msgs = []
reading = True
while reading:
resp = self._recv()
for l in resp.split("\r"):
if l.startswith("FC"):
print "Creating message for %s" % l
msgs.append(WinLinkMessage(l))
elif l.startswith("F>"):
reading = False
break
elif l.startswith("FQ"):
reading = False
break
elif not l:
pass
else:
print "Invalid line: %s" % l
raise Exception("Conversation error (%s while listing)" % l)
return msgs
def get_messages(self):
self._connect()
self._login()
self.__messages = self.__get_list()
if self.__messages:
self._send("FS %s" % ("Y" * len(self.__messages)))
for msg in self.__messages:
print "Getting message..."
try:
msg.read_from_socket(self._conn)
except Exception, e:
raise
#print e
self._send("FQ")
self._disconnect()
return len(self.__messages)
def get_message(self, index):
return self.__messages[index]
def send_messages(self, messages):
if len(messages) != 1:
raise Exception("Sorry, batch not implemented yet")
self._connect()
self._login()
cs = 0
for msg in messages:
p = msg.get_proposal()
for i in p:
cs += ord(i)
cs += ord("\r")
self._send(p)
cs = ((~cs & 0xFF) + 1)
self._send("F> %02X" % cs)
resp = self._recv()
if not resp.startswith("FS"):
raise Exception("Error talking to server: %s" % resp)
fs, accepts = resp.split()
if len(accepts) != len(messages):
raise Exception("Server refused some of my messages?!")
for msg in messages:
msg.send_to_socket(self._conn)
resp = self._recv()
self._disconnect()
return 1
class WinLinkTelnet(WinLinkCMS):
def __init__(self, callsign, server="server.winlink.org", port=8772):
self.__server = server
self.__port = port
WinLinkCMS.__init__(self, callsign)
def _connect(self):
class sock_file:
def __init__(self):
self.__s = 0
def read(self, len):
return self.__s.recv(len)
def write(self, buf):
return self.__s.send(buf)
def connect(self, spec):
return self.__s.connect(spec)
def close(self):
self.__s.close()
self._conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._conn.connect((self.__server, self.__port))
def _disconnect(self):
self._conn.close()
def _login(self):
resp = self._recv()
resp = self._recv()
if not resp.startswith("Callsign :"):
raise Exception("Conversation error (never saw login)")
self._send(self._callsign)
resp = self._recv()
if not resp.startswith("Password :"):
raise Exception("Conversation error (never saw password)")
self._send("CMSTELNET")
resp = self._recv()
self._send_ssid(resp)
class WinLinkRMSPacket(WinLinkCMS):
def __init__(self, callsign, remote, agw):
self.__remote = remote
self.__agw = agw
WinLinkCMS.__init__(self, callsign)
def _connect(self):
self._conn = agw.AGW_AX25_Connection(self.__agw, self._callsign)
self._conn.connect(self.__remote)
def _disconnect(self):
self._conn.disconnect()
def _login(self):
resp = self._recv()
self._send_ssid(resp)
class WinLinkThread(threading.Thread, gobject.GObject):
__gsignals__ = {
"mail-thread-complete" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
(gobject.TYPE_BOOLEAN, gobject.TYPE_STRING)),
"event" : signals.EVENT,
"form-received" : signals.FORM_RECEIVED,
"form-sent" : signals.FORM_SENT,
}
_signals = __gsignals__
def _emit(self, *args):
gobject.idle_add(self.emit, *args)
def __init__(self, config, callsign, callssid=None, send_msgs=[]):
threading.Thread.__init__(self)
self.setDaemon(True)
gobject.GObject.__init__(self)
if not callssid:
callssid = callsign
self._config = config
self._callsign = callsign
self._callssid = callssid
self.__send_msgs = send_msgs
def __create_form(self, msg):
mail = email.message_from_string(msg.get_content())
sender = mail.get("From", "Unknown")
if ":" in sender:
method, sender = sender.split(":", 1)
sender = "WL2K:" + sender
if self._callsign == self._config.get("user", "callsign"):
box = "Inbox"
else:
box = "Outbox"
template = os.path.join(self._config.form_source_dir(),
"email.xml")
formfn = os.path.join(self._config.form_store_dir(),
box, "%s.xml" % msg.get_id())
form = formgui.FormFile(template)
form.set_field_value("_auto_sender", sender)
form.set_field_value("recipient", self._callsign)
form.set_field_value("subject", mail.get("Subject", "Unknown"))
form.set_field_value("message", mail.get_payload())
form.set_path_src(sender.strip())
form.set_path_dst(self._callsign)
form.set_path_mid(msg.get_id())
form.add_path_element("@WL2K")
form.add_path_element(self._config.get("user", "callsign"))
form.save_to(formfn)
return formfn
def _run_incoming(self):
wl = self.wl2k_connect()
count = wl.get_messages()
for i in range(0, count):
msg = wl.get_message(i)
formfn = self.__create_form(msg)
self._emit("form-received", -999, formfn)
if count:
result = "Queued %i messages" % count
else:
result = "No messages"
return result
def _run_outgoing(self):
server = self._config.get("prefs", "msg_wl2k_server")
port = self._config.getint("prefs", "msg_wl2k_port")
wl = self.wl2k_connect()
for mt in self.__send_msgs:
m = re.search("Mid: (.*)\r\nSubject: (.*)\r\n", mt)
if m:
mid = m.groups()[0]
subj = m.groups()[1]
else:
mid = time.strftime("%H%M%SDRATS")
subj = "Message"
wlm = WinLinkMessage()
wlm.set_id(mid)
wlm.set_content(mt, subj)
print m
print mt
wl.send_messages([wlm])
#self._emit("form-sent", -999,
return "Complete"
def run(self):
if self.__send_msgs:
result = self._run_outgoing()
else:
result = self._run_incoming()
self._emit("mail-thread-complete", True, result)
class WinLinkTelnetThread(WinLinkThread):
def __init__(self, *args, **kwargs):
WinLinkThread.__init__(self, *args, **kwargs)
def wl2k_connect(self):
server = self._config.get("prefs", "msg_wl2k_server")
port = self._config.getint("prefs", "msg_wl2k_port")
return WinLinkTelnet(self._callssid, server, port)
class WinLinkAGWThread(WinLinkThread):
def __init__(self, *args, **kwargs):
WinLinkThread.__init__(self, *args, **kwargs)
self.__agwconn = None
def set_agw_conn(self, agwconn):
self.__agwconn = agwconn
def wl2k_connect(self):
remote = self._config.get("prefs", "msg_wl2k_rmscall")
return WinLinkRMSPacket(self._callssid, remote, self.__agwconn)
def wl2k_auto_thread(ma, *args, **kwargs):
mode = ma.config.get("settings", "msg_wl2k_mode")
#May need for AGW
#call = config.get("user", "callsign")
print "WL2K Mode is: %s" % mode
if mode == "Network":
mt = WinLinkTelnetThread(ma.config, *args, **kwargs)
elif mode == "RMS":
# TEMPORARY
port = ma.config.get("prefs", "msg_wl2k_rmsport")
if not ma.sm.has_key(port):
raise Exception("No such AGW port %s for WL2K" % port)
a = ma.sm[port][0].pipe.get_agw_connection()
#a = agw.AGWConnection("127.0.0.1", 8000, 0.5)
mt = WinLinkAGWThread(ma.config, *args, **kwargs)
mt.set_agw_conn(a)
else:
raise Exception("Unknown WL2K mode: %s" % mode)
return mt
if __name__=="__main__":
if True:
#wl = WinLinkTelnet("KK7DS", "sandiego.winlink.org")
agwc = agw.AGWConnection("127.0.0.1", 8000, 0.5)
wl = WinLinkRMSPacket("KK7DS", "N7AAM-11", agwc)
count = wl.get_messages()
print "%i messages" % count
for i in range(0, count):
print "--Message %i--\n%s\n--End--\n\n" % (i, wl.get_message(i).get_content())
else:
text = "This is a test!"
_m = """Mid: 12345_KK7DS\r
From: KK7DS\r
To: dsmith@danplanet.com\r
Subject: This is a test\r
Body: %i\r
\r
%s
""" % (len(text), text)
m = WinLinkMessage()
m.set_id("1234_KK7DS")
m.set_content(_m)
wl = WinLinkTelnet("KK7DS")
wl.send_messages([m])
| maurizioandreotti/D-Rats-0.3.ev | d_rats/wl2k.py | Python | gpl-3.0 | 16,452 | 0.003404 |
from __future__ import division, print_function
import numpy as np
import nose.tools as nt
import regreg.api as rr
from ..group_lasso import (group_lasso,
selected_targets,
full_targets,
debiased_targets)
from ...tests.instance import gaussian_instance
from ...tests.flags import SET_SEED
from ...tests.decorators import set_sampling_params_iftrue, set_seed_iftrue
from ...algorithms.sqrt_lasso import choose_lambda, solve_sqrt_lasso
from ..randomization import randomization
from ...tests.decorators import rpy_test_safe
@set_seed_iftrue(SET_SEED)
def test_group_lasso(n=400,
p=100,
signal_fac=3,
s=5,
sigma=3,
target='full',
rho=0.4,
randomizer_scale=.75,
ndraw=100000):
"""
Test group lasso
"""
inst, const = gaussian_instance, group_lasso.gaussian
signal = np.sqrt(signal_fac * np.log(p))
X, Y, beta = inst(n=n,
p=p,
signal=signal,
s=s,
equicorrelated=False,
rho=rho,
sigma=sigma,
random_signs=True)[:3]
orthogonal = True
if orthogonal:
X = np.linalg.svd(X, full_matrices=False)[0]
Y = X.dot(beta) + sigma * np.random.standard_normal(n)
n, p = X.shape
sigma_ = np.std(Y)
groups = np.floor(np.arange(p)/2).astype(np.int)
weights = dict([(i, sigma_ * 2 * np.sqrt(2)) for i in np.unique(groups)])
conv = const(X,
Y,
groups,
weights,
randomizer_scale=randomizer_scale * sigma_)
signs = conv.fit()
nonzero = conv.selection_variable['directions'].keys()
if target == 'full':
(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives) = full_targets(conv.loglike,
conv._W,
nonzero,
conv.penalty)
elif target == 'selected':
(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives) = selected_targets(conv.loglike,
conv._W,
nonzero,
conv.penalty)
elif target == 'debiased':
(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives) = debiased_targets(conv.loglike,
conv._W,
nonzero,
conv.penalty)
_, pval, intervals = conv.summary(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives,
ndraw=ndraw,
compute_intervals=False)
which = np.zeros(p, np.bool)
for group in conv.selection_variable['directions'].keys():
which_group = conv.penalty.groups == group
which += which_group
return pval[beta[which] == 0], pval[beta[which] != 0]
@set_seed_iftrue(SET_SEED)
def test_lasso(n=400,
p=200,
signal_fac=1.5,
s=5,
sigma=3,
target='full',
rho=0.4,
ndraw=10000):
"""
Test group lasso with groups of size 1, ie lasso
"""
inst, const = gaussian_instance, group_lasso.gaussian
signal = np.sqrt(signal_fac * np.log(p))
X, Y, beta = inst(n=n,
p=p,
signal=signal,
s=s,
equicorrelated=False,
rho=rho,
sigma=sigma,
random_signs=True)[:3]
n, p = X.shape
sigma_ = np.std(Y)
groups = np.arange(p)
weights = dict([(i, sigma_ * 2 * np.sqrt(2)) for i in np.unique(groups)])
conv = const(X,
Y,
groups,
weights)
signs = conv.fit()
nonzero = conv.selection_variable['directions'].keys()
if target == 'full':
(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives) = full_targets(conv.loglike,
conv._W,
nonzero,
conv.penalty)
elif target == 'selected':
(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives) = selected_targets(conv.loglike,
conv._W,
nonzero,
conv.penalty)
elif target == 'debiased':
(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives) = debiased_targets(conv.loglike,
conv._W,
nonzero,
conv.penalty)
_, pval, intervals = conv.summary(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives,
ndraw=ndraw,
compute_intervals=False)
which = np.zeros(p, np.bool)
for group in conv.selection_variable['directions'].keys():
which_group = conv.penalty.groups == group
which += which_group
return pval[beta[which] == 0], pval[beta[which] != 0]
@set_seed_iftrue(SET_SEED)
def test_mixed(n=400,
p=200,
signal_fac=1.5,
s=5,
sigma=3,
target='full',
rho=0.4,
ndraw=10000):
"""
Test group lasso with a mix of groups of size 1, and larger
"""
inst, const = gaussian_instance, group_lasso.gaussian
signal = np.sqrt(signal_fac * np.log(p))
X, Y, beta = inst(n=n,
p=p,
signal=signal,
s=s,
equicorrelated=False,
rho=rho,
sigma=sigma,
random_signs=True)[:3]
n, p = X.shape
sigma_ = np.std(Y)
groups = np.arange(p)
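    # Singleton groups everywhere except the last 8 coordinates, which form two
    # larger groups (labelled -2 and -1, of sizes 3 and 5).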
groups[-5:] = -1
groups[-8:-5] = -2
Y += X[:,-8:].dot(np.ones(8)) * 5 # so we select the last two groups
weights = dict([(i, sigma_ * 2 * np.sqrt(2)) for i in np.unique(groups)])
conv = const(X,
Y,
groups,
weights)
signs = conv.fit()
nonzero = conv.selection_variable['directions'].keys()
if target == 'full':
(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives) = full_targets(conv.loglike,
conv._W,
nonzero,
conv.penalty)
elif target == 'selected':
(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives) = selected_targets(conv.loglike,
conv._W,
nonzero,
conv.penalty)
elif target == 'debiased':
(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives) = debiased_targets(conv.loglike,
conv._W,
nonzero,
conv.penalty)
_, pval, intervals = conv.summary(observed_target,
group_assignments,
cov_target,
cov_target_score,
alternatives,
ndraw=ndraw,
compute_intervals=False)
    which = np.zeros(p, bool)
for group in conv.selection_variable['directions'].keys():
which_group = conv.penalty.groups == group
which += which_group
return pval[beta[which] == 0], pval[beta[which] != 0]
@set_seed_iftrue(SET_SEED)
def test_all_targets(n=100, p=20, signal_fac=1.5, s=5, sigma=3, rho=0.4):
for target in ['full', 'selected', 'debiased']:
test_group_lasso(n=n,
p=p,
signal_fac=signal_fac,
s=s,
sigma=sigma,
rho=rho,
target=target)
def main(nsim=500, n=200, p=50, target='full', sigma=3):
import matplotlib.pyplot as plt
P0, PA = [], []
from statsmodels.distributions import ECDF
for i in range(nsim):
        try:
            p0, pA = test_group_lasso(n=n, p=p, target=target, sigma=sigma)
        except Exception:
            # skip this simulation if selection or inference fails
            continue
        print(len(p0), len(pA))
        P0.extend(p0)
        PA.extend(pA)
P0_clean = np.array(P0)
        P0_clean = P0_clean[P0_clean > 1.e-5]  # drop essentially-zero p-values (counted as failures below)
        print(np.mean(P0_clean), np.std(P0_clean),
              np.mean(np.array(PA) < 0.05),
              np.sum(np.array(PA) < 0.05) / (i+1),
              np.mean(np.array(P0) < 0.05),
              np.mean(P0_clean < 0.05),
              np.mean(np.array(P0) < 1e-5),
              'null pvalue + power + failure')
if i % 3 == 0 and i > 0:
U = np.linspace(0, 1, 101)
plt.clf()
if len(P0_clean) > 0:
plt.plot(U, ECDF(P0_clean)(U))
if len(PA) > 0:
plt.plot(U, ECDF(PA)(U), 'r')
plt.plot([0, 1], [0, 1], 'k--')
plt.savefig("plot.pdf")
plt.show()
|
selective-inference/selective-inference
|
selectinf/randomized/tests/test_group_lasso.py
|
Python
|
bsd-3-clause
| 10,569 | 0.011922 |
"""Mac-only module to find the home file of a resource."""
import sstruct
import array
import calldll
import macfs, Res
def HomeResFile(res):
"""Return a path to the file in which resource 'res' lives."""
return GetFileLocation(res.HomeResFile())
def GetFileLocation(refNum):
"""Return a path to the open file identified with refNum."""
pb = ParamBlock(refNum)
return pb.getPath()
#
# Internal cruft, adapted from MoreFiles
#
_InterfaceLib = calldll.getlibrary("InterfaceLib")
GetVRefNum = calldll.newcall(_InterfaceLib.GetVRefNum, "None", "InShort", "OutShort")
_getInfo = calldll.newcall(_InterfaceLib.PBGetFCBInfoSync, "Short", "InLong")
_FCBPBFormat = """
qLink: l
qType: h
ioTrap: h
ioCmdAddr: l
ioCompletion: l
ioResult: h
ioNamePtr: l
ioVRefNum: h
ioRefNum: h
filler: h
ioFCBIndx: h
filler1: h
ioFCBFINm: l
ioFCBFlags: h
ioFCBStBlk: h
ioFCBEOF: l
ioFCBPLen: l
ioFCBCrPs: l
ioFCBVRefNum: h
ioFCBClpSiz: l
ioFCBParID: l
"""
class ParamBlock:
"""Wrapper for the very low level FCBPB record."""
def __init__(self, refNum):
self.__fileName = array.array("c", "\0" * 64)
sstruct.unpack(_FCBPBFormat,
"\0" * sstruct.calcsize(_FCBPBFormat), self)
self.ioNamePtr = self.__fileName.buffer_info()[0]
self.ioRefNum = refNum
self.ioVRefNum = GetVRefNum(refNum)
self.__haveInfo = 0
def getInfo(self):
if self.__haveInfo:
return
data = sstruct.pack(_FCBPBFormat, self)
buf = array.array("c", data)
ptr = buf.buffer_info()[0]
err = _getInfo(ptr)
if err:
raise Res.Error, ("can't get file info", err)
sstruct.unpack(_FCBPBFormat, buf.tostring(), self)
self.__haveInfo = 1
def getFileName(self):
self.getInfo()
data = self.__fileName.tostring()
return data[1:ord(data[0])+1]
def getFSSpec(self):
self.getInfo()
vRefNum = self.ioVRefNum
parID = self.ioFCBParID
return macfs.FSSpec((vRefNum, parID, self.getFileName()))
def getPath(self):
return self.getFSSpec().as_pathname()
if __name__ == "__main__":
fond = Res.GetNamedResource("FOND", "Helvetica")
print HomeResFile(fond)
|
stack-of-tasks/rbdlpy
|
tutorial/lib/python2.7/site-packages/FontTools/fontTools/misc/homeResFile.py
|
Python
|
lgpl-3.0
| 2,148 | 0.035847 |
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((14490.9, 3029.12, 3060.83), (0.7, 0.7, 0.7), 507.685)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((15116.6, 3760.52, 2692.79), (0.7, 0.7, 0.7), 479.978)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((13383.1, 4090.52, 3479.5), (0.7, 0.7, 0.7), 681.834)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((11225.1, 4429.85, 4322.38), (0.7, 0.7, 0.7), 522.532)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((10554.9, 4586.88, 4596.63), (0, 1, 0), 751.925)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((12326, 4639.28, 5766.95), (0.7, 0.7, 0.7), 437.001)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((11370.4, 6195.88, 6398.83), (0.7, 0.7, 0.7), 710.767)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((12294.8, 7399.36, 7205.16), (0.7, 0.7, 0.7), 762.077)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((11975.4, 8902.22, 7425.72), (0.7, 0.7, 0.7), 726.799)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((11370.5, 10459.7, 8176.74), (0.7, 0.7, 0.7), 885.508)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((11026.4, 11464.8, 6823.83), (0.7, 0.7, 0.7), 778.489)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((12450.8, 13012.5, 6529.84), (0.7, 0.7, 0.7), 790.333)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((13874.2, 14489.8, 6268.22), (0.7, 0.7, 0.7), 707.721)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((14235.6, 13007.5, 5753.75), (0.7, 0.7, 0.7), 651.166)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((13215.9, 13710.7, 6892.86), (0.7, 0.7, 0.7), 708.61)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((11980.5, 13227.6, 7763.68), (0.7, 0.7, 0.7), 490.595)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((11432, 11904.7, 8032.92), (0.7, 0.7, 0.7), 591.565)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((10650.9, 10579.8, 8390.35), (0.7, 0.7, 0.7), 581.287)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((11842.6, 9383.56, 9068.83), (0.7, 0.7, 0.7), 789.529)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((11007.5, 8508.02, 10007.1), (0.7, 0.7, 0.7), 623.587)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((9961.01, 7958.91, 11407.6), (0.7, 0.7, 0.7), 1083.56)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((9762.12, 8128.95, 13085.8), (0.7, 0.7, 0.7), 504.258)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((9105.04, 8055.92, 11818.5), (0.7, 0.7, 0.7), 805.519)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((8098.96, 8945.37, 10136.6), (0.7, 0.7, 0.7), 631.708)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((7113.44, 10486.1, 9008.71), (0.7, 0.7, 0.7), 805.942)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((6624.41, 11283.8, 8528.05), (1, 0.7, 0), 672.697)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((5149.07, 9466.37, 7162.96), (0.7, 0.7, 0.7), 797.863)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((3504.19, 8941.41, 6449.57), (1, 0.7, 0), 735.682)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((3197.3, 7852.11, 7001.47), (0.7, 0.7, 0.7), 602.14)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((2170.99, 6197.96, 8273.91), (0.7, 0.7, 0.7), 954.796)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((2865.17, 6382.4, 7807.34), (0.7, 0.7, 0.7), 1021.88)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((1678.42, 6425.8, 7006.61), (0.7, 0.7, 0.7), 909.323)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((135.134, 4970.61, 6205.73), (0.7, 0.7, 0.7), 621.049)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((571.58, 4380.79, 4957.66), (0.7, 0.7, 0.7), 525.154)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((1541.14, 3448.69, 4309.93), (0.7, 0.7, 0.7), 890.246)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((1849.42, 1776.94, 3839.31), (0.7, 0.7, 0.7), 671.216)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((2383.37, 290.48, 4448.44), (0.7, 0.7, 0.7), 662.672)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((2421.7, 991.476, 5898.51), (0.7, 0.7, 0.7), 646.682)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((1032.67, 1633.14, 5847.07), (0.7, 0.7, 0.7), 769.945)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((1237.16, 3532.79, 5257.35), (0.7, 0.7, 0.7), 606.92)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((553.737, 3534.85, 4213.84), (0.7, 0.7, 0.7), 622.571)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((1601.81, 4124.33, 4851.28), (0.7, 0.7, 0.7), 466.865)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((2243.52, 3644.67, 4704.5), (0.7, 0.7, 0.7), 682.933)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((1631.44, 3925.35, 4647.59), (0.7, 0.7, 0.7), 809.326)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((1142.92, 5369.1, 5674.5), (0.7, 0.7, 0.7), 796.72)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((2626.92, 7819.11, 5319.34), (0.7, 0.7, 0.7), 870.026)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((3047.34, 9026.17, 3950.98), (0.7, 0.7, 0.7), 909.577)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((3457.58, 9145.33, 2883.25), (0, 1, 0), 500.536)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((2644.32, 8950.16, 1073.97), (0.7, 0.7, 0.7), 725.276)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((846.027, 9108.82, -893.839), (0.7, 0.7, 0.7), 570.331)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((-6.51386, 7800.76, -208.332), (0.7, 0.7, 0.7), 492.203)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((-40.7095, 8772.6, 2535.54), (0, 1, 0), 547.7)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((421.267, 8145, 2491.85), (0.7, 0.7, 0.7), 581.921)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((115.381, 6285.36, 2181.62), (0.7, 0.7, 0.7), 555.314)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((658.606, 4858.23, 1911.62), (0.7, 0.7, 0.7), 404.219)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((2406.42, 4665.42, 2429.8), (0.7, 0.7, 0.7), 764.234)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
batxes/4Cin
|
Six_zebra_models/Six_zebra_models_final_output_0.1_-0.1_13000/mtx1_models/Six_zebra_models30110.py
|
Python
|
gpl-3.0
| 13,927 | 0.025203 |
import json
from multiprocessing import Pool, Manager
import os
import requests
import Quandl as quandl
# set working directory to script directory.
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
errors = []
def get_url(url, vars=None):
if vars is not None:
var_string = '?'
for key, value in vars.items():
var_string += '{0}={1}&'.format(key, value)
var_string = var_string[:-1]
url += var_string
return {'url': url, 'page': vars['page']}
def get_csv(url):
    results = requests.get(url['url'])
    fname = os.path.join(dname, 'meta/zhv_index/{0}.csv'.format(url['page']))
    print(fname)
    with open(fname, 'w') as f:
        # write the raw CSV payload for this page of results
        f.write(results.text)
    return
def main():
    urls = []
    url = 'https://www.quandl.com/api/v3/datasets.csv'
    for i in range(1, 12663):
        vars = {'database_code': 'ZILL',
                'per_page': '100',
                'sort_by': 'id',
                'page': str(i),
                'api_key': 'sWyovn27HuCobNWR2xyz'}
        urls.append(get_url(url, vars))
    pool = Pool(8)
    pool.map(get_csv, urls)
    pool.close()
    pool.join()
    print('Errors: ' + str(errors))
if __name__ == '__main__':
main()
|
pjryan126/solid-start-careers
|
store/api/zillow/extract.py
|
Python
|
gpl-2.0
| 1,175 | 0.043404 |
import os, sys
from array import array
try:
from distance import cdistance
except ImportError:
cdistance = None
from distance import _pyimports as pydistance
if sys.version_info.major < 3:
t_unicode = unicode
t_bytes = lambda s: s
else:
t_unicode = lambda s: s
t_bytes = lambda s: s.encode()
all_types = [
("unicode", t_unicode),
("bytes", t_bytes),
("list", list),
("tuple", tuple),
]
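# Every test below is run once per (name, constructor) pair above, so both the C
# and pure-Python implementations get exercised on unicode, bytes, lists and tuples.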
def hamming(func, t, **kwargs):
# types; only for c
if kwargs["lang"] == "C":
try:
func(1, t("foo"))
except ValueError:
pass
try:
func(t("foo"), 1)
except ValueError:
pass
# empty string
assert func(t(""), t("")) == 0
# common
assert func(t("abc"), t("abc")) == 0
assert func(t("abc"), t("abd")) == 1
# wrong length
try:
func(t("foo"), t("foobar"))
except ValueError:
pass
try:
func(t(""), t("foo"))
except ValueError:
pass
# normalization
assert func(t(""), t(""), normalized=True) == 0.0
assert func(t("abc"), t("abc"), normalized=True) == 0.0
assert func(t("ab"), t("ac"), normalized=True) == 0.5
assert func(t("abc"), t("def"), normalized=True) == 1.0
def fast_comp(func, t, **kwargs):
# types; only for c
if kwargs["lang"] == "C":
try:
func(1, t("foo"))
except ValueError:
pass
try:
func(t("foo"), 1)
except ValueError:
pass
# empty strings
assert func(t(""), t("")) == 0
assert func(t(""), t("a")) == func(t("a"), t("")) == 1
# edit ops
assert func(t("aa"), t("aa")) == 0
assert func(t("ab"), t("aa")) == 1
assert func(t("ab"), t("a")) == 1
assert func(t("ab"), t("abc")) == 1
# dist limit
assert func(t("a"), t("bcd")) == func(t("bcd"), t("a")) == -1
# transpositions
assert func(t("abc"), t("bac"), transpositions=True) == \
func(t("bac"), t("abc"), transpositions=True) == 1
def levenshtein(func, t, **kwargs):
# types; only for c
if kwargs["lang"] == "C":
try:
func(1, t("foo"))
except ValueError:
pass
try:
func(t("foo"), 1)
except ValueError:
pass
# empty strings
assert func(t(""), t("")) == 0
assert func(t(""), t("abcd")) == func(t("abcd"), t("")) == 4
# edit ops
assert func(t("aa"), t("aa")) == 0
assert func(t("ab"), t("aa")) == 1
assert func(t("ab"), t("a")) == 1
assert func(t("ab"), t("abc")) == 1
# dist limit
assert func(t("a"), t("b"), max_dist=0) == -1
assert func(t("a"), t("b"), max_dist=1) == 1
assert func(t("foo"), t("bar"), max_dist=-1) == 3
def nlevenshtein(func, t, **kwargs):
# types; only for c
if kwargs["lang"] == "C":
try:
func(1, t("foo"))
except ValueError:
pass
try:
func(t("foo"), 1)
except ValueError:
pass
# empty strings
assert func(t(""), t(""), 1) == func(t(""), t(""), 2) == 0.0
assert func(t(""), t("foo"), 1) == func(t("foo"), t(""), 1) == \
func(t(""), t("foo"), 2) == func(t("foo"), t(""), 2) == 1.0
assert func(t("aa"), t("aa"), 1) == func(t("aa"), t("aa"), 2) == 0.0
assert func(t("ab"), t("aa"), 1) == func(t("ab"), t("aa"), 2) == 0.5
assert func(t("ab"), t("a"), 1) == func(t("ab"), t("a"), 2) == 0.5
assert func(t("ab"), t("abc"), 1) == func(t("ab"), t("abc"), 2) == 0.3333333333333333
# multiple alignments
assert func(t("abc"), t("adb"), 1) == 0.6666666666666666
assert func(t("abc"), t("adb"), 2) == 0.5
def lcsubstrings(func, t, **kwargs):
# types; only for c
if kwargs["lang"] == "C":
try:
func(1, t("foo"))
except ValueError:
pass
try:
func(t("foo"), 1)
except ValueError:
pass
# empty strings
try:
assert func(t(""), t(""), False) == set()
except TypeError:
if t is not list: raise
assert func(t(""), t(""), True) == (0, ())
try:
assert func(t(""), t("foo"), False) == func(t("foo"), t(""), False) == set()
except TypeError:
if t is not list: raise
assert func(t(""), t("foo"), True) == func(t("foo"), t(""), True) == (0, ())
# common
try:
assert func(t("abcd"), t("cdba"), False) == {t('cd')}
except TypeError:
if t is not list: raise
assert func(t("abcd"), t("cdba"), True) == (2, ((2, 0),))
# reverse
try:
assert func(t("abcdef"), t("cdba"), False) == func(t("cdba"), t("abcdef"), False)
except TypeError:
if t is not list: raise
assert func(t("abcdef"), t("cdba"), True) == func(t("cdba"), t("abcdef"), True)
def itors_common(func, t, **kwargs):
if kwargs["lang"] == "C":
# types check; only need to do it for C impl to avoid an eventual segfaults.
try: func(1, t("foo"))
except ValueError: pass
itor = func(t("foo"), [t("foo"), 3333])
next(itor)
try: next(itor)
except ValueError: pass
# values drop
itor = func(t("aa"), [t("aa"), t("abcd"), t("ba")])
assert next(itor) == (0, t("aa"))
assert next(itor) == (1, t("ba"))
def ilevenshtein(func, t, **kwargs):
itors_common(lambda a, b: func(a, b, max_dist=2), t, **kwargs)
def ifast_comp(func, t, **kwargs):
itors_common(func, t, **kwargs)
#transpositions
g = func(t("abc"), [t("bac")], transpositions=False)
assert next(g) == (2, t('bac'))
g = func(t("abc"), [t("bac")], transpositions=True)
assert next(g) == (1, t("bac"))
write = lambda s: sys.stderr.write(s + '\n')
tests = ["hamming", "fast_comp", "levenshtein", "lcsubstrings", "nlevenshtein", "ilevenshtein", "ifast_comp"]
def run_test(name):
if cdistance:
cfunc = getattr(cdistance, name)
run_lang_test(name, cfunc, "C")
write("")
pyfunc = getattr(pydistance, name)
run_lang_test(name, pyfunc, "py")
if cdistance is None:
write("skipped C tests")
write("")
def run_lang_test(name, func, lang):
print("%s (%s)..." % (name, lang))
for tname, typ in all_types:
write("type: %s" % tname)
globals()[name](func, typ, lang=lang)
if __name__ == "__main__":
args = sys.argv[1:]
if not args:
for test in tests:
run_test(test)
sys.exit()
for name in args:
if name in tests:
run_test(name)
else:
write("no such test: %s" % name)
sys.exit(1)
|
doukremt/distance
|
tests/tests.py
|
Python
|
gpl-2.0
| 5,836 | 0.038897 |
import json
from util import d
import os
__home = os.path.expanduser("~").replace('\\', '/') + "/PixelWeb/"
BASE_SERVER_CONFIG = d({
"id":"server_config",
"display": "server_config",
"preconfig": False,
"presets":[],
"params": [{
"id": "external_access",
"label": "Allow External Access",
"type": "bool",
"default": True,
"help":"On: Other computers on your network can access PixelWeb. Off: LocalHost access only."
},{
"id": "port",
"label": "Server Port",
"type": "int",
"default": 8080,
"help":"Port to listen on."
},{
"id": "load_defaults",
"label": "Load Last Config on Start",
"type": "bool",
"default": False,
"help":"Load last driver/controller configuration on application start."
},
{
"id": "show_debug",
"label": "Show Debug in Console",
"type": "bool",
"default": False,
"help":"Show BiblioPixel debug in server console (not in main UI)."
},{
"id": "mod_dirs",
"label": "Module Directories",
"type": "str_multi",
"default": [],
"help":"Directories from which to load modules (animations, drivers, controllers, pre-configs).",
"replace": {"\\":"/"}
},
{
"id": "off_anim_time",
"label": "All Off Timeout",
"type": "int",
"default": 10,
"min": 0,
"max": 3600,
"help":"Keep display off when not running an animation by actively turning all pixels off every X seconds. Set to 0 to disable."
},]
});
def setHome(home):
global __home
__home = home
def genDefaultConfig(params):
c = {}
for p in params:
p = d(p)
c[p.id] = p.default
return c
def initConfig():
try:
if not os.path.exists(__home):
print "Creating {}".format(__home)
os.makedirs(__home)
except:
print "Failed to initialize PixelWeb config!"
def readConfig(file, key = None, path=None):
if not path:
path = __home
data = {}
try:
with open(path + "/" + file + ".json", "r") as fp:
data = json.load(fp, encoding='utf-8')
if key:
if key in data:
data = data[key]
else:
data = {}
except Exception, e:
pass
return d(data)
def writeConfig(file, data, key = None, path=None):
if not path:
path = __home
base = data
if key:
base = readConfig(file, path=path)
base[key] = data
with open(path + "/" + file + ".json", "w") as fp:
json.dump(base, fp, indent=4, sort_keys=True)
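# For example, writeConfig("config", {"port": 8080}, key="server") would store the
# dict under the "server" key of ~/PixelWeb/config.json (assuming the default home).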
def paramsToDict(params):
data = {}
for p in params:
if "default" not in p:
p.default = None
data[p.id] = p.default
return data
def readServerConfig():
data = readConfig("config", path=__home)
base = paramsToDict(BASE_SERVER_CONFIG.params)
if len(data.keys()) == 0:
data = paramsToDict(BASE_SERVER_CONFIG.params)
elif len(data.keys()) != len(base.keys()):
data.upgrade(base)
return d(data)
def writeServerConfig(data):
writeConfig("config", data)
def upgradeServerConfig():
b = genDefaultConfig(BASE_SERVER_CONFIG.params)
cfg = readServerConfig()
cfg.upgrade(b)
writeServerConfig(cfg)
|
ManiacalLabs/PixelWeb
|
pixelweb/config.py
|
Python
|
mit
| 3,791 | 0.008441 |
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from nova.scheduler import filters
from nova.scheduler.filters import extra_specs_ops
LOG = logging.getLogger(__name__)
class ComputeCapabilitiesFilter(filters.BaseHostFilter):
"""HostFilter hard-coded to work with InstanceType records."""
# Instance type and host capabilities do not change within a request
run_filter_once_per_request = True
def _get_capabilities(self, host_state, scope):
cap = host_state
for index in range(0, len(scope)):
try:
if isinstance(cap, six.string_types):
try:
cap = jsonutils.loads(cap)
except ValueError as e:
LOG.debug("%(host_state)s fails. The capabilities "
"'%(cap)s' couldn't be loaded from JSON: "
"%(error)s",
{'host_state': host_state, 'cap': cap,
'error': e})
return None
if not isinstance(cap, dict):
if getattr(cap, scope[index], None) is None:
# If can't find, check stats dict
cap = cap.stats.get(scope[index], None)
else:
cap = getattr(cap, scope[index], None)
else:
cap = cap.get(scope[index], None)
except AttributeError as e:
LOG.debug("%(host_state)s fails. The capabilities couldn't "
"be retrieved: %(error)s.",
{'host_state': host_state, 'error': e})
return None
if cap is None:
LOG.debug("%(host_state)s fails. There are no capabilities "
"to retrieve.",
{'host_state': host_state})
return None
return cap
def _satisfies_extra_specs(self, host_state, instance_type):
"""Check that the host_state provided by the compute service
satisfies the extra specs associated with the instance type.
"""
if 'extra_specs' not in instance_type:
return True
for key, req in instance_type.extra_specs.items():
# Either not scope format, or in capabilities scope
scope = key.split(':')
# If key does not have a namespace, the scope's size is 1, check
# whether host_state contains the key as an attribute. If not,
# ignore it. If it contains, deal with it in the same way as
            # 'capabilities:key'. This is for backward compatibility.
            # If the key has a namespace, the scope's size will be bigger than
            # 1, check whether the namespace is 'capabilities'. If not,
# ignore it.
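            # e.g. 'capabilities:hypervisor_type' yields scope ['hypervisor_type'],
            # while a key such as 'hw:cpu_policy' is skipped here because its
            # namespace is not 'capabilities'.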
if len(scope) == 1:
stats = getattr(host_state, 'stats', {})
has_attr = hasattr(host_state, key) or key in stats
if not has_attr:
continue
else:
if scope[0] != "capabilities":
continue
else:
del scope[0]
cap = self._get_capabilities(host_state, scope)
if cap is None:
return False
if not extra_specs_ops.match(str(cap), req):
LOG.debug("%(host_state)s fails extra_spec requirements. "
"'%(req)s' does not match '%(cap)s'",
{'host_state': host_state, 'req': req,
'cap': cap})
return False
return True
def host_passes(self, host_state, spec_obj):
"""Return a list of hosts that can create instance_type."""
instance_type = spec_obj.flavor
if not self._satisfies_extra_specs(host_state, instance_type):
LOG.debug("%(host_state)s fails instance_type extra_specs "
"requirements", {'host_state': host_state})
return False
return True
|
rajalokan/nova
|
nova/scheduler/filters/compute_capabilities_filter.py
|
Python
|
apache-2.0
| 4,838 | 0 |
#!/usr/bin/env python
"""
.. module:: camerastation.py
:platform: Unix, Windows
:synopsis: Ulyxes - an open source project to drive total stations and
publish observation results. GPL v2.0 license Copyright (C)
2010- Zoltan Siki <siki.zoltan@epito.bme.hu>
.. moduleauthor:: Bence Turak <bence.turak@gmail.com>
"""
import sys
#sys.path.append('ulyxes/pyapi/')
#sys.path.append('lib/')
from totalstation import TotalStation
#from serialiface import SerialIface
from camera import Camera
#from steppermotor import StepperMotor
from imgprocess import ImgProcess
import numpy as np
import os
import cv2
import recognition as rec
from angle import Angle
import math
import time
class CameraStation(TotalStation, Camera):
    '''CameraStation class for a TotalStation combined with a camera
:param name: name of instrument
:param measureUnit: measure unit part of instrument
:param measureIface: interface to physical unit
:param writerUnit: store data, default None
'''
#constants
#FOCUS_CLOSER = 1
#FOCUS_FARTHER = 2
def __init__(self, name, measureUnit, measureIface, writerUnit = None):
'''constructor
'''
TotalStation.__init__(self, name, measureUnit, measureIface, writerUnit)
Camera.__init__(self, name, measureUnit, measureIface, writerUnit)
#StepperMotor.__init__(self, stepperMotorUnit, speed, halfSteps)
self._affinParams = None
def LoadAffinParams(self, file):
"""Load affin params to measure on pictures
:param file: name of the params file (It have to be .npy file)
"""
self._affinParams = np.load(file)
def PicMes(self, photoName, targetType = None):
'''Measure angles between the target and the optical axis
:param photoName: name of the photo
:param targetType: type of the target
:returns: horizontal (hz) and vertical (v) correction angle in dictionary
'''
ok = False
while not ok:
print(photoName)
file = open(photoName, 'w+b')
print((int(self._affinParams[0,3]), int(self._affinParams[1,3])))
ang = self.GetAngles()
self.TakePhoto(file, (int(self._affinParams[0,3]), int(self._affinParams[1,3])))
file.close()
try:
img = cv2.imread(photoName, 1)
picCoord = rec.recogChessPattern(img)
print(picCoord)
ok = True
            except Exception:
                # target not recognised on this photo, retake it
                continue
img[int(picCoord[1]),:] = [0,255,255]
img[:,int(picCoord[0])] = [0,255,255]
cv2.imwrite(photoName, img)
angles = {}
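        # Map the pixel offset of the detected target from the calibrated reference
        # pixel into angular corrections; the horizontal term is scaled by 1/sin(v),
        # presumably to reduce the inclined offset to the horizontal circle.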
angles['hz'] = Angle(1/math.sin(ang['v'].GetAngle('RAD'))*(self._affinParams[0,1]*(picCoord[0] - round(self._affinParams[0,0])) + self._affinParams[0,2]*(picCoord[1] - round(self._affinParams[1,0]))))
angles['v'] = Angle(self._affinParams[1,1]*(picCoord[0] - round(self._affinParams[0,0])) + self._affinParams[1,2]*(picCoord[1] - round(self._affinParams[1,0])))
return angles
def GetAbsAngles(self, targetType = None):
"""Get absolute angles with automatical target recognition (not prism)
:param targetType: type of target (None)
:returns: corrected horinzontas (hz) and vertical (v) angles in dictionary. It contains the last correction angles too.
"""
t = time.localtime()
picName = str(t.tm_year) + '_' + str(t.tm_mon) + '_' + str(t.tm_mday) + '_' + str(t.tm_hour) + '_' + str(t.tm_min) + '_' + str(t.tm_sec) + '.png'
corr = self.PicMes(picName)
ang = self.GetAngles()
angles = {}
angles['hz'] = ang['hz'] - corr['hz']
angles['v'] = ang['v'] - corr['v']
angles['chz'] = corr['hz']
angles['cv'] = corr['v']
i = 0
print('hz:', corr['hz'].GetAngle('SEC'))
print('v:', corr['v'].GetAngle('SEC'))
while abs(corr['hz'].GetAngle('SEC')) > 6 or abs(corr['v'].GetAngle('SEC')) > 6:
self.Move(angles['hz'], angles['v'])
corr = self.PicMes(picName)
ang = self.GetAngles()
print('hz:', corr['hz'].GetAngle('SEC'))
print('v:', corr['v'].GetAngle('SEC'))
angles = {}
angles['hz'] = ang['hz'] - corr['hz']
angles['v'] = ang['v'] - corr['v']
angles['chz'] = corr['hz']
angles['cv'] = corr['v']
print(i)
i += 1
return angles
def FollowTarget(self):
"""Following target (beta)
"""
t = time.localtime()
picName = str(t.tm_year) + '_' + str(t.tm_mon) + '_' + str(t.tm_mday) + '_' + str(t.tm_hour) + '_' + str(t.tm_min) + '_' + str(t.tm_sec) + '.png'
i = 0
while True:
corr = self.PicMes(picName)
ang = self.GetAngles()
print('hz:', corr['hz'].GetAngle('SEC'))
print('v:', corr['v'].GetAngle('SEC'))
angles = {}
angles['hz'] = ang['hz'] - corr['hz']
angles['v'] = ang['v'] - corr['v']
print(i)
i += 1
if abs(corr['hz'].GetAngle('SEC')) > 6 or abs(corr['v'].GetAngle('SEC')) > 6 :
self.Move(angles['hz'], angles['v'])
return angles
def __del__(self):
'''destructor
'''
pass
|
zsiki/ulyxes
|
pyapi/camerastation.py
|
Python
|
gpl-2.0
| 5,445 | 0.008448 |
#!/usr/bin/env python
from subprocess import Popen, PIPE
from sys import argv
__autor__ = "Jose Jiménez"
__email__ = "jjimenezlopez@gmail.com"
__date__ = "2012/05/03"
if len(argv) == 1 or len(argv) > 2:
print 'Wrong execution format.'
print 'Correct format: any2utf /path/to/the/files'
exit(0)
path = argv[1]
if not path.endswith('/'):
path = path + '/'
path = path.replace(' ', '\ ')
proc = Popen('ls ' + path + '*.srt', stdout=PIPE, stderr=PIPE, shell=True)
result = proc.communicate()
if proc.returncode == 2:
print 'SRT files not found in path \'' + path + '\''
list = result[0].splitlines()
for f in list:
    # paths are quoted in the shell commands below, so no escaping is needed here
    aux_f = f
# file --mime /path/to/file.srt
#print 'file --mime \"' + aux_f + '\"'
proc = Popen('file --mime \"' + aux_f + '\"', stdout=PIPE, shell=True)
result = proc.communicate()[0]
charset = result.split('charset=')[1]
charset = charset.replace('\n', '')
if charset == 'unknown-8bit':
charset = 'iso-8859-15'
if charset != 'utf-8' and charset != 'binary':
# print 'iconv -f ' + charset + ' -t utf-8 ' + aux_f + ' > ' + aux_f + '.utf'
proc = Popen('iconv -f ' + charset + ' -t utf-8 \"' + aux_f + '\" > \"' + aux_f + '.utf\"', stdout=PIPE, shell=True)
result = proc.communicate()[0]
if proc.returncode == 0:
#proc = Popen('rm ' + aux_f, stdout=PIPE, shell=True)
proc = Popen('mv \"' + aux_f + '.utf\" \"' + aux_f + '\"', stdout=PIPE, shell=True)
proc.wait()
proc = Popen('file --mime \"' + aux_f + '\"', stdout=PIPE, shell=True)
text = proc.communicate()[0]
print f.split('/')[-1] + ' | ' + charset + ' --> ' + text.split('charset=')[1].replace('\n', '')
else:
proc = Popen('file --mime \"' + aux_f + '\"', stdout=PIPE, shell=True)
text = proc.communicate()[0]
print f + ' --> conversion ERROR: ' + text.split('charset=')[1].replace('\n', '')
|
jjimenezlopez/pyutils
|
srt/any2utf.py
|
Python
|
apache-2.0
| 2,015 | 0.007448 |
# -*- coding:utf-8 -*-
from . import loading
from . import registry
|
neo5g/server-gsrp5
|
server-gsrp5/modules/__init__.py
|
Python
|
agpl-3.0
| 72 | 0.013889 |
#!/usr/bin/env python
# Note: this module is not a demo per se, but is used by many of
# the demo modules for various purposes.
import wx
#---------------------------------------------------------------------------
class ColoredPanel(wx.Window):
def __init__(self, parent, color):
wx.Window.__init__(self, parent, -1, style = wx.SIMPLE_BORDER)
self.SetBackgroundColour(color)
if wx.Platform == '__WXGTK__':
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
#---------------------------------------------------------------------------
|
dnxbjyj/python-basic
|
gui/wxpython/wxPython-demo-4.0.1/demo/ColorPanel.py
|
Python
|
mit
| 577 | 0.008666 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "api.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
3n73rp455/api
|
manage.py
|
Python
|
gpl-3.0
| 535 | 0 |
from skimage.data import coffee, camera
from sklearn_theano.feature_extraction.caffe.googlenet import (
GoogLeNetTransformer, GoogLeNetClassifier)
import numpy as np
from nose import SkipTest
import os
co = coffee().astype(np.float32)
ca = camera().astype(np.float32)[:, :, np.newaxis] * np.ones((1, 1, 3),
dtype='float32')
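# The grayscale camera image is replicated across three channels so it matches the
# RGB input the GoogLeNet models expect.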
def test_googlenet_transformer():
"""smoke test for googlenet transformer"""
if os.environ.get('CI', None) is not None:
raise SkipTest("Skipping heavy data loading on CI")
t = GoogLeNetTransformer()
t.transform(co)
t.transform(ca)
def test_googlenet_classifier():
"""smoke test for googlenet classifier"""
if os.environ.get('CI', None) is not None:
raise SkipTest("Skipping heavy data loading on CI")
c = GoogLeNetClassifier()
c.predict(co)
c.predict(ca)
|
kastnerkyle/sklearn-theano
|
sklearn_theano/feature_extraction/caffe/tests/test_googlenet.py
|
Python
|
bsd-3-clause
| 908 | 0.001101 |
'''
Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
This module creates N nodes and sends X messages randomly between nodes and records
any failures. The messages are sent in blocks of 100 and then it waits and does it again.
Created on Jul 7, 2014
@author: dfleck
'''
from twisted.trial import unittest
from twisted.internet import reactor, defer
from twisted.python import log
from gmu.chord import NetworkUtils, Config
from gmu.chord.CopyEnvelope import CopyEnvelope
import TestUtils
from ConnectivityCounter import ConnectivityCounter
import datetime
import random, sys
numNodes = 5
numMessages=5000 # Total number of messages to send
numMessagesInBlock=100 # Size of blocks to send them in
class ParallelStressTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(ParallelStressTest, cls).setUpClass()
ParallelStressTest.logObs = log.startLogging(sys.stdout)
@classmethod
def tearDownClass(cls):
super(ParallelStressTest, cls).tearDownClass()
if ParallelStressTest.logObs is not None:
ParallelStressTest.logObs.stop()
def setUp(self):
'''Start the reactor so we don't have to do it in the nodes.'''
global numNodes
# Turn off warning
Config.WARN_NO_MESSAGE_AUTHENTICATOR = False
Config.ALLOW_NO_AUTHENTICATOR = True
#log.startLogging(open('parallelStressTest.log', 'w'))
# This is the IP of the node. Note: This MUST be
# an external ID or the code won't work!
self.myIP = NetworkUtils.getNonLoopbackIP (None, None)
self.allNodes = []
self.timeout = (numNodes * 5) + numMessages # How many seconds to try before erroring out
self.connectedNodeList = [] # How many are currently connected?
self.testCounter = -1
def tearDown(self):
# Stop the nodes
# self.leave(None, self.bsNode)
# self.leave(None, self.normalNode)
# #print("Tearing down...")
pass
@defer.inlineCallbacks
def testParallelP2PSending(self):
# Start a bootstrap node
(status, self.bsNode, _observer) = yield TestUtils.startupBootstrapNode(self.myIP, 12345, 'localhost')
self.assertTrue(status, 'Could not build bootstrap node')
self.allNodes.append(self.bsNode)
self.bsNode.addMessageObserver(self.messageReceived)
# Start client nodes
log.msg("Building nodes...")
for i in range(numNodes):
(status, node, observer) = yield TestUtils.startupClientNode(self.myIP, 12346+i, 'localhost', self.bsNode.nodeLocation)
self.assertTrue(status, 'Could not startupClientNode')
self.allNodes.append(node)
# Wait for flooding to reach all the nodes
waiter = ConnectivityCounter()
yield waiter.waitForConnectivity(numNodes+1, self.bsNode) # Does not count bsNode itself.
# Now do the stress test
status = yield self.doStressTest()
# Now close it all down!
yield self.allLeave()
# Wait a second or two
yield TestUtils.wait(3)
defer.returnValue(True)
@defer.inlineCallbacks
def doStressTest(self):
'''Randomly pick two nodes and send a message between them. Verify that it goes.'''
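        # Messages are fired off in blocks of numMessagesInBlock; each block's
        # deferreds are gathered in a DeferredList and checked before moving on.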
print("Running parallel stress test: %d p2p messages" % numMessages)
messageCounter = 0
while messageCounter < numMessages:
if messageCounter % 100 == 0:
print("Running test %d of %d" % (messageCounter, numMessages))
statusList = []
for _ in range(numMessagesInBlock):
messageCounter += 1
(srcNode, dstNode) = random.sample(self.allNodes, 2)
# Build the envelope
env = CopyEnvelope()
env['ttl'] = datetime.datetime.now() + datetime.timedelta(minutes=10)
env['source'] = srcNode.nodeLocation
env['type'] = 'p2p'
env['destination'] = dstNode.nodeLocation.id
env['msgID'] = random.getrandbits(128) # TODO: Something better here!
msgText = "Test number %d " % messageCounter
statusList.append(srcNode.sendSyncMessage(msgText, env))
# Now wait for all of them to complete
dl = defer.DeferredList(statusList)
results = yield dl # Wait for it
# Now check all the return codes
for (success, _) in results:
#print("DEBUG: doStressTest Result is %s" % success)
self.assertTrue(success, "doStressTest Message returned False!" )
# Wait a bit... just to ease up a smidge.
yield TestUtils.wait(0.1)
defer.returnValue(True)
def messageReceived(self, msg, dummy_Envelope):
'''This is a receiver for the bootstrap node only!
We got a message. For flooding pingbacks the message format is:
type:PINGBACK
loc:sender
msgNum:number
'''
if not isinstance(msg, dict):
return
if 'type' in msg:
theType= msg['type']
if theType == "PINGBACK":
if msg['msgNum'] == 0: # Setup message only
# Add the sender to the list of nodes we know of
self.addNode(msg['loc'])
#print("Metrics NetworkConnect addNode: %s" % str(msg['loc']))
elif msg['msgNum'] == self.testCounter:
# We have a message from a current PING, count it!
self.connectedNodeList.append(msg['loc'])
else:
# Typically this means a message came in late
log.msg("ParallelStressTest got an unknown message:%s" % msg)
def allLeave(self):
'''Tell the node to leave the network.'''
for node in self.allNodes:
node.leave()
return True
|
danfleck/Class-Chord
|
network-client/src/tests/ParallelStressTest.py
|
Python
|
apache-2.0
| 6,454 | 0.01255 |
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect
from django.template.context_processors import csrf
from common import post_required, login_required, redirect_if_loggedin, __redirect
# Create your views here.
def index(request):
data = {'title': 'Error', 'page':'home'};
# return HttpResponse ('This is Invalid Request')
file = device.get_template(request, 'error_error.html')
return render(request, file, data)
def invalid_request_view(request):
data = {'title': 'Invalid Request', 'page':'home'};
# return HttpResponse ('This is Invalid Request')
file = device.get_template(request, 'error_invalid_request.html')
return render(request, file, data)
def under_construction_view(request):
data = {'title': 'Under Construction', 'page':'home'};
file = device.get_template(request, 'error_under_construction.html')
return render(request, file, data)
|
amitdhiman000/dais
|
error/views.py
|
Python
|
apache-2.0
| 920 | 0.022826 |
from django import template
from django.conf import settings
from django.db.models import get_model
from django.template import defaultfilters, loader
from .. import library
from .. import renderers
from ..dashboard import forms
ContentType = get_model('contenttypes', 'ContentType')
register = template.Library()
@register.assignment_tag
def update_blocks_form(page, container_name):
container = page.get_container_from_name(container_name)
if not container:
return None
return forms.BlockUpdateSelectForm(container)
@register.simple_tag(takes_context=True)
def render_attribute(context, attr_name, *args):
"""
Render an attribute based on editing mode.
"""
block = context.get(renderers.BlockRenderer.context_object_name)
value = getattr(block, attr_name)
for arg in args:
flt = getattr(defaultfilters, arg)
if flt:
value = flt(value)
user = context.get('request').user
if not user.is_authenticated:
return unicode(value)
if not user.is_staff:
return unicode(value)
wrapped_attr = u'<span id="block-{uuid}-{attr_name}">{value}</span>'
return wrapped_attr.format(
uuid=block.uuid, attr_name=attr_name, value=unicode(value))
@register.assignment_tag(takes_context=True)
def get_object_visibility(context, obj):
try:
return obj.is_visible
except AttributeError:
pass
return True
@register.simple_tag(takes_context=True)
def render_block_form(context, form):
model = form._meta.model
model_name = model.__name__.lower()
template_names = [
"%s/%s_form.html" % (model._meta.app_label, model_name),
"fancypages/blocks/%s_form.html" % model_name, form.template_name]
tmpl = loader.select_template(template_names)
context['missing_image_url'] = "%s/%s" % (
settings.MEDIA_URL, getattr(settings, "OSCAR_MISSING_IMAGE_URL", ''))
return tmpl.render(context)
@register.filter
def depth_as_range(depth):
# reduce depth by 1 as treebeard root depth is 1
return range(depth - 1)
@register.assignment_tag
def get_content_type(obj):
return ContentType.objects.get_for_model(obj.__class__)
@register.inclusion_tag(
'fancypages/dashboard/block_select.html', takes_context=True)
def render_block_selection(context):
request = context.get('request')
if not request or not request.fp_edit_mode:
return u''
grouped_blocks = library.get_grouped_content_blocks()
return {'grouped_blocks': grouped_blocks}
|
socradev/django-fancypages
|
fancypages/templatetags/fp_block_tags.py
|
Python
|
bsd-3-clause
| 2,534 | 0 |
#!/usr/bin/python
## Download files from Amazon S3 (e.g. raw photos for 3D models)
## Andy Bevan 15-Jun-2014, updated 21-Nov-2014
## Daniel Pett updated 05-Jan-2016
__author__ = 'ahb108'
## Currently for Python 2.7.5 (tested on MacOSX 10.9.2) launched in a virtual environment:
from PIL import Image # Pillow with libjpeg support
from PIL import ImageDraw
import urllib3
import json
import re
import numpy as np
import argparse
import os
import urllib2
import zipfile
# Argument parser
parser = argparse.ArgumentParser(description='This is a script to combine vector polygon masks into a binary raster mask for 3d modelling.')
parser.add_argument('-a','--app',help='MicroPasts application', required=True)
parser.add_argument('-w','--wd', help='Working directory',required=True)
args = parser.parse_args()
## Global settings ##
os.chdir(args.wd)
app = args.app
pybinst = 'http://crowdsourced.micropasts.org'
###################################
# Get the raw jpg files from working directory
ext = ['.JPG', '.jpg', '.jpeg', '.JPEG']
files = [ f for f in os.listdir('.') if f.endswith(tuple(ext)) ]
print("Masking each individual photograph...")
for q in range(0, len(files)):
# Open an example image
img = Image.open(files[q])
imnameonly = os.path.splitext(files[q])[0]
# Get JSON data for tasks and find task ID for this file
downloadURL = str(pybinst) + '/project/' + str(app) + '/tasks/export?type=task&format=json'
outputFilename = str(app) + '_task.json'
    # Download JSON file to working directory
response = urllib2.urlopen(downloadURL)
zippedData = response.read()
# Save data to disk
output = open(outputFilename,'wb')
output.write(zippedData)
output.close()
# Extract the data
zfobj = zipfile.ZipFile(outputFilename)
for name in zfobj.namelist():
uncompressed = zfobj.read(name)
# Save uncompressed data to disk
outputFilename = name
output = open(outputFilename,'wb')
output.write(uncompressed)
output.close()
with open(outputFilename) as data_file:
jtasks = json.load(data_file)
# Loop through looking for those tasks with the necessary look-up image (almost always one
# unless tasks have been duplicated, but allowing more than one just in case)
imtasks = []
for elm in range(0, len(jtasks)):
onetask = jtasks[elm]
onetaskurl = onetask['info']['url_b'].encode('utf-8')
if re.search(files[q], onetaskurl): imtasks.extend([onetask['id']])
# Get JSON data for task runs (even if they are duplicated)
jtaskruns = []
for a in range(0, len(imtasks)):
downloadURL = str(pybinst) + '/project/' + str(app) + '/' + str(imtasks[a]) + '/results.json'
outputFilename = str(app) + str(imtasks[a]) + '_task_run.json'
        # Download JSON files to working directory
response = urllib2.urlopen(downloadURL)
fileData = response.read()
# Save data to disk
output = open(outputFilename,'wb')
output.write(fileData)
output.close()
with open(outputFilename) as data_file:
jtaskruns.extend(json.load(data_file))
# Loop through and extract outlines
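    # Each task run's 'outline' and 'holes' entries wrap a vertex list of [x, y]
    # pairs; y is apparently stored from the bottom of the image, hence the flip below.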
for a in range(0, len(jtaskruns)):
jtaskrun = jtaskruns[a] # one contributor
imtmp = Image.new("L", img.size, color=0)
draw = ImageDraw.Draw(imtmp)
# Loop through outline (or possible multiple outline polygons)
for outs in range(0, len(jtaskrun['info']['outline'])):
# Extract the outline and convert to tuples
o0 = jtaskrun['info']['outline'][outs][0]
p = [] # Empty list for outline vertices
h = img.size[1] # Get image height
for x in range(0, len(o0)):
xy = o0[x]
xy[1] = h - xy[1] # reverse y-coordinates
p.append(tuple(xy))
draw.polygon(tuple(p), fill=255)
# Loop through holes in same way
for hls in range(0, len(jtaskrun['info']['holes'])):
h0 = jtaskrun['info']['holes'][hls][0]
ph = []
for x in range(0, len(h0)):
xy = h0[x]
xy[1] = h - xy[1]
ph.append(tuple(xy))
draw.polygon(tuple(ph), fill=0)
# imtmp.show()
if jtaskrun['user_id'] is None:
fn = imnameonly + '_mask_' + str(a) + '_anon.JPG'
else:
fn = imnameonly + '_mask_' + str(a) + '_user' + str(jtaskrun['user_id']) + '.JPG'
imtmp.save(fn)
        if a == 1:
fn1 = imnameonly + '_mask.JPG'
imtmp.save(fn1)
print("Done.")
|
MicroPasts/MicroPasts-Scripts
|
photoMasking/photoMasking.py
|
Python
|
apache-2.0
| 4,679 | 0.006412 |
"""Copyright 2011 The University of Michigan
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Authors - Jie Yu (jieyu@umich.edu)
"""
import os
from maple.core import logging
from maple.core import static_info
from maple.core import testing
from maple.race import testing as race_testing
from maple.systematic import program
from maple.systematic import search
class ChessTestCase(testing.DeathTestCase):
""" Run a test under the CHESS scheduler.
"""
def __init__(self, test, mode, threshold, controller):
testing.DeathTestCase.__init__(self, test, mode, threshold)
self.controller = controller
def threshold_check(self):
if self.search_done():
return True
if testing.DeathTestCase.threshold_check(self):
return True
return False
def search_done(self):
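        # Reload the static info, program and search state written out by the
        # controller and ask whether the systematic search has exhausted its schedules.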
sinfo = static_info.StaticInfo()
sinfo.load(self.controller.knobs['sinfo_out'])
prog = program.Program(sinfo)
prog.load(self.controller.knobs['program_out'])
        search_info = search.SearchInfo(sinfo, prog)
search_info.load(self.controller.knobs['search_out'])
return search_info.done()
def after_each_test(self):
iteration = len(self.test_history)
used_time = self.test_history[-1].used_time()
logging.msg('=== chess iteration %d done === (%f) (%s)\n' % (iteration, used_time, os.getcwd()))
def after_all_tests(self):
if self.is_fatal():
logging.msg('chess fatal error detected\n')
else:
logging.msg('chess threshold reached\n')
def log_stat(self):
runs = len(self.test_history)
used_time = self.used_time()
logging.msg('%-15s %d\n' % ('chess_runs', runs))
logging.msg('%-15s %f\n' % ('chess_time', used_time))
class RaceTestCase(race_testing.TestCase):
""" Run race detector to find all racy instructions.
"""
def __init__(self, test, mode, threshold, profiler):
race_testing.TestCase.__init__(self, test, mode, threshold, profiler)
class ChessRaceTestCase(testing.TestCase):
""" Run race detecctor to find all racy instructions first, and
then run the chess scheduler with sched_race on.
"""
def __init__(self, race_testcase, chess_testcase):
testing.TestCase.__init__(self)
self.race_testcase = race_testcase
self.chess_testcase = chess_testcase
def is_fatal(self):
assert self.done
if self.race_testcase.is_fatal() or self.chess_testcase.is_fatal():
return True
else:
return False
def body(self):
self.race_testcase.run()
if self.race_testcase.is_fatal():
logging.msg('\n')
logging.msg('---------------------------\n')
self.race_testcase.log_stat()
else:
self.chess_testcase.run()
logging.msg('\n')
logging.msg('---------------------------\n')
self.race_testcase.log_stat()
self.chess_testcase.log_stat()
|
jieyu/maple
|
script/maple/systematic/testing.py
|
Python
|
apache-2.0
| 3,528 | 0.003401 |
from __future__ import print_function
from ADSOrcid.models import ClaimsLog
from ADSOrcid import tasks
from collections import defaultdict
app = tasks.app
def run():
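    # Walk the claims log in id order, tallying counts per status overall and,
    # for rows that have both an orcidid and a bibcode, per ORCID id.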
stats = defaultdict(lambda: 0)
authors = {}
i = 0
with app.session_scope() as session:
for r in session.query(ClaimsLog).order_by(ClaimsLog.id.asc()).yield_per(1000):
stats[r.status] += 1
if r.orcidid and r.bibcode:
if r.orcidid not in authors:
authors[r.orcidid] = {'claimed': 0, 'forced': 0, '#full-import': 0, 'updated': 0, 'removed': 0, 'unchanged': 0}
authors[r.orcidid][r.status] += 1
if i % 100000 == 0:
print('read ', i, 'rows')
i += 1
print('read', i, 'rows')
print(stats)
print(authors)
if __name__ == '__main__':
run()
|
adsabs/ADSOrcid
|
scripts/count.py
|
Python
|
gpl-3.0
| 863 | 0.008111 |
from tastypie import fields
from tastypie.bundle import Bundle
from tastypie.resources import ModelResource, ALL, ALL_WITH_RELATIONS
from api.authorization import DateaBaseAuthorization
from api.authentication import ApiKeyPlusWebAuthentication
from api.base_resources import JSONDefaultMixin
from api.serializers import UTCSerializer
from django.template.defaultfilters import linebreaksbr
from tastypie.cache import SimpleCache
from tastypie.throttle import CacheThrottle
from django.contrib.contenttypes.models import ContentType
from account.utils import get_domain_from_url
from comment.models import Comment
class CommentResource(JSONDefaultMixin, ModelResource):
user = fields.ToOneField('account.resources.UserResource',
attribute='user', full=True, readonly=True)
def dehydrate(self, bundle):
user_data = {
'username': bundle.data['user'].data['username'],
'image_small': bundle.data['user'].data['image_small'],
'id': bundle.data['user'].data['id']
}
bundle.data['user'] = user_data
bundle.data['content_type'] = bundle.obj.content_type.model
return bundle
def hydrate(self,bundle):
# preserve data
if bundle.request.method == 'PATCH':
#preserve original fields
fields = ['user', 'published', 'content_type', 'object_id', 'created', 'client_domain']
orig_obj = Comment.objects.get(pk=int(bundle.data['id']))
for f in fields:
                if f in bundle.data:
                    bundle.data[f] = getattr(orig_obj, f)
elif bundle.request.method == 'POST':
# enforce post user
bundle.obj.user = bundle.request.user
bundle.data['user'] = bundle.request.user.id
# convert model name into model
bundle.obj.content_type = ContentType.objects.get(model=bundle.data['content_type'])
bundle.obj.client_domain = get_domain_from_url(bundle.request.META.get('HTTP_ORIGIN', ''))
del bundle.data['content_type']
return bundle
def apply_sorting(self, obj_list, options=None):
if options is None:
options = {}
else:
options = options.copy()
        if 'order_by' not in options:
options['order_by'] = 'created'
return super(CommentResource, self).apply_sorting(obj_list, options)
class Meta:
queryset = Comment.objects.all()
resource_name = 'comment'
allowed_methods = ['get', 'post', 'patch', 'delete']
serializer = UTCSerializer(formats=['json'])
filtering={
'id' : ['exact'],
'user': ALL_WITH_RELATIONS,
'content_type': ALL_WITH_RELATIONS,
'object_id': ['exact']
}
authentication = ApiKeyPlusWebAuthentication()
authorization = DateaBaseAuthorization()
limit = 50
excludes = ['client_domain']
ordering=['created']
#cache = SimpleCache(timeout=5)
throttle = CacheThrottle(throttle_at=500)
always_return_data = True
include_resource_uri = False
def get_comment_resource_class():
return CommentResource
|
lafactura/datea-api
|
datea_api/apps/comment/resources.py
|
Python
|
agpl-3.0
| 3,337 | 0.007192 |
"""
A special type of hypothesis whose value is a function.
The function is automatically eval-ed when we set_value, and is automatically hidden and unhidden when we pickle
This can also be called like a function, as in fh(data)!
"""
from Hypothesis import Hypothesis
from copy import copy
class FunctionHypothesis(Hypothesis):
"""
A special type of hypothesis whose value is a function.
The function is automatically eval-ed when we set_value, and is automatically hidden and unhidden when we pickle
This can also be called like a function, as in fh(data)!
"""
def __init__(self, value=None, f=None, display="lambda x: %s", **kwargs):
"""
*value* - the value of this hypothesis
        *f* - None by default, in which case this uses self.value2function
*args* - the arguments to the function
"""
        # this initializes prior and likelihood variables, so keep it here!
# However, don't give it value, since then it calls set_value with no f argument!
Hypothesis.__init__(self, None, display=display, **kwargs)
# And set our value
self.set_value(value, f=f)
def __call__(self, *vals):
# The below assertions are useful but VERY slow
#assert not any([isinstance(x, FunctionData) for x in vals]), "*** Probably you mean to pass FunctionData.input instead of FunctionData?"
#assert callable(self.fvalue)
return self.fvalue(*vals)
def compile_function(self):
"""
Takes my value and returns what function I compute. Internally cached by set_value
NOTE: This must be overwritten by subclasses to something useful--see LOTHypothesis
"""
raise NotImplementedError
def set_value(self, value, f=None):
"""
Sets the value for the hypothesis.
Another option: send f if speed is necessary
"""
Hypothesis.set_value(self, value)
if f is not None:
self.fvalue = f
elif value is None:
self.fvalue = None
else:
self.fvalue = self.compile_function() # now that the value is set
def force_function(self, f):
"""
Sets the function to f, ignoring value.
:param f: - a python function (object)
:return:
"""
self.set_value( "<FORCED_FUNCTION>", f=f)
def compute_single_likelihood(self, datum):
"""
A function that must be implemented by subclasses to compute the likelihood of a single datum/response pair.
This should NOT implement the temperature (that is handled by compute_likelihood)
"""
raise NotImplementedError
# ~~~~~~~~~
# Make this thing pickleable
def __getstate__(self):
""" We copy the current dict so that when we pickle, we destroy the function"""
dd = copy(self.__dict__)
dd['fvalue'] = None # clear the function out
return dd
def __setstate__(self, state):
"""
sets the state of the hypothesis (when we unpickle)
"""
self.__dict__.update(state)
self.set_value(self.value) # just re-set the value so that we re-compute the function
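
# Usage sketch (illustrative only; in practice a subclass such as LOTHypothesis,
# which implements compile_function(), is used, and behaviour also depends on the
# Hypothesis base class):
#
#   fh = FunctionHypothesis(value="lambda x: x + 1", f=lambda x: x + 1)
#   fh(3)                    # calls the stored function -> 4
#   fh.set_value("lambda x: x * 2")  # without f= this calls compile_function(),
#                                    # which subclasses must implement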
|
joshrule/LOTlib
|
LOTlib/Hypotheses/FunctionHypothesis.py
|
Python
|
gpl-3.0
| 3,317 | 0.006331 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packing metadata for setuptools."""
from io import open
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
with open('README.rst', encoding='utf-8') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst', encoding='utf-8') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
'orderedset',
'hamster-lib >= 0.13.0',
]
setup(
name='hamster-gtk',
version='0.11.0',
description="A GTK interface to the hamster time tracker.",
long_description=readme + '\n\n' + history,
author="Eric Goller",
author_email='eric.goller@projecthamster.org',
url='https://github.com/projecthamster/hamster-gtk',
packages=find_packages(exclude=['tests*']),
install_requires=requirements,
license="GPL3",
zip_safe=False,
keywords='hamster-gtk',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
entry_points='''
[gui_scripts]
hamster-gtk=hamster_gtk.hamster_gtk:_main
''',
package_data={
'hamster_gtk': ['resources/hamster-gtk.gresource'],
},
)
|
projecthamster/hamster-gtk
|
setup.py
|
Python
|
gpl-3.0
| 1,564 | 0 |
import numpy as np
NR_PER_CONDITION = 1000
neuro_sigma = 4
neuro_mean = 0
satis_sigma = 4
satis_mean = 0
print "Start drawing"
bins = {
5: np.array([-6, -3, 0, 3, 6]),
7: np.array([-6, -4, -2, 0, 2, 4, 6])
}
borders = {
5: np.array([-4.5,-1.5,1.5,4.5]),
7: np.array([-5.,-3.,-1.,1,3,5])
}
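# np.digitize below maps each normally distributed draw to the index of the
# interval (delimited by `borders`) it falls into; that index then selects the
# corresponding value from the matching 5- or 7-point scale in `bins`.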
'output.dat'
conditions = [
{'cond': 1, 'first': 5, 'second': 5},
{'cond': 2, 'first': 7, 'second': 7},
{'cond': 3, 'first': 5, 'second': 7},
{'cond': 4, 'first': 7, 'second': 5}
]
neuro_vals = np.empty([12,NR_PER_CONDITION])
satis_vals = np.empty([12,NR_PER_CONDITION])
outfile = file('output.dat', 'w')
outfile.write('cond')
for i in range(12):
outfile.write('\tneuro'+str(i+1))
for i in range(12):
outfile.write('\tsatis'+str(i+1))
outfile.write('\n')
for cond in conditions:
print "Writing condition ", cond['cond']
for i in range(12):
neuro = neuro_sigma * np.random.randn(NR_PER_CONDITION) + neuro_mean
neuro_index = np.digitize(neuro, borders[cond['first']])
neuro_vals[i] = bins[cond['first']][neuro_index]
satis = satis_sigma * np.random.randn(NR_PER_CONDITION) + satis_mean
satis_index = np.digitize(satis, borders[cond['second']])
satis_vals[i] = bins[cond['second']][satis_index]
cond_arr = np.full([1,NR_PER_CONDITION], cond['cond'])
output = np.concatenate((cond_arr, neuro_vals, satis_vals) )
np.savetxt(outfile, output.transpose(), fmt="%2i")
outfile.close()
print "Finished"
|
mpauly/psych-simulation
|
simulate.py
|
Python
|
apache-2.0
| 1,499 | 0.011341 |
from __future__ import unicode_literals
from django.apps import AppConfig
class ImagerImagesConfig(AppConfig):
name = 'imager_images'
def ready(self):
"""Run when app ready."""
from imager_images import signals
|
jaredscarr/django-imager
|
imagersite/imager_images/apps.py
|
Python
|
mit
| 238 | 0 |
class ProjectManager(object):
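    # Stub interface for the Octopus server's project management API; the
    # methods below are placeholders that a concrete implementation is expected
    # to fill in (create/delete/list projects, upload a file into a project).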
def create(self, project_name):
pass
def delete(self, project_name):
pass
def list_projects(self):
pass
def upload_file(self, project_name, filename):
pass
|
octopus-platform/bjoern
|
python/octopus-tools/octopus/server/project_manager.py
|
Python
|
gpl-3.0
| 237 | 0 |
import pytest
class TestService:
@pytest.mark.complete("service ")
def test_1(self, completion):
assert completion
|
algorythmic/bash-completion
|
test/t/test_service.py
|
Python
|
gpl-2.0
| 133 | 0 |
from __future__ import print_function, absolute_import
from setuptools import setup, find_packages, Extension, Command
from setuptools.command.build_ext import build_ext
from setuptools.command.egg_info import egg_info
from distutils.file_util import copy_file
from distutils.dir_util import mkpath, remove_tree
from distutils.util import get_platform
from distutils import log
import os
import sys
import subprocess
if sys.version_info[:2] < (3, 6):
sys.exit(
"error: Python 3.6 is required to run setup.py. \n"
"The generated wheel will be compatible with both py27 and py3+"
)
cmdclass = {}
try:
from wheel.bdist_wheel import bdist_wheel
except ImportError:
pass
else:
class UniversalBdistWheel(bdist_wheel):
def get_tag(self):
return ("py2.py3", "none") + bdist_wheel.get_tag(self)[2:]
cmdclass["bdist_wheel"] = UniversalBdistWheel
class Download(Command):
user_options = [
("version=", None, "ots source version number to download"),
("sha256=", None, "expected SHA-256 hash of the source archive"),
("download-dir=", "d", "where to unpack the 'ots' dir (default: src/c)"),
("clean", None, "remove existing directory before downloading"),
]
boolean_options = ["clean"]
URL_TEMPLATE = (
"https://github.com/khaledhosny/ots/releases/download/"
"v{version}/ots-{version}.tar.xz"
)
def initialize_options(self):
self.version = None
self.download_dir = None
self.clean = False
self.sha256 = None
def finalize_options(self):
if self.version is None:
from distutils.errors import DistutilsSetupError
raise DistutilsSetupError("must specify --version to download")
if self.sha256 is None:
from distutils.errors import DistutilsSetupError
raise DistutilsSetupError("must specify --sha256 of downloaded file")
if self.download_dir is None:
self.download_dir = os.path.join("src", "c")
self.url = self.URL_TEMPLATE.format(**vars(self))
def run(self):
from urllib.request import urlopen
from io import BytesIO
import tarfile
import lzma
import hashlib
output_dir = os.path.join(self.download_dir, "ots")
if self.clean and os.path.isdir(output_dir):
remove_tree(output_dir, verbose=self.verbose, dry_run=self.dry_run)
if os.path.isdir(output_dir):
log.info("{} was already downloaded".format(output_dir))
else:
archive_name = self.url.rsplit("/", 1)[-1]
mkpath(self.download_dir, verbose=self.verbose, dry_run=self.dry_run)
log.info("downloading {}".format(self.url))
if not self.dry_run:
# response is not seekable so we first download *.tar.xz to an
# in-memory file, and then extract all files to the output_dir
                # the SHA-256 checksum is verified below before extracting
f = BytesIO()
with urlopen(self.url) as response:
f.write(response.read())
f.seek(0)
actual_sha256 = hashlib.sha256(f.getvalue()).hexdigest()
if actual_sha256 != self.sha256:
from distutils.errors import DistutilsSetupError
raise DistutilsSetupError(
"invalid SHA-256 checksum:\n"
"actual: {}\n"
"expected: {}".format(actual_sha256, self.sha256)
)
log.info("unarchiving {} to {}".format(archive_name, output_dir))
if not self.dry_run:
with lzma.open(f) as xz:
with tarfile.open(fileobj=xz) as tar:
filelist = tar.getmembers()
first = filelist[0]
if not (first.isdir() and first.name.startswith("ots")):
from distutils.errors import DistutilsSetupError
raise DistutilsSetupError(
"The downloaded archive is not recognized as "
"a valid ots source tarball"
)
# strip the root 'ots-X.X.X' directory before extracting
rootdir = first.name + "/"
to_extract = []
for member in filelist[1:]:
if member.name.startswith(rootdir):
member.name = member.name[len(rootdir) :]
to_extract.append(member)
tar.extractall(output_dir, members=to_extract)
class Executable(Extension):
if os.name == "nt":
suffix = ".exe"
else:
suffix = ""
def __init__(self, name, script, options=None, output_dir=".", cwd=None, env=None):
Extension.__init__(self, name, sources=[])
self.target = self.name.split(".")[-1] + self.suffix
self.script = script
self.options = options or []
self.output_dir = output_dir
self.cwd = cwd
self.env = env
class ExecutableBuildExt(build_ext):
def finalize_options(self):
from distutils.ccompiler import get_default_compiler
build_ext.finalize_options(self)
if self.compiler is None:
self.compiler = get_default_compiler(os.name)
self._compiler_env = dict(os.environ)
def get_ext_filename(self, ext_name):
for ext in self.extensions:
if isinstance(ext, Executable):
return os.path.join(*ext_name.split(".")) + ext.suffix
return build_ext.get_ext_filename(self, ext_name)
def run(self):
self.run_command("download")
if self.compiler == "msvc":
self.call_vcvarsall_bat()
build_ext.run(self)
def call_vcvarsall_bat(self):
import struct
from distutils._msvccompiler import _get_vc_env
arch = "x64" if struct.calcsize("P") * 8 == 64 else "x86"
vc_env = _get_vc_env(arch)
self._compiler_env.update(vc_env)
def build_extension(self, ext):
if not isinstance(ext, Executable):
build_ext.build_extension(self, ext)
return
cmd = [sys.executable, ext.script] + ext.options + [ext.target]
if self.force:
cmd += ["--force"]
log.debug("running '{}'".format(" ".join(cmd)))
if not self.dry_run:
env = self._compiler_env.copy()
if ext.env:
env.update(ext.env)
p = subprocess.run(cmd, cwd=ext.cwd, env=env)
if p.returncode != 0:
from distutils.errors import DistutilsExecError
raise DistutilsExecError(
"running '{}' script failed".format(ext.script)
)
exe_fullpath = os.path.join(ext.output_dir, ext.target)
dest_path = self.get_ext_fullpath(ext.name)
mkpath(os.path.dirname(dest_path), verbose=self.verbose, dry_run=self.dry_run)
copy_file(exe_fullpath, dest_path, verbose=self.verbose, dry_run=self.dry_run)
class CustomEggInfo(egg_info):
def run(self):
# make sure the ots source is downloaded before creating sdist manifest
self.run_command("download")
egg_info.run(self)
cmdclass["download"] = Download
cmdclass["build_ext"] = ExecutableBuildExt
cmdclass["egg_info"] = CustomEggInfo
build_options = []
platform_tags = get_platform().split("-")
if "macosx" in platform_tags:
if "universal2" in platform_tags:
build_options.append("--mac-target=universal2")
elif "arm64" in platform_tags:
build_options.append("--mac-target=arm64")
ots_sanitize = Executable(
"ots.ots-sanitize",
script="build.py",
options=build_options,
output_dir=os.path.join("build", "meson"),
)
with open("README.md", "r", encoding="utf-8") as readme:
long_description = readme.read()
setup(
name="opentype-sanitizer",
use_scm_version={"write_to": "src/python/ots/_version.py"},
description=("Python wrapper for the OpenType Sanitizer"),
long_description=long_description,
long_description_content_type="text/markdown",
author="Cosimo Lupo",
author_email="cosimo@anthrotype.com",
url="https://github.com/googlefonts/ots-python",
license="OpenSource, BSD-style",
platforms=["posix", "nt"],
package_dir={"": "src/python"},
packages=find_packages("src/python"),
ext_modules=[ots_sanitize],
zip_safe=False,
cmdclass=cmdclass,
setup_requires=["setuptools_scm"],
extras_require={"testing": ["pytest"]},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Text Processing :: Fonts",
"Topic :: Multimedia :: Graphics",
],
)
|
googlefonts/ots-python
|
setup.py
|
Python
|
bsd-3-clause
| 9,371 | 0.00096 |
#!/usr/bin/env python
"""
CsPython Tutorial Example 2
By Mark Gossage (mark@gossage.cjb.net)
A pure-Python script to show the use of Crystal Space.
To use this, ensure that your PYTHONPATH, CRYSTAL, and LD_LIBRARY_PATH
(or DYLD_LIBRARY_PATH for MacOS/X; or PATH for Windows) variables are set
appropriately, and then run the script with the command:
python scripts/python/tutorial2.py
This performs the same features as the C++ tutorial2.
It creates a room and a 3D sprite.
===========================================================================
There are two ways to use the CsPython module.
Either as a plugin within CS (pysimp),
or as a pure Python module (this example).
This is just the CS C++ tutorial 2 rewritten in Python.
Overall it looks remarkably like the original C++ code,
just with Python syntax; the functions all have the same names and signatures
(use None instead of NULL, and "" instead of (char*)NULL).
Please refer to the CS Tutorial 2 in the documentation
for detail on how the C++ works.
"""
import types, string, re, sys
import traceback
try: # get in CS
from cspace import *
except:
print "WARNING: Failed to import module cspace"
traceback.print_exc()
sys.exit(1) # die!!
# utils code
#############################
# Note: we are assuming a global 'object_reg'
# which will be defined later
def Report (severity, msg):
"Reporting routine"
csReport(object_reg, severity, "crystalspace.application.python", msg)
def Log(msg):
Report(CS_REPORTER_SEVERITY_NOTIFY, msg)
def FatalError(msg="FatalError"):
"A Panic & die routine"
Report(CS_REPORTER_SEVERITY_ERROR,msg)
sys.exit(1)
# The application
#############################
class MyCsApp:
def Init(self):
Log('MyCsApp.Init()...')
self.vc = object_reg.Get(iVirtualClock)
self.engine = object_reg.Get(iEngine)
self.g3d = object_reg.Get(iGraphics3D)
self.loader = object_reg.Get(iLoader)
self.keybd = object_reg.Get(iKeyboardDriver)
if self.vc==None or self.engine==None or self.g3d==None or self.keybd==None or self.loader==None:
FatalError("Error: in object registry query")
if not csInitializer.OpenApplication(object_reg):
FatalError("Could not open the application!")
txtmgr=self.g3d.GetTextureManager()
room=self.SetupRoom() # creates & returns the room
self.CreateLights(room)
self.LoadSprites(room)
self.CreateCamera(room,csVector3(0, 5, -3))
self.engine.Prepare()
SimpleStaticLighter.ShineLights(room, self.engine, 4)
Log('MyCsApp.Init() finished')
def SetupRoom(self):
# load a texture
if self.loader.LoadTexture("stone", "/lib/std/stone4.gif") == None:
FatalError("Error: unable to load texture")
# now get it as a material from the engine
material = self.engine.GetMaterialList().FindByName("stone")
# create the 'room'
room = self.engine.CreateSector("room")
mapper = DensityTextureMapper(0.3)
box = TesselatedBox(csVector3(-5, 0, -5), csVector3(5, 20, 5))
box.SetLevel(3)
box.SetMapper(mapper)
box.SetFlags(Primitives.CS_PRIMBOX_INSIDE)
walls = GeneralMeshBuilder.CreateFactoryAndMesh (self.engine, room, \
"walls", "walls_factory", box)
walls.GetMeshObject().SetMaterialWrapper(material)
return room
def CreateLights(self,room):
# lights
ll = room.GetLights ()
light = self.engine.CreateLight ("", csVector3 (-3, 5, 0), 10,csColor (1, 0, 0), CS_LIGHT_DYNAMICTYPE_STATIC)
ll.Add (light)
light = self.engine.CreateLight ("", csVector3 (3, 5, 0), 10,csColor (0, 0, 1), CS_LIGHT_DYNAMICTYPE_STATIC)
ll.Add (light)
light = self.engine.CreateLight ("", csVector3 (0, 5, -3), 10,csColor (0, 1, 0), CS_LIGHT_DYNAMICTYPE_STATIC)
ll.Add (light)
def LoadSprites(self,room):
# Load a texture for our sprite.
txtmgr=self.g3d.GetTextureManager()
txt=self.loader.LoadTexture ("spark","/lib/std/spark.png",CS_TEXTURE_3D,txtmgr,1)
if txt == None:
FatalError("Error loading texture!")
# Load a sprite template from disk.
imeshfact=self.loader.LoadMeshObjectFactory("/lib/std/sprite1")
if imeshfact == None:
FatalError("Error loading mesh!")
# Create the sprite and add it to the engine.
sprite=self.engine.CreateMeshWrapper(imeshfact,"MySprite",room,csVector3 (-3, 5, 3))
m=csMatrix3()
m.Identity() # make sure its identity
#m.__imul__(5.) # this is the same as m=m*5
m=m*5
sprite.GetMovable().SetTransform(m)
sprite.GetMovable().UpdateMove()
spstate=sprite.GetMeshObject().QueryInterface(iSprite3DState)
spstate.SetAction("default")
#spstate.SetMixMode(CS_FX_SETALPHA (.5))
# The following two calls are not needed since CS_ZBUF_USE and
# Object render priority are the default but they show how you
# can do this.
sprite.SetZBufMode(CS_ZBUF_USE)
sprite.SetRenderPriority(self.engine.GetObjectRenderPriority())
def CreateCamera(self,room,origin):
self.view = csView(self.engine, self.g3d)
self.view.GetCamera().SetSector(room)
self.view.GetCamera().GetTransform().SetOrigin(origin)
g2d = self.g3d.GetDriver2D()
self.view.SetRectangle(2, 2, g2d.GetWidth() - 4, g2d.GetHeight() - 4)
def SetupFrame (self):
#print 'SetupFrame called',
elapsed_time = self.vc.GetElapsedTicks()
# Now rotate the camera according to keyboard state
speed = (elapsed_time / 1000.) * (0.03 * 20);
if self.keybd.GetKeyState(CSKEY_RIGHT):
self.view.GetCamera().GetTransform().RotateThis(CS_VEC_ROT_RIGHT, speed)
if self.keybd.GetKeyState(CSKEY_LEFT):
self.view.GetCamera().GetTransform().RotateThis(CS_VEC_ROT_LEFT, speed)
if self.keybd.GetKeyState(CSKEY_PGUP):
self.view.GetCamera().GetTransform().RotateThis(CS_VEC_TILT_UP, speed)
if self.keybd.GetKeyState(CSKEY_PGDN):
self.view.GetCamera().GetTransform().RotateThis(CS_VEC_TILT_DOWN, speed)
if self.keybd.GetKeyState(CSKEY_UP):
self.view.GetCamera().Move(CS_VEC_FORWARD * 4 * speed)
if self.keybd.GetKeyState(CSKEY_DOWN):
self.view.GetCamera().Move(CS_VEC_BACKWARD * 4 * speed)
# Tell 3D driver we're going to display 3D things.
if not self.g3d.BeginDraw(self.engine.GetBeginDrawFlags() | CSDRAW_3DGRAPHICS):
FatalError()
self.view.Draw()
#print 'SetupFrame done'
def FinishFrame(self):
#print 'FinishFrame called'
self.g3d.FinishDraw()
self.g3d.Print(None)
#print 'FinishFrame done'
# EventHandler
#############################
def EventHandler(ev):
#print 'EventHandler called'
if ((ev.Name == KeyboardDown) and
(csKeyEventHelper.GetCookedCode(ev) == CSKEY_ESC)):
q = object_reg.Get(iEventQueue)
if q:
q.GetEventOutlet().Broadcast(csevQuit(object_reg))
return 1
elif ev.Name == Frame:
app.SetupFrame()
app.FinishFrame()
return 1
return 0
# startup code
#############################
# we could write a 'main' fn for this
# but I decided to put in in the body of the app
object_reg = csInitializer.CreateEnvironment(sys.argv)
if object_reg is None:
FatalError("Couldn't create enviroment!")
if csCommandLineHelper.CheckHelp(object_reg):
csCommandLineHelper.Help(object_reg)
sys.exit(0)
if not csInitializer.SetupConfigManager(object_reg):
FatalError("Couldn't init app!")
plugin_requests = [
CS_REQUEST_VFS, CS_REQUEST_OPENGL3D, CS_REQUEST_ENGINE,
CS_REQUEST_FONTSERVER, CS_REQUEST_IMAGELOADER, CS_REQUEST_LEVELLOADER,
]
if not csInitializer.RequestPlugins(object_reg, plugin_requests):
FatalError("Plugin requests failed!")
# setup the event handler:
# note: we need not even make EventHandler() a global fn
# python would accept it as a member fn of MyCsApp
if not csInitializer.SetupEventHandler(object_reg, EventHandler):
FatalError("Could not initialize event handler!")
# Get some often used event IDs
KeyboardDown = csevKeyboardDown(object_reg)
Frame = csevFrame(object_reg)
app=MyCsApp() # this is the one & only app
app.Init() # turn on the app
# this also now calls OpenApplication
csDefaultRunLoop(object_reg)
app=None # need to do this or you get 'unreleased instances' warning
# See! CsPython manages the smart pointers correctly
csInitializer.DestroyApplication (object_reg) # bye bye
object_reg=None # just to be complete (not really needed)
|
baoboa/Crystal-Space
|
scripts/python/tutorial2.py
|
Python
|
lgpl-2.1
| 8,916 | 0.012225 |
#!/usr/bin/env python
# -*- coding: utf8 -*-
import codecs
import xml.sax
import json
import copy
import logging
import math
import atexit
import os
import shutil
import tempfile
import tarfile
from datetime import datetime
from xml.sax.handler import ContentHandler
class IO(object):
def __init__(self):
self.defers = []
atexit.register(lambda defers:map(lambda x:x(),defers),self.defers)
def read(self,name,encoding=u'utf8'):
file = codecs.open(name,u'r',encoding)
content = file.read()
file.close()
return content
def tempdir(self):
# temp dir
dir = None
# try shmfs
shmfs = u'/dev/shm'
if os.path.exists(shmfs):
dir = tempfile.mkdtemp(dir=shmfs)
else:
dir = tempfile.mkdtemp()
# defer cleanup
self.defers.append(
(lambda name:
lambda :shutil.rmtree(name))
(dir)
)
return dir
def snapshot(self,content,name=None,encoding=u'utf8',compress=False):
dir = self.tempdir()
# make file
file_name = None
if name is not None:
file_name = name
else:
file_name = str(content.__hash__())
# final path
full_path = os.path.join(dir,file_name)
# do write
temp_file = codecs.open(full_path,u'w',encoding)
temp_file.write(content)
temp_file.close()
if compress:
compress_path = os.path.join(dir,u'%s.tar.bz2' % file_name)
compress_out = tarfile.open(compress_path,u'w:bz2')
compress_out.add(full_path,file_name)
compress_out.close()
full_path = compress_path
return full_path
class Node(object):
def __init__(self,name=u'__root__',parent=None):
self.node = {
u'__name__':name,
u'__content__':[],
u'__attrs__':{},
u'__parent__': parent,
u'__children__':[],
}
def clone(self):
# a shadow copy first
copyed = Node(self[u'__name__'],self[u'__parent__'])
# copy content
copyed[u'__content__'] = list(self[u'__content__'])
# copy attrs
copyed[u'__attrs__'] = dict(self[u'__attrs__'])
# copy children
copyed[u'__children__'] = map(lambda child:child.clone(),self[u'__children__'])
# fix parent
for child in copyed[u'__children__']:
child[u'__parent__'] = copyed
return copyed
def __str__(self):
return self.node.__str__()
def __getitem__(self,name):
return self.node[name]
def __setitem__(self,name,value):
self.node[name] = value
return
def __delitem__(self,name):
del self.node[name]
return
class TinyStyleEngine(object):
def __init__(self,name):
self.styles = json.loads(IO().read(name))
def style(self,name):
return self.styles.get(name,{})
def apply(self,node):
# duplicate
node_name = node[u'__name__']
styles = {}
        # if any element-level style?
styles.update(self.style(node_name))
# if any class specified?
attrs = node[u'__attrs__']
if u'class' in attrs:
for class_name in filter(lambda x:len(x) > 0 ,attrs[u'class'].split(u' ')):
styles.update(self.style(u'.%s' % class_name))
del attrs[u'class']
        # filter out empty style names
if u'' in styles:
del styles[u'']
if len(styles) > 0:
            # was a style attribute already present?
if u'style' in attrs:
# reconstruct style
for single_style in [ each.strip() for each in attrs['style'].split(u';')]:
single_style = single_style.split(u':')
style_name,style_value = single_style[0].strip(),u':'.join(single_style[1:]).strip()
if len(style_name) > 0:
styles[style_name] = style_value
# build style string
attrs[u'style'] = u''.join([ u'%s:%s;' % (key,value) for key,value in styles.items()])
return node
def decorate(self,root):
root = self.apply(root)
for node in root[u'__children__']:
self.apply(node)
node[u'__children__'] = map(lambda x:self.decorate(x),node[u'__children__'])
return root
class TinyTemplateEngine(ContentHandler):
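    # SAX handler that parses the XML template string and builds a lightweight
    # DOM of Node objects rooted at self.root; text and child elements are
    # recorded in each node's __content__ list, children marked as '__node__'.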
def __init__(self,template):
xml.sax.parseString(template,self)
def startDocument(self):
# document root dom nodes
self.root = Node()
# current dom node infos
self.current_node = self.root
def startElement(self,name,attrs):
# current container
parent = self.current_node
node = Node(name,parent)
# attach to parent
parent[u'__children__'].append(node)
# parent has content?
parent[u'__content__'].append(u'__node__')
# attach attributes
node_attrs = node[u'__attrs__']
for attr in attrs.getNames():
node_attrs[attr] = attrs.getValue(attr)
# update current node
self.current_node = node
def endElement(self,name):
# update current node
parent = self.current_node[u'__parent__']
if parent is None:
# a root node
self.root[u'__children__'].append(self.current_node)
self.current_node = None
else:
self.current_node = parent
def characters(self,content):
if self.current_node is None:
            # no node associated with it, drop it
return
self.current_node[u'__content__'].append(content)
class TinyRender(object):
def __init__(self):
self._driver = TinyDataDriver()
def driver(self,driver):
self._driver = driver
return self
def define(self,template):
self.root = TinyTemplateEngine(IO().read(template)).root
return self
def bind(self,binding):
self._driver.evaluate(self.root,binding)
return self
def render(self,style=None):
if style is not None:
self.root = TinyStyleEngine(style).decorate(self.root)
return self.render_node(self.root)
def render_node(self,node):
name = node[u'__name__']
# special case for root node
if name == u'__root__':
return u''.join(map(lambda x:self.render_node(x),node[u'__children__']))
# now ,all node has a not none parent
# build attrs
attrs =u' '.join([ u"%s='%s'" % (key,value) for key,value in node[u'__attrs__'].items()])
# build content
        # be careful about node content
content = []
children = node[u'__children__']
node_index = 0
        # for prettifying reasons, insert \n when meeting contiguous __node__ content
meet_node = False
indention = None
for part in node[u'__content__']:
if part != u'__node__':
meet_node = False
content.append(part)
else:
if meet_node:
# got the right indention,do nothing
content.append(indention)
else:
# find indention
space = 0
done = False
# backtrack the content to find idention
for content_index in range(len(content)-1,-1,-1):
current = content[content_index]
for char in range(len(current)-1,-1,-1):
char = current[char]
if char == u'\n' or char == u'>':
done = True
break
elif char == u' ':
space = space + 1
else:
                                # consider it an indented inline; give up indentation prettifying
done = True
break
raise Exception(u'should be space or carier return,context:%s' % current)
if done:
break
indention = u'\n%s' % u''.join([u' ' for i in range(space)])
meet_node = True
# special process for node
content.append(self.render_node(children[node_index]))
node_index = node_index + 1
content = ''.join(content)
return u'<%s %s>%s</%s>' % (
name,
attrs,
content,
name,
)
class TinyDataDriver(object):
def __init__(self):
self.binding = {}
magic_prefix = u'_eval_'
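        # Auto-register evaluators: every method named _eval_<name> handles the
        # template attribute/element whose name is <name> with '_' replaced by
        # '-' (e.g. _eval_tiny_repeat handles 'tiny-repeat').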
self.evaluator = {}
for attr in dir(self):
# magic find
if attr.startswith(magic_prefix):
self.evaluator[attr[len(magic_prefix):].replace(u'_',u'-')] = getattr(self,attr)
def bind(self,name,value):
self.binding[name] = value
def priority_attrs(self,attrs):
# copy
attrs = dict(attrs)
# priority process order
priority = []
if u'tiny-repeat' in attrs:
priority.append(u'tiny-repeat')
del attrs[u'tiny-repeat']
return priority + attrs.keys()
def evaluate(self,node,binding=None):
if node[u'__name__'] == u'__root__':
map(lambda x:self.evaluate_node(x,binding),node[u'__children__'])
else:
            raise Exception(u'not a root node, evaluate is illegal')
def evaluate_node(self,node,binding=None):
# node should had parent
if binding is not None:
self.binding.update(binding)
binding = self.binding
# save parent
parent = node[u'__parent__']
brothers = parent[u'__children__']
contents = parent[u'__content__']
name = node[u'__name__']
# find brother index
brother_match = -1
for i,brother in enumerate(brothers):
if brother == node :
brother_match = i
break
if brother_match == -1:
            raise Exception(u'no matching node in parent, illegal evaluate')
# find content index
content_match = -1
content_meet = 0
for i,content in enumerate(contents):
if content == u'__node__':
content_meet = content_meet + 1
if content_meet == brother_match+1:
content_match = i
break
if content_match == -1:
            raise Exception(u'no matching content in parent for node content, illegal evaluate')
def replace_in_parent(content_match,brother_match,nodes):
for i,node in enumerate(nodes):
brothers.insert( i + brother_match,node )
contents.insert( i + content_match,u'__node__' )
# remove original
total_nodes = len(nodes)
brothers.pop(total_nodes+brother_match)
contents.pop(total_nodes+content_match)
# evaluated container
nodes = [node]
# find evalutior for name
evaluator = self.evaluator.get(name,None)
if evaluator is not None:
nodes = evaluator(node,binding)
# replace
replace_in_parent(content_match,brother_match,nodes)
        # now, new nodes are associated with the main tree
# mark node numbers
total_nodes = len(nodes)
# index trackers
# as attrs may generate more nodes also
content_index_tracker = content_match
brother_index_tracker = brother_match
# deal with attrs
for i,node in enumerate(nodes):
# evaluate attr
attrs = node[u'__attrs__']
# new nodes may be generated by attr evaluator,
# defer it.
# or it will have trouble with tree organization
for attr in self.priority_attrs(attrs):
evaluator = self.evaluator.get(attr,None)
if evaluator is not None:
# evaluate
evaluated = evaluator(node,binding)
# replace `this` node
# attach to main tree
replace_in_parent(content_index_tracker,brother_index_tracker,evaluated)
# delegate evalution of new evaluated nodes
map(lambda x:self.evaluate_node(x,binding),evaluated)
# hand out control already
# stop processing
return
            # reaching here means the node was not changed in the main tree,
# process children
for child in node[u'__children__']:
self.evaluate_node(child,binding)
def _eval_tiny_repeat(self,node,binding):
attrs = node[u'__attrs__']
times = eval(attrs[u'tiny-repeat'],binding)
index_name = attrs[u'tiny-repeat-index']
# clear instrument
del attrs[u'tiny-repeat']
del attrs[u'tiny-repeat-index']
# node parent
parent = node[u'__parent__']
# expand content
repeated = []
# reuse bindng context
conflict = None
if index_name in binding:
conflict = binding[index_name]
# generate
for i in range(times):
# bind index value
binding[index_name] = i
# DO copy
# take care of recursive bind
copyed = node.clone()
            # node is not actually in parent yet,
            # so a direct evaluate_node will fail.
            # make an isolated container for this node,
            # then evaluate/evaluate_node will work as expected.
            # this is a little weird.
psuedo_root = Node()
psuedo_root[u'__children__'].append(copyed)
psuedo_root[u'__content__'].append(u'__node__')
copyed[u'__parent__'] = psuedo_root
self.evaluate(psuedo_root,binding)
# node is evaluated
            # reap the evaluated nodes
# re-associate parent
for child in psuedo_root[u'__children__']:
child[u'__parent__'] = parent
repeated.extend(psuedo_root[u'__children__'])
# recover conflict
if conflict is not None:
binding[index_name] = conflict
return repeated
def _eval_tiny_number(self,node,binding):
attrs = node[u'__attrs__']
# evaluate
value = float(eval(attrs[u'tiny-number'],binding))
# clear instrument
del attrs[u'tiny-number']
if u'tiny-force-integer' in attrs:
# froce integer
del attrs[u'tiny-force-integer']
if not math.isnan(value):
node[u'__content__'].append(u'{:,}'.format(int(value)))
else:
node[u'__content__'].append(u'{:,}'.format(0))
else:
# fill content
if math.isnan(value):
node[u'__content__'].append(u'N/A')
elif value == int(value):
node[u'__content__'].append(u'{:,}'.format(int(value)))
else:
node[u'__content__'].append(u'{:,.2f}'.format(value))
if u'tiny-color' in attrs and not math.isnan(value):
del attrs[u'tiny-color']
css = u''
# add class
if u'class' in attrs:
css = attrs[u'class']
if value > 0:
attrs[u'class'] = u'%s tiny-positive-number' % css
elif value < 0:
attrs[u'class'] = u'%s tiny-negetive-number' % css
return [node]
def _eval_tiny_percent(self,node,binding):
attrs = node[u'__attrs__']
# evaluate
value = float(eval(attrs[u'tiny-percent'],binding))
# clear instrument
del attrs[u'tiny-percent']
if not math.isnan(value):
if u'tiny-precision' in attrs:
format = u'{:,.%s%%}' % eval(attrs[u'tiny-precision'],binding)
node[u'__content__'].append(format.format(value))
else:
node[u'__content__'].append(u'{:,.2%}'.format(value))
else:
node[u'__content__'].append(u'N/A')
if u'tiny-default-color' not in attrs:
css = u''
# add class
if u'class' in attrs:
css = attrs[u'class']
if value > 0:
attrs[u'class'] = u'%s tiny-positive-number' % css
elif value < 0:
attrs[u'class'] = u'%s tiny-negetive-number' % css
else:
del attrs[u'tiny-default-color']
return [node]
def _eval_tiny_data(self,node,binding):
attrs = node[u'__attrs__']
node[u'__content__'].append(u'%s' % eval(attrs[u'tiny-data'],binding))
# clear instrument
del attrs[u'tiny-data']
return [node]
def _eval_tiny_color_group(self,node,binding):
attrs = node[u'__attrs__']
css = u'tiny-color-group-%s' % eval(attrs[u'tiny-color-group'],binding)
# attach css
if u'class' in attrs:
attrs[u'class'] = u'%s %s' % (attrs[u'class'],css)
else:
attrs[u'class'] = css
# clear instrument
del attrs[u'tiny-color-group']
return [node]
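
# Usage sketch (file names and binding values are hypothetical; the template is
# an XML file using the tiny-* attributes evaluated by TinyDataDriver, and the
# optional style argument is a JSON file mapping element/class names to CSS):
#
#   html = TinyRender() \
#       .define(u'report.template.xml') \
#       .bind({u'total': 42, u'rate': 0.125}) \
#       .render(style=u'report.style.json')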
|
zizon/TinyTemplate
|
tiny_template_engine.py
|
Python
|
mit
| 18,193 | 0.014346 |
#!/usr/bin/python2
"""fkmonthgraph - graph of enemy fighter kills & losses, by month
Requires matplotlib, see http://matplotlib.org or search your package
manager (Debian: apt-get install python-matplotlib)
"""
import sys
import hdata, fighterkill
from extra_data import Fighters as extra
import matplotlib.pyplot as plt
if __name__ == '__main__':
showtotal = '--nototal' not in sys.argv
legend = '--nolegend' not in sys.argv
data = fighterkill.extract_kills(sys.stdin)
monthly = {}
month = min(data.keys())
last = max(data.keys())
while month <= last:
next = month.nextmonth()
monthly[month] = {'total':{'kills':0, 'losses':0}, 'kills':[0 for i,f in enumerate(hdata.Fighters)], 'losses':[0 for i,f in enumerate(hdata.Fighters)]}
d = month.copy()
while d < next:
if d in data:
monthly[month]['total']['kills'] += data[d]['total']['kills']
monthly[month]['total']['losses'] += data[d]['total']['losses']
for i,f in enumerate(hdata.Fighters):
monthly[month]['kills'][i] += data[d]['kills'][i]
monthly[month]['losses'][i] += data[d]['losses'][i]
d = d.next()
month = next
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
dates = sorted(monthly.keys())
for fi,f in enumerate(hdata.Fighters):
def ins(m):
return hdata.inservice(m, f) or hdata.inservice(m.nextmonth(), f)
gp = plt.plot_date([d.ordinal() for d in dates if ins(d)], [monthly[d]['kills'][fi] for d in dates if ins(d)], fmt='o-', mew=0, color=extra[f['name']]['colour'], tz=None, xdate=True, ydate=False, label=f['name'], zorder=0)
gl = plt.plot_date([d.ordinal() for d in dates if ins(d)], [-monthly[d]['losses'][fi] for d in dates if ins(d)], fmt='o-', mew=0, color=extra[f['name']]['colour'], tz=None, xdate=True, ydate=False, label=None, zorder=0)
gt = plt.plot_date([d.ordinal() for d in dates], [monthly[d]['total']['kills'] for d in dates], fmt='k+-', tz=None, xdate=True, ydate=False, label='total', zorder=-2)
gb = plt.plot_date([d.ordinal() for d in dates], [-monthly[d]['total']['losses'] for d in dates], fmt='k+-', tz=None, xdate=True, ydate=False, label=None, zorder=-2)
ax.grid(b=True, axis='y')
plt.axhline(y=0, xmin=0, xmax=1, c='k', zorder=-1)
if legend: plt.legend(ncol=2, loc='upper left')
plt.show()
|
ec429/harris
|
stats/fkmonthgraph.py
|
Python
|
gpl-3.0
| 2,243 | 0.022737 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/ship/attachment/engine/shared_hutt_medium_engine_s02.iff"
result.attribute_template_id = 8
result.stfName("item_n","ship_attachment")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
obi-two/Rebelion
|
data/scripts/templates/object/tangible/ship/attachment/engine/shared_hutt_medium_engine_s02.py
|
Python
|
mit
| 470 | 0.046809 |
from django.db import models
class Author(models.Model):
name = models.CharField(max_length=20)
def __unicode__(self):
return self.name
class Book(models.Model):
name = models.CharField(max_length=20)
authors = models.ManyToManyField(Author)
def __unicode__(self):
return self.name
|
LethusTI/supportcenter
|
vendor/django/tests/regressiontests/signals_regress/models.py
|
Python
|
gpl-3.0
| 323 | 0.003096 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class responderparam(base_resource) :
""" Configuration for responser parameter resource. """
def __init__(self) :
self._undefaction = ""
@property
def undefaction(self) :
ur"""Action to perform when policy evaluation creates an UNDEF condition. Available settings function as follows:
* NOOP - Send the request to the protected server.
* RESET - Reset the request and notify the user's browser, so that the user can resend the request.
* DROP - Drop the request without sending a response to the user.<br/>Default value: "NOOP".
"""
try :
return self._undefaction
except Exception as e:
raise e
@undefaction.setter
def undefaction(self, undefaction) :
ur"""Action to perform when policy evaluation creates an UNDEF condition. Available settings function as follows:
* NOOP - Send the request to the protected server.
* RESET - Reset the request and notify the user's browser, so that the user can resend the request.
* DROP - Drop the request without sending a response to the user.<br/>Default value: "NOOP"
"""
try :
self._undefaction = undefaction
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(responderparam_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.responderparam
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
return 0
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
ur""" Use this API to update responderparam.
"""
try :
if type(resource) is not list :
updateresource = responderparam()
updateresource.undefaction = resource.undefaction
return updateresource.update_resource(client)
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
ur""" Use this API to unset the properties of responderparam resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = responderparam()
return unsetresource.unset_resource(client, args)
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the responderparam resources that are configured on netscaler.
"""
try :
if not name :
obj = responderparam()
response = obj.get_resources(client, option_)
return response
except Exception as e :
raise e
class responderparam_response(base_response) :
def __init__(self, length=1) :
self.responderparam = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.responderparam = [responderparam() for _ in range(length)]
|
benfinke/ns_python
|
nssrc/com/citrix/netscaler/nitro/resource/config/responder/responderparam.py
|
Python
|
apache-2.0
| 4,282 | 0.031761 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Simple lib to connect to a Zabbix agent and request the value of an item.
"""
import socket
def query_agent(**kwargs):
"""
Open a socket to port 10050 on the remote server and query for the number of
processes running via proc.num[<FOO>], where FOO is either zabbix_server or
zabbix_proxy.
"""
query_string = kwargs.get('query_string', 'agent.ping')
query_host = kwargs.get('query_host', '127.0.0.1')
    query_port = int(kwargs.get('query_port', 10050))
try:
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.connect((query_host, query_port))
    except socket.error as e:
        return (99999, 'ERROR: {} :: {}:{}'.format(e, query_host, query_port))
else:
        connection.send(query_string.encode())
        result = connection.recv(8192).decode()
        connection.close()
        retval = ''.join(x for x in result if x.isdigit())
return (0, retval)
return (0 ,'')
if __name__ == '__main__':
import doctest
doctest.testmod()
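
# Usage sketch (host and item key are hypothetical; the target must be running a
# Zabbix agent listening on port 10050 for this to return real data):
#
#   rc, value = query_agent(query_host='zabbix-agent.example.com',
#                           query_port=10050,
#                           query_string='proc.num[zabbix_server]')
#   if rc == 0:
#       print(value)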
|
nikatjef/python-zabbix
|
zabbix/zabbix/get.py
|
Python
|
lgpl-2.1
| 990 | 0.014141 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
trusted_scons_files = [
'src/shared/ppapi/build.scons',
'src/shared/ppapi_proxy/build.scons',
'src/trusted/plugin/build.scons',
'tests/ppapi_geturl/build.scons',
'tests/ppapi_messaging/build.scons',
'tests/ppapi_browser/ppb_file_system/build.scons',
'tests/ppapi_tests/build.scons', # Build PPAPI tests from Chrome as a .so
]
# Untrusted libraries for use by user code.
untrusted_scons_files = [
'src/shared/ppapi/nacl.scons',
]
# Untrusted libraries for use by system code.
untrusted_irt_scons_files = [
'src/shared/ppapi_proxy/nacl.scons',
]
nonvariant_test_scons_files = [
'tests/ppapi/nacl.scons',
'tests/ppapi_browser/bad/nacl.scons',
'tests/ppapi_browser/crash/nacl.scons',
'tests/ppapi_browser/extension_mime_handler/nacl.scons',
'tests/ppapi_browser/manifest/nacl.scons',
'tests/ppapi_browser/ppb_core/nacl.scons',
'tests/ppapi_browser/ppb_dev/nacl.scons',
'tests/ppapi_browser/ppb_file_system/nacl.scons',
'tests/ppapi_browser/ppb_graphics2d/nacl.scons',
'tests/ppapi_browser/ppb_graphics3d/nacl.scons',
'tests/ppapi_browser/ppb_image_data/nacl.scons',
'tests/ppapi_browser/ppb_instance/nacl.scons',
'tests/ppapi_browser/ppb_memory/nacl.scons',
'tests/ppapi_browser/ppb_pdf/nacl.scons',
'tests/ppapi_browser/ppb_scrollbar/nacl.scons',
'tests/ppapi_browser/ppb_url_loader/nacl.scons',
'tests/ppapi_browser/ppb_url_request_info/nacl.scons',
'tests/ppapi_browser/ppb_var/nacl.scons',
'tests/ppapi_browser/ppb_widget/nacl.scons',
'tests/ppapi_browser/ppp_input_event/nacl.scons',
'tests/ppapi_browser/ppp_instance/nacl.scons',
'tests/ppapi_browser/progress_events/nacl.scons',
'tests/ppapi_browser/stress_many_nexes/nacl.scons',
'tests/ppapi_example_2d/nacl.scons',
'tests/ppapi_example_audio/nacl.scons',
'tests/ppapi_example_events/nacl.scons',
# TODO(dspringer): re-enable test once the 3D ABI has stabilized. See
# http://code.google.com/p/nativeclient/issues/detail?id=2060
# 'tests/ppapi_example_gles2/nacl.scons',
'tests/ppapi_example_post_message/nacl.scons',
'tests/ppapi_geturl/nacl.scons',
'tests/ppapi_gles_book/nacl.scons',
'tests/ppapi_messaging/nacl.scons',
# Broken by Chrome change
# http://code.google.com/p/nativeclient/issues/detail?id=2480
#'tests/ppapi_simple_tests/nacl.scons',
'tests/ppapi_test_example/nacl.scons',
'tests/ppapi_test_lib/nacl.scons',
'tests/ppapi_tests/nacl.scons',
]
|
robclark/chromium
|
ppapi/native_client/ppapi_scons_files.py
|
Python
|
bsd-3-clause
| 2,682 | 0.000746 |
# Copyright (c) 2015-2016 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Various test data that may be shared across multiple tests.
# Naming follows the approximate format:
#
# <kind>_name<idx>_rev<revision>_<key attributes>
#
# Incrementing name indexes indicate the order in which they would be listed.
#
# The rev (a-z) indicates that it should be possible to switch between different
# revisions of the same data.
#
# The key attributes provide some useful additional data, for example (a v4 specific
# resource).
import netaddr
from utils import API_VERSION
#
# IPPools
#
ippool_name1_rev1_v4 = {
'apiVersion': API_VERSION,
'kind': 'IPPool',
'metadata': {
'name': 'ippool-name1'
},
'spec': {
'cidr': "10.0.1.0/24",
'ipipMode': 'Always',
'blockSize': 27
}
}
ippool_name1_rev2_v4 = {
'apiVersion': API_VERSION,
'kind': 'IPPool',
'metadata': {
'name': 'ippool-name1'
},
'spec': {
'cidr': "10.0.1.0/24",
'ipipMode': 'Never',
}
}
ippool_name2_rev1_v6 = {
'apiVersion': API_VERSION,
'kind': 'IPPool',
'metadata': {
'name': 'ippool-name2'
},
'spec': {
'cidr': "fed0:8001::/64",
'ipipMode': 'Never',
'blockSize': 123
}
}
#
# BGPPeers
#
bgppeer_name1_rev1_v4 = {
'apiVersion': API_VERSION,
'kind': 'BGPPeer',
'metadata': {
'name': 'bgppeer-name-123abc',
},
'spec': {
'node': 'node1',
'peerIP': '192.168.0.250',
'asNumber': 64514,
},
}
bgppeer_name1_rev2_v4 = {
'apiVersion': API_VERSION,
'kind': 'BGPPeer',
'metadata': {
'name': 'bgppeer-name-123abc',
},
'spec': {
'node': 'node2',
'peerIP': '192.168.0.251',
'asNumber': 64515,
},
}
bgppeer_name2_rev1_v6 = {
'apiVersion': API_VERSION,
'kind': 'BGPPeer',
'metadata': {
'name': 'bgppeer-name-456def',
},
'spec': {
'node': 'node2',
'peerIP': 'fd5f::6:ee',
'asNumber': 64590,
},
}
#
# Network Policy
#
networkpolicy_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'NetworkPolicy',
'metadata': {
'name': 'policy-mypolicy1',
'namespace': 'default'
},
'spec': {
'order': 100,
'selector': "type=='database'",
'types': ['Ingress', 'Egress'],
'egress': [
{
'action': 'Allow',
'source': {
'selector': "type=='application'"},
},
],
'ingress': [
{
'ipVersion': 4,
'action': 'Deny',
'destination': {
'notNets': ['10.3.0.0/16'],
'notPorts': ['110:1050'],
'notSelector': "type=='apples'",
'nets': ['10.2.0.0/16'],
'ports': ['100:200'],
'selector': "type=='application'",
},
'protocol': 'TCP',
'source': {
'notNets': ['10.1.0.0/16'],
'notPorts': [1050],
'notSelector': "type=='database'",
'nets': ['10.0.0.0/16'],
'ports': [1234, '10:1024'],
'selector': "type=='application'",
'namespaceSelector': 'has(role)',
}
}
],
}
}
networkpolicy_name1_rev2 = {
'apiVersion': API_VERSION,
'kind': 'NetworkPolicy',
'metadata': {
'name': 'policy-mypolicy1',
'namespace': 'default'
},
'spec': {
'order': 100000,
'selector': "type=='sql'",
'types': ['Ingress', 'Egress'],
'egress': [
{
'action': 'Deny',
'protocol': 'TCP',
},
],
'ingress': [
{
'action': 'Allow',
'protocol': 'UDP',
},
],
}
}
networkpolicy_name2_rev1 = {
'apiVersion': API_VERSION,
'kind': 'NetworkPolicy',
'metadata': {
'name': 'policy-mypolicy2',
'namespace': 'default',
'generateName': 'test-policy-',
'deletionTimestamp': '2006-01-02T15:04:07Z',
'deletionGracePeriodSeconds': 30,
'ownerReferences': [{
'apiVersion': 'extensions/v1beta1',
'blockOwnerDeletion': True,
'controller': True,
'kind': 'DaemonSet',
'name': 'endpoint1',
'uid': 'test-uid-change',
}],
'initializers': {
'pending': [{
'name': 'initializer1',
}],
'result': {
'status': 'test-status',
},
},
'clusterName': 'cluster1',
'labels': {'label1': 'l1', 'label2': 'l2'},
'annotations': {'key': 'value'},
'selfLink': 'test-self-link',
'uid': 'test-uid-change',
'generation': 3,
'finalizers': ['finalizer1', 'finalizer2'],
'creationTimestamp': '2006-01-02T15:04:05Z',
},
'spec': {
'order': 100000,
'selector': "type=='sql'",
'types': ['Ingress', 'Egress'],
'egress': [
{
'action': 'Deny',
'protocol': 'TCP',
},
],
'ingress': [
{
'action': 'Allow',
'protocol': 'UDP',
},
],
}
}
#
# Global Network Policy
#
globalnetworkpolicy_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'GlobalNetworkPolicy',
'metadata': {
'name': 'policy-mypolicy1',
},
'spec': {
'order': 100,
'selector': "type=='database'",
'types': ['Ingress', 'Egress'],
'egress': [
{
'action': 'Allow',
'source': {
'selector': "type=='application'"},
},
],
'ingress': [
{
'ipVersion': 4,
'action': 'Deny',
'destination': {
'notNets': ['10.3.0.0/16'],
'notPorts': ['110:1050'],
'notSelector': "type=='apples'",
'nets': ['10.2.0.0/16'],
'ports': ['100:200'],
'selector': "type=='application'",
},
'protocol': 'TCP',
'source': {
'notNets': ['10.1.0.0/16'],
'notPorts': [1050],
'notSelector': "type=='database'",
'nets': ['10.0.0.0/16'],
'ports': [1234, '10:1024'],
'selector': "type=='application'",
'namespaceSelector': 'has(role)',
}
}
],
}
}
globalnetworkpolicy_name1_rev2 = {
'apiVersion': API_VERSION,
'kind': 'GlobalNetworkPolicy',
'metadata': {
'name': 'policy-mypolicy1',
},
'spec': {
'order': 100000,
'selector': "type=='sql'",
'doNotTrack': True,
'applyOnForward': True,
'types': ['Ingress', 'Egress'],
'egress': [
{
'action': 'Deny',
'protocol': 'TCP',
},
],
'ingress': [
{
'action': 'Allow',
'protocol': 'UDP',
},
],
}
}
#
# Global network sets
#
globalnetworkset_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'GlobalNetworkSet',
'metadata': {
'name': 'net-set1',
},
'spec': {
'nets': [
"10.0.0.1",
"11.0.0.0/16",
"feed:beef::1",
"dead:beef::96",
]
}
}
# A network set with a large number of entries. In prototyping this test, I found that there are
# "upstream" limits that cap how large we can go:
#
# - Kubernetes' gRPC API has a 4MB message size limit.
# - etcdv3 has a 1MB value size limit.
many_nets = []
for i in xrange(10000):
many_nets.append("10.%s.%s.0/28" % (i >> 8, i % 256))
globalnetworkset_name1_rev1_large = {
'apiVersion': API_VERSION,
'kind': 'GlobalNetworkSet',
'metadata': {
'name': 'net-set1',
},
'spec': {
'nets': many_nets,
}
}
#
# Host Endpoints
#
hostendpoint_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'HostEndpoint',
'metadata': {
'name': 'endpoint1',
'labels': {'type': 'database'},
},
'spec': {
'interfaceName': 'eth0',
'profiles': ['prof1', 'prof2'],
'node': 'host1'
}
}
hostendpoint_name1_rev2 = {
'apiVersion': API_VERSION,
'kind': 'HostEndpoint',
'metadata': {
'name': 'endpoint1',
'labels': {'type': 'frontend'}
},
'spec': {
'interfaceName': 'cali7',
'profiles': ['prof1', 'prof2'],
'node': 'host2'
}
}
hostendpoint_name1_rev3 = {
'apiVersion': API_VERSION,
'kind': 'HostEndpoint',
'metadata': {
'name': 'endpoint1',
'labels': {'type': 'frontend', 'misc': 'version1'},
'annotations': {'key': 'value'},
'selfLink': 'test-self-link',
'uid': 'test-uid-change',
'generation': 3,
'finalizers': ['finalizer1', 'finalizer2'],
'creationTimestamp': '2006-01-02T15:04:05Z',
},
'spec': {
'interfaceName': 'cali7',
'profiles': ['prof1', 'prof2'],
'node': 'host2'
}
}
#
# Profiles
#
profile_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'Profile',
'metadata': {
'labels': {'foo': 'bar'},
'name': 'profile-name1'
},
'spec': {
'egress': [
{
'action': 'Allow',
'source': {
'selector': "type=='application'"
}
}
],
'ingress': [
{
'ipVersion': 4,
'action': 'Deny',
'destination': {
'notNets': ['10.3.0.0/16'],
'notPorts': ['110:1050'],
'notSelector': "type=='apples'",
'nets': ['10.2.0.0/16'],
'ports': ['100:200'],
'selector': "type=='application'"},
'protocol': 'TCP',
'source': {
'notNets': ['10.1.0.0/16'],
'notPorts': [1050],
'notSelector': "type=='database'",
'nets': ['10.0.0.0/16'],
'ports': [1234, '10:20'],
'selector': "type=='application'",
}
}
],
}
}
profile_name1_rev2 = {
'apiVersion': API_VERSION,
'kind': 'Profile',
'metadata': {
'name': 'profile-name1',
},
'spec': {
'egress': [
{
'action': 'Allow'
}
],
'ingress': [
{
'ipVersion': 6,
'action': 'Deny',
},
],
}
}
#
# Workload Endpoints
#
workloadendpoint_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'WorkloadEndpoint',
'metadata': {
'labels': {
'projectcalico.org/namespace': 'namespace1',
'projectcalico.org/orchestrator': 'k8s',
'type': 'database',
},
'name': 'node1-k8s-abcd-eth0',
'namespace': 'namespace1',
},
'spec': {
'node': 'node1',
'orchestrator': 'k8s',
'pod': 'abcd',
'endpoint': 'eth0',
'containerID': 'container1234',
'ipNetworks': ['1.2.3.4/32'],
'interfaceName': 'cali1234',
'profiles': ['prof1', 'prof2'],
}
}
workloadendpoint_name2_rev1 = {
'apiVersion': API_VERSION,
'kind': 'WorkloadEndpoint',
'metadata': {
'labels': {
'projectcalico.org/namespace': 'namespace1',
'projectcalico.org/orchestrator': 'cni',
'type': 'database'
},
'name': 'node2-cni-container1234-eth0',
'namespace': 'namespace1',
},
'spec': {
'node': 'node2',
'orchestrator': 'cni',
'endpoint': 'eth0',
'containerID': 'container1234',
'ipNetworks': ['1.2.3.4/32'],
'interfaceName': 'cali1234',
'profiles': ['prof1', 'prof2'],
}
}
#
# Nodes
#
node_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'Node',
'metadata': {
'name': 'node1',
},
'spec': {
'bgp': {
'ipv4Address': '1.2.3.4/24',
'ipv6Address': 'aa:bb:cc::ff/120',
}
}
}
node_name2_rev1 = {
'apiVersion': API_VERSION,
'kind': 'Node',
'metadata': {
'name': 'node2',
},
'spec': {
'bgp': {
'ipv4Address': '1.2.3.5/24',
'ipv6Address': 'aa:bb:cc::ee/120',
}
}
}
node_name3_rev1 = {
'apiVersion': API_VERSION,
'kind': 'Node',
'metadata': {
'name': 'node3',
},
'spec': {
'bgp': {
'ipv4Address': '1.2.3.6/24',
'ipv6Address': 'aa:bb:cc::dd/120',
}
}
}
#
# BGPConfigs
#
bgpconfig_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'BGPConfiguration',
'metadata': {
'name': 'default',
},
'spec': {
'logSeverityScreen': 'Info',
'nodeToNodeMeshEnabled': True,
'asNumber': 6512,
}
}
bgpconfig_name1_rev2 = {
'apiVersion': API_VERSION,
'kind': 'BGPConfiguration',
'metadata': {
'name': 'default',
},
'spec': {
'logSeverityScreen': 'Info',
'nodeToNodeMeshEnabled': False,
'asNumber': 6511,
}
}
bgpconfig_name2_rev1 = {
'apiVersion': API_VERSION,
'kind': 'BGPConfiguration',
'metadata': {
'name': 'bgpconfiguration1',
},
'spec': {
'logSeverityScreen': 'Info',
}
}
bgpconfig_name2_rev2 = {
'apiVersion': API_VERSION,
'kind': 'BGPConfiguration',
'metadata': {
'name': 'bgpconfiguration1',
},
'spec': {
'logSeverityScreen': 'Debug',
}
}
bgpconfig_name2_rev3 = {
'apiVersion': API_VERSION,
'kind': 'BGPConfiguration',
'metadata': {
'name': 'bgpconfiguration1',
},
'spec': {
'logSeverityScreen': 'Debug',
'nodeToNodeMeshEnabled': True,
}
}
#
# FelixConfigs
#
felixconfig_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'FelixConfiguration',
'metadata': {
'name': 'felixconfiguration1',
},
'spec': {
'chainInsertMode': 'append',
'defaultEndpointToHostAction': 'Accept',
'failsafeInboundHostPorts': [
{'protocol': 'TCP', 'port': 666},
{'protocol': 'UDP', 'port': 333}, ],
'failsafeOutboundHostPorts': [
{'protocol': 'TCP', 'port': 999},
{'protocol': 'UDP', 'port': 222},
{'protocol': 'UDP', 'port': 422}, ],
'interfacePrefix': 'humperdink',
'ipipMTU': 1521,
'ipsetsRefreshInterval': '44s',
'iptablesFilterAllowAction': 'Return',
'iptablesLockFilePath': '/run/fun',
'iptablesLockProbeInterval': '500ms',
'iptablesLockTimeout': '22s',
'iptablesMangleAllowAction': 'Accept',
'iptablesMarkMask': 0xff0000,
'iptablesPostWriteCheckInterval': '12s',
'iptablesRefreshInterval': '22s',
'ipv6Support': True,
'logFilePath': '/var/log/fun.log',
'logPrefix': 'say-hello-friend',
'logSeverityScreen': 'Info',
'maxIpsetSize': 8192,
'metadataAddr': '127.1.1.1',
'metadataPort': 8999,
'netlinkTimeout': '10s',
'prometheusGoMetricsEnabled': True,
'prometheusMetricsEnabled': True,
'prometheusMetricsPort': 11,
'prometheusProcessMetricsEnabled': True,
'reportingInterval': '10s',
'reportingTTL': '99s',
'routeRefreshInterval': '33s',
'usageReportingEnabled': False,
}
}
felixconfig_name1_rev2 = {
'apiVersion': API_VERSION,
'kind': 'FelixConfiguration',
'metadata': {
'name': 'felixconfiguration1',
},
'spec': {
'ipv6Support': False,
'logSeverityScreen': 'Debug',
'netlinkTimeout': '11s',
}
}
# The large values for `netlinkTimeout` and `reportingTTL` will be transformed
# into a different unit type in the format `XhXmXs`.
felixconfig_name1_rev3 = {
'apiVersion': API_VERSION,
'kind': 'FelixConfiguration',
'metadata': {
'name': 'felixconfiguration1',
},
'spec': {
'ipv6Support': False,
'logSeverityScreen': 'Debug',
'netlinkTimeout': '125s',
'reportingTTL': '9910s',
}
}
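# Illustrative expectation only (assuming the server applies Go-style duration
# formatting): '125s' would read back as '2m5s' and '9910s' as '2h45m10s'.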
#
# ClusterInfo
#
clusterinfo_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'ClusterInformation',
'metadata': {
'name': 'default',
},
'spec': {
'clusterGUID': 'cluster-guid1',
'datastoreReady': True,
}
}
clusterinfo_name1_rev2 = {
'apiVersion': API_VERSION,
'kind': 'ClusterInformation',
'metadata': {
'name': 'default',
},
'spec': {
'clusterGUID': 'cluster-guid2',
'clusterType': 'cluster-type2',
'calicoVersion': 'calico-version2',
}
}
|
insequent/calico-docker
|
tests/st/utils/data.py
|
Python
|
apache-2.0
| 17,951 | 0.000223 |
"""
kombu.common
============
Common Utilities.
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from __future__ import with_statement
import socket
import sys
from collections import defaultdict, deque
from functools import partial
from itertools import count
from . import serialization
from .entity import Exchange, Queue
from .log import Log
from .messaging import Consumer as _Consumer
from .utils import uuid
__all__ = ["Broadcast", "entry_to_queue", "maybe_declare", "uuid",
"itermessages", "send_reply", "isend_reply",
"collect_replies", "insured", "ipublish"]
declared_entities = defaultdict(lambda: set())
insured_logger = Log("kombu.insurance")
class Broadcast(Queue):
"""Convenience class used to define broadcast queues.
Every queue instance will have a unique name,
    and both the queue and the exchange are configured with auto deletion.
:keyword name: This is used as the name of the exchange.
:keyword queue: By default a unique id is used for the queue
name for every consumer. You can specify a custom queue
name here.
:keyword \*\*kwargs: See :class:`~kombu.entity.Queue` for a list
of additional keyword arguments supported.
"""
def __init__(self, name=None, queue=None, **kwargs):
return super(Broadcast, self).__init__(
name=queue or "bcast.%s" % (uuid(), ),
**dict({"alias": name,
"auto_delete": True,
"exchange": Exchange(name, type="fanout"),
}, **kwargs))
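# Illustrative usage of Broadcast (a sketch only; assumes a reachable broker and an
# `on_event(body, message)` callback defined elsewhere):
#   from kombu import Connection
#   with Connection("amqp://guest:guest@localhost//") as connection:
#       bcast = Broadcast("events")  # unique queue bound to the fanout exchange "events"
#       with _Consumer(connection.channel(), [bcast], callbacks=[on_event]):
#           connection.drain_events(timeout=1)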
def maybe_declare(entity, channel, retry=False, **retry_policy):
if retry:
return _imaybe_declare(entity, channel, **retry_policy)
return _maybe_declare(entity, channel)
def _maybe_declare(entity, channel):
declared = declared_entities[channel.connection.client]
if not entity.is_bound:
entity = entity.bind(channel)
if not entity.can_cache_declaration or entity not in declared:
entity.declare()
declared.add(entity)
return True
return False
def _imaybe_declare(entity, channel, **retry_policy):
entity = entity(channel)
return channel.connection.client.ensure(entity, _maybe_declare,
**retry_policy)(entity, channel)
def itermessages(conn, channel, queue, limit=1, timeout=None,
Consumer=_Consumer, **kwargs):
acc = deque()
def on_message(body, message):
acc.append((body, message))
with Consumer(channel, [queue], callbacks=[on_message], **kwargs):
for _ in eventloop(conn, limit=limit, timeout=timeout,
ignore_timeouts=True):
try:
yield acc.popleft()
except IndexError:
pass
def eventloop(conn, limit=None, timeout=None, ignore_timeouts=False):
"""Best practice generator wrapper around ``Connection.drain_events``.
Able to drain events forever, with a limit, and optionally ignoring
timeout errors (a timeout of 1 is often used in environments where
the socket can get "stuck", and is a best practice for Kombu consumers).
**Examples**
``eventloop`` is a generator::
>>> from kombu.common import eventloop
>>> it = eventloop(connection, timeout=1, ignore_timeouts=True)
>>> it.next() # one event consumed, or timed out.
>>> for _ in eventloop(connection, timeout=1, ignore_timeouts=True):
... pass # loop forever.
It also takes an optional limit parameter, and timeout errors
are propagated by default::
for _ in eventloop(connection, limit=1, timeout=1):
pass
.. seealso::
:func:`itermessages`, which is an event loop bound to one or more
consumers, that yields any messages received.
"""
for i in limit and xrange(limit) or count():
try:
yield conn.drain_events(timeout=timeout)
except socket.timeout:
if timeout and not ignore_timeouts:
raise
except socket.error:
pass
def send_reply(exchange, req, msg, producer=None, **props):
content_type = req.content_type
serializer = serialization.registry.type_to_name[content_type]
maybe_declare(exchange, producer.channel)
producer.publish(msg, exchange=exchange,
**dict({"routing_key": req.properties["reply_to"],
"correlation_id": req.properties.get("correlation_id"),
"serializer": serializer},
**props))
def isend_reply(pool, exchange, req, msg, props, **retry_policy):
return ipublish(pool, send_reply,
(exchange, req, msg), props, **retry_policy)
def collect_replies(conn, channel, queue, *args, **kwargs):
no_ack = kwargs.setdefault("no_ack", True)
received = False
for body, message in itermessages(conn, channel, queue, *args, **kwargs):
if not no_ack:
message.ack()
received = True
yield body
if received:
channel.after_reply_message_received(queue.name)
def _ensure_errback(exc, interval):
insured_logger.error(
"Connection error: %r. Retry in %ss\n" % (exc, interval),
exc_info=sys.exc_info())
def revive_connection(connection, channel, on_revive=None):
if on_revive:
on_revive(channel)
def revive_producer(producer, channel, on_revive=None):
revive_connection(producer.connection, channel)
if on_revive:
on_revive(channel)
def insured(pool, fun, args, kwargs, errback=None, on_revive=None, **opts):
"""Ensures function performing broker commands completes
despite intermittent connection failures."""
errback = errback or _ensure_errback
with pool.acquire(block=True) as conn:
conn.ensure_connection(errback=errback)
# we cache the channel for subsequent calls, this has to be
# reset on revival.
channel = conn.default_channel
revive = partial(revive_connection, conn, on_revive=on_revive)
insured = conn.autoretry(fun, channel, errback=errback,
on_revive=revive, **opts)
retval, _ = insured(*args, **dict(kwargs, connection=conn))
return retval
def ipublish(pool, fun, args=(), kwargs={}, errback=None, on_revive=None,
**retry_policy):
with pool.acquire(block=True) as producer:
errback = errback or _ensure_errback
revive = partial(revive_producer, producer, on_revive=on_revive)
f = producer.connection.ensure(producer, fun, on_revive=revive,
errback=errback, **retry_policy)
return f(*args, **dict(kwargs, producer=producer))
def entry_to_queue(queue, **options):
binding_key = options.get("binding_key") or options.get("routing_key")
e_durable = options.get("exchange_durable")
if e_durable is None:
e_durable = options.get("durable")
e_auto_delete = options.get("exchange_auto_delete")
if e_auto_delete is None:
e_auto_delete = options.get("auto_delete")
q_durable = options.get("queue_durable")
if q_durable is None:
q_durable = options.get("durable")
q_auto_delete = options.get("queue_auto_delete")
if q_auto_delete is None:
q_auto_delete = options.get("auto_delete")
e_arguments = options.get("exchange_arguments")
q_arguments = options.get("queue_arguments")
b_arguments = options.get("binding_arguments")
exchange = Exchange(options.get("exchange"),
type=options.get("exchange_type"),
delivery_mode=options.get("delivery_mode"),
routing_key=options.get("routing_key"),
durable=e_durable,
auto_delete=e_auto_delete,
arguments=e_arguments)
return Queue(queue,
exchange=exchange,
routing_key=binding_key,
durable=q_durable,
exclusive=options.get("exclusive"),
auto_delete=q_auto_delete,
no_ack=options.get("no_ack"),
queue_arguments=q_arguments,
binding_arguments=b_arguments)
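# Illustrative usage of entry_to_queue (sketch only; option names as handled above):
#   q = entry_to_queue("tasks", exchange="tasks", exchange_type="direct",
#                      routing_key="tasks.default", durable=True, auto_delete=False)
# which yields a durable Queue "tasks" bound to the direct exchange "tasks".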
|
kumar303/rockit
|
vendor-local/kombu/common.py
|
Python
|
bsd-3-clause
| 8,390 | 0.001073 |
#!/usr/bin/python
#
# Copyright 2014, Intel Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# native
'''
Created on 13 oct. 2014
@author: ronan.lemartret@open.eurogiciel.org
'''
import sys
import os
try:
    import cmdln
except ImportError:
    print >> sys.stderr, 'Error: spec2yocto requires "python-cmdln", please install it.'
    sys.exit( 1 )
#TODO need precision
#WARNING a patch can be a gz file
#WARNING a patch can be composed of many commits
def isPatch(files) :
return (".diff" in files) or (".patch" in files)
#TODO need precision
def isFromIntel(patch_file) :
if patch_file.endswith('.diff') or patch_file.endswith('.patch'):
with open(patch_file,"r") as patch_fd:
for line in patch_fd:
if line.startswith("From:") and (("intel.com" in line) or ("eurogiciel.org" in line) or ("fridu.org" in line)):
return True
return False
def count_intel_patch(SRCDIR,package_files):
count_intel_patch=0
for p in package_files:
if isPatch( p) and isFromIntel(os.path.join(SRCDIR,p)):
count_intel_patch+=1
return count_intel_patch
def count_patch(package_files) :
count_patch=0
for p in package_files:
if isPatch( p):
count_patch+=1
return count_patch
#What if many spec file?
def get_license(SRCDIR,package_files) :
license=""
for p in package_files:
if (".spec" in p):
return find_license(os.path.join(SRCDIR,p))
return license
#What if many license file?
#TODO need precision
def find_license(spec_file) :
license=""
with open(spec_file,"r") as spec_fd:
for line in spec_fd:
if "License:" in line:
return line.split("License:")[1].replace("\n","").replace("\t","").replace(" ","")
return license
class CheckRpmSrc(cmdln.Cmdln):
name = "createVersionYoctoTizen"
version = "0.1"
@cmdln.option( "--rpmsSRCDIR",
action = "store",
default = "Tizen-rpm-source.html",
help = "the Tizen rpms source dir" )
def do_status(self, subcmd, opts):
"""generate status
${cmd_usage}--
${cmd_option_list}
"""
for package_rpm in os.listdir(opts.rpmsSRCDIR):
package_dir=package_rpm
release=package_rpm[package_rpm.rfind("-")+1:].replace(".src.rpm","")
package_rpm=package_rpm[:package_rpm.rfind("-")]
version=package_rpm[package_rpm.rfind("-")+1:]
name=package_rpm[:package_rpm.rfind("-")]
package_files = os.listdir(os.path.join(opts.rpmsSRCDIR, package_dir))
nb_patch=count_patch(package_files)
license=get_license(os.path.join(opts.rpmsSRCDIR, package_dir),package_files)
nb_intel_patch=count_intel_patch(os.path.join(opts.rpmsSRCDIR, package_dir),package_files)
print "%s\t%s\t%s\t%s\t%s" %(name, version, license, nb_patch, nb_intel_patch)
def main():
checkRpmSrc = CheckRpmSrc()
sys.exit( checkRpmSrc.main() )
if __name__ == '__main__':
main()
|
eurogiciel-oss/Tizen-development-report
|
bin/checkRpmSrc.py
|
Python
|
mit
| 3,700 | 0.026216 |
###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from autobahn.wamp.types import CallResult
from autobahn.twisted.wamp import ApplicationSession
class Component(ApplicationSession):
"""
Application component that provides procedures which
return complex results.
"""
def onConnect(self):
self.join("realm1")
def onJoin(self, details):
def add_complex(a, ai, b, bi):
return CallResult(c = a + b, ci = ai + bi)
self.register(add_complex, 'com.myapp.add_complex')
def split_name(fullname):
forename, surname = fullname.split()
return CallResult(forename, surname)
self.register(split_name, 'com.myapp.split_name')
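    # Illustrative caller-side sketch (not part of this backend; assumes a connected
    # frontend ApplicationSession using inlineCallbacks):
    #   res = yield self.call('com.myapp.add_complex', 2, 3, 4, 5)
    #   print("Result: {} + {}i".format(res.kwresults['c'], res.kwresults['ci']))
    #   res2 = yield self.call('com.myapp.split_name', 'Homer Simpson')
    #   forename, surname = res2.results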
|
robtandy/AutobahnPython
|
examples/twisted/wamp/basic/rpc/complex/backend.py
|
Python
|
apache-2.0
| 1,506 | 0.017928 |
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Package contenant la commande 'scripting alerte info'."""
from primaires.interpreteur.masque.parametre import Parametre
from primaires.format.fonctions import echapper_accolades
from primaires.format.date import get_date
class PrmInfo(Parametre):
"""Commande 'scripting alerte info'"""
def __init__(self):
"""Constructeur du paramètre."""
Parametre.__init__(self, "info", "info")
self.schema = "<nombre>"
self.aide_courte = "affiche des informations sur l'alerte"
self.aide_longue = \
"Affiche des informations sur l'alerte permettant de la corriger."
def interpreter(self, personnage, dic_masques):
"""Méthode d'interprétation de commande"""
nombre = dic_masques["nombre"].nombre
try:
alerte = type(self).importeur.scripting.alertes[nombre]
except KeyError:
personnage << "|err|Ce numéro d'alerte est invalide.|ff|"
else:
msg = "Informations sur l'alerte {} :".format(alerte.no)
msg += "\n S'est produit sur {} {}".format(alerte.type,
alerte.objet) + " " + get_date(alerte.date.timetuple())
msg += "\n Evenement {}, test {}, ligne {}".format(
alerte.evenement, echapper_accolades(alerte.test),
alerte.no_ligne)
msg += "\n {}\n".format(echapper_accolades(alerte.ligne))
msg += "\n Message d'erreur : |err|{}|ff|".format(
echapper_accolades(alerte.message))
if personnage.nom_groupe == "administrateur":
msg += "\n Traceback Python :\n {}".format(
echapper_accolades(alerte.traceback))
personnage << msg
|
vlegoff/tsunami
|
src/primaires/scripting/commandes/scripting/alerte_info.py
|
Python
|
bsd-3-clause
| 3,352 | 0.002688 |
from typing import List, Any, Mapping
from .utils import clean_filters
class DockerTasks(object):
def __init__(self, docker):
self.docker = docker
async def list(self, *, filters: Mapping=None) -> List[Mapping]:
"""
Return a list of tasks
Args:
filters: a collection of filters
Available filters:
desired-state=(running | shutdown | accepted)
id=<task id>
label=key or label="key=value"
name=<task name>
node=<node id or name>
service=<service name>
"""
params = {"filters": clean_filters(filters)}
response = await self.docker._query_json(
"tasks",
method='GET',
params=params
)
return response
async def inspect(self, task_id: str) -> Mapping[str, Any]:
"""
Return info about a task
Args:
task_id: is ID of the task
"""
response = await self.docker._query_json(
"tasks/{task_id}".format(task_id=task_id),
method='GET',
)
return response
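    # Illustrative usage (sketch only; assumes an initialized aiodocker Docker client):
    #   tasks = await docker.tasks.list(filters={"service": "web", "desired-state": "running"})
    #   details = await docker.tasks.inspect(tasks[0]["ID"])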
|
barrachri/aiodocker
|
aiodocker/tasks.py
|
Python
|
apache-2.0
| 1,131 | 0.001768 |
import zstackwoodpecker.operations.host_operations as host_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.test_util as test_util
volume = None
disconnect = False
host = None
def test():
global disconnect, volume, host
# query&get clusters
cond = res_ops.gen_query_conditions('name', '=', "cluster1")
cluster1 = res_ops.query_resource(res_ops.CLUSTER, cond)[0]
cond = res_ops.gen_query_conditions('name', '=', "cluster2")
cluster2 = res_ops.query_resource(res_ops.CLUSTER, cond)[0]
# query&get hosts
cond = res_ops.gen_query_conditions('clusterUuid', '=', cluster1.uuid)
cluster1_host = res_ops.query_resource(res_ops.HOST, cond)
cond = res_ops.gen_query_conditions('clusterUuid', '=', cluster2.uuid)
cluster2_host = res_ops.query_resource(res_ops.HOST, cond)
# disconnect mn_host1
host = cluster1_host[0]
host_ops.update_kvm_host(host.uuid, 'username', "root1")
try:
host_ops.reconnect_host(host.uuid)
except:
test_util.test_logger("host: [%s] is disconnected" % host.uuid)
disconnect = True
# create_volume on 2 clusters
ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)[0]
systemtags1 = ["volumeProvisioningStrategy::ThickProvisioning", "capability::virtio-scsi",
"miniStorage::clusterUuid::%s" % cluster1.uuid]
volume_creation_option = test_util.VolumeOption()
volume_creation_option.set_name("cluster1_volume")
volume_creation_option.set_primary_storage_uuid(ps.uuid)
volume_creation_option.set_system_tags(systemtags1)
volume_creation_option.set_diskSize(2 * 1024 * 1024 * 1024)
try:
volume_inv = vol_ops.create_volume_from_diskSize(volume_creation_option)
except Exception as e:
host_ops.update_kvm_host(host.uuid, 'username', "root")
host_ops.reconnect_host(host.uuid)
print e.message.encode("utf-8")
def error_cleanup():
global host, disconnect
if disconnect:
host_ops.update_kvm_host(host.uuid, 'username', "root")
host_ops.reconnect_host(host.uuid)
disconnect = False
def env_recover():
global host, disconnect
if disconnect:
host_ops.update_kvm_host(host.uuid, 'username', "root")
host_ops.reconnect_host(host.uuid)
disconnect = False
|
zstackio/zstack-woodpecker
|
integrationtest/vm/mini/multiclusters/test_disconnect_host_volume_create_negative1.py
|
Python
|
apache-2.0
| 2,411 | 0.001244 |
# 1. Convert 1024 to binary and hexadecimal representation:
x = 1024
y = bin(x)
z = hex(x)
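# For reference, the values produced above (illustrative check):
assert y == bin(1024) == '0b10000000000'
assert z == hex(1024) == '0x400'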
|
dmchu/selenium_gr_5
|
git_tests/second_file.py
|
Python
|
apache-2.0
| 93 | 0.010753 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'designer/getaddons.ui'
#
# Created: Fri Aug 22 00:57:31 2014
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(367, 204)
self.verticalLayout = QtGui.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.label = QtGui.QLabel(Dialog)
self.label.setWordWrap(True)
self.label.setObjectName(_fromUtf8("label"))
self.verticalLayout.addWidget(self.label)
spacerItem = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label_2 = QtGui.QLabel(Dialog)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout.addWidget(self.label_2)
self.code = QtGui.QLineEdit(Dialog)
self.code.setObjectName(_fromUtf8("code"))
self.horizontalLayout.addWidget(self.code)
self.verticalLayout.addLayout(self.horizontalLayout)
self.buttonBox = QtGui.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_("Install Add-on"))
self.label.setText(_("To browse add-ons, please click the browse button below.<br><br>When you\'ve found an add-on you like, please paste its code below."))
self.label_2.setText(_("Code:"))
|
weihautin/anki
|
aqt/forms/getaddons.py
|
Python
|
agpl-3.0
| 2,703 | 0.00333 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for GetFinding
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-websecurityscanner
# [START websecurityscanner_v1alpha_generated_WebSecurityScanner_GetFinding_sync]
from google.cloud import websecurityscanner_v1alpha
def sample_get_finding():
# Create a client
client = websecurityscanner_v1alpha.WebSecurityScannerClient()
# Initialize request argument(s)
request = websecurityscanner_v1alpha.GetFindingRequest(
name="name_value",
)
# Make the request
response = client.get_finding(request=request)
# Handle the response
print(response)
# [END websecurityscanner_v1alpha_generated_WebSecurityScanner_GetFinding_sync]
|
googleapis/python-websecurityscanner
|
samples/generated_samples/websecurityscanner_v1alpha_generated_web_security_scanner_get_finding_sync.py
|
Python
|
apache-2.0
| 1,528 | 0.001309 |
#
# Copyright (c) 2016-2018 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Description:
# This script takes an ELF file, converts it to its binary in-memory
# representation, signs it (using either an online service or a
# locally-stored key), and replaces the contents of the specified
# ELF section with the generated signature.
#
# Usage:
# elf_sign.py <elfPath> <signingKey> <sectionName> <elf2binCmd>
#
# elfPath - Path to the ELF we want to sign
# signingKey - Key to use (either a service URL or a local key path)
# sectionName - ELF section to replace with signature
# elf2binCmd - Command for converting ELF to in-memory binary representation
#
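# Illustrative invocation (hypothetical paths, key file and objcopy command):
#   elf_sign.py build/app.elf signing_key.pem .signature \
#       "arm-none-eabi-objcopy -O binary --remove-section=.signature"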
import sys, os, subprocess, tempfile, collections, hashlib, binascii, pyasn1, pyasn1.codec.ber.decoder, pycurl, StringIO
SIGNATURE_NONE = b'\x00'
SIGNATURE_ECDSA_SHA256_SECP224R1 = b'\x01'
# Dump integer to a C array, used for debugging only
def c_array(name, val):
length = len(val)
res = ""
for i in range(0, length):
if (i % 16) == 0:
res = res + " "
res = res + "0x%02x, " % val[i]
if ((i % 16) == 15) | (i == (length - 1)):
res = res + "\n"
res = " uint8_t " + name + "[] = {\n" + res + " };"
print res
# Read a little endian value from an ELF file
def elf_read_le(buf, offset, n):
val = 0
for i in range(0, n):
val = (val << 8) | ord(buf[offset + n - 1 - i])
return val
# Replace the contents of an ELF section. Used to replace blank signature data with the actual signature.
# binutils objcopy has a new command '--update-section .sectionname=file', but is not yet available. This does the same thing.
def elf_update_section(elfPath, sectionName, sectionData):
sectionTuple = collections.namedtuple("sectionTuple", "name_offset, name, offset, size")
# Read in the original ELF
elfSize = os.stat(elfPath).st_size
elf = open(elfPath, "rb")
output = elf.read()
elf.close()
# Do some sanity checking on the ELF file headers
magic = output[0:4]
assert magic == b'\x7fELF', 'Magic number does not match'
ei_class = output[4]
assert ei_class == b'\x01', 'Only 32-bit ELF files are supported'
ei_data = output[5]
assert ei_class == b'\x01', "Only LE ELF files are supported"
ei_version = output[6]
assert ei_version == b'\x01', "Only ELF version 1 is supported"
e_shoff = elf_read_le(output, 0x20, 4) # Start of section header table
e_shentsize = elf_read_le(output, 0x2e, 2) # Size of a section header table entry
e_shnum = elf_read_le(output, 0x30, 2) # Number of entries in the section header table
e_shstrndx = elf_read_le(output, 0x32, 2) # Index of the section header table entry that contains the section names
assert (e_shoff + (e_shnum * e_shentsize)) <= elfSize, "Section header runs past end of file"
assert e_shstrndx <= e_shnum, "Section name index > number of sections"
# Read in all the sections in the table
sections = []
for i in range(0, e_shnum):
sh_name = elf_read_le(output, (e_shoff + (i * e_shentsize) + 0), 4)
sh_offset = elf_read_le(output, (e_shoff + (i * e_shentsize) + 16), 4)
sh_size = elf_read_le(output, (e_shoff + (i * e_shentsize) + 20), 4)
assert (sh_offset + sh_size) <= elfSize, "Section data runs past end of file"
s = sectionTuple(name_offset = sh_name, name = "", offset = sh_offset, size = sh_size)
sections.append(s)
# Lookup the section names
for i in range(0, e_shnum):
s = sectionTuple(name_offset = sections[i].name_offset, \
name = output[(sections[e_shstrndx].offset + sections[i].name_offset):].partition(b'\x00')[0], \
offset = sections[i].offset, \
size = sections[i].size)
sections[i] = s
# Find the section we want to update
sectionIndex = -1
for i in range(0, e_shnum):
if sections[i].name == sectionName:
sectionIndex = i
assert sectionIndex >= 0, "Section %s not found in ELF" % sectionName
assert len(sectionData) == sections[sectionIndex].size, "Size of signature data file (%d) doesn't match size of section (%d)" % (len(sectionData), sections[sectionIndex].size)
# Replace the ELF section with the new content
output = output[0:sections[sectionIndex].offset] + \
sectionData + \
output[(sections[sectionIndex].offset + sections[sectionIndex].size):]
elf = open(elfPath, "wb")
elf.write(output)
elf.close();
# Dump an integer as a byte array, in the big endian format used by micro-ecc
def int_to_bytearray(val, length):
res = bytearray(length)
for i in range(0, length):
res[length - (1 + i)] = (val & 0xff)
val = (val & ~0xff) >> 8
assert val == 0, "Dumped int to C array, but length %i not big enough" % length
return res
def main(argv):
elfPath = sys.argv[1] # Path to the ELF we want to sign
signingKey = sys.argv[2] # Key to use (either a service URL or a local key path)
sectionName = sys.argv[3] # ELF section to replace with signature
elf2binCmd = sys.argv[4] # Command for converting ELF to in-memory binary representation
# Generate a tempfile that we can dump the binary to. Objdump cannot dump to a pipe.
tempBinFile = tempfile.NamedTemporaryFile();
elf2binCmdline = elf2binCmd + " " + elfPath + " " + tempBinFile.name
if 'debug' in globals():
print "Signing %s, section '%s' using %s" % (elfPath, sectionName, signingKey)
print "Generating bin using '%s'" % elf2binCmdline
# Generate the binary that we sign (the provided command removes the signature placeholder section)
os.system(elf2binCmdline);
# Compute the SHA-256 hash of the image we are signing
h = open(tempBinFile.name)
hash = binascii.hexlify(hashlib.sha256(h.read()).digest())
h.close()
# Dump out the length and hash of the signed image
if 'debug' in globals():
print "Signed length = %d bytes" % os.stat(tempBinFile.name).st_size
print "Image SHA-256 = %s" % hash
# If the signingKey looks like a URL, we do online signing; otherwise, use a locally stored key
if signingKey.startswith('https://'):
# Append the hash to the URL
signingKey = signingKey + "&hash=" + hash
if 'debug' in globals():
print "Signing using remote service URL: %s" % signingKey
# Get the auth parameter that should have been exported from the environment
assert 'auth' in os.environ, "Signing service credentials 'auth' not exported from environment"
# Use cURL to request signing by the service
buffer = StringIO.StringIO()
curl = pycurl.Curl()
curl.setopt(pycurl.URL, signingKey)
if 'allowSelfSignedTLSCerts' in globals():
curl.setopt(pycurl.SSL_VERIFYPEER, False)
curl.setopt(pycurl.FAILONERROR, True)
curl.setopt(pycurl.WRITEDATA, buffer)
curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC)
curl.setopt(pycurl.USERPWD, os.environ['auth'])
try:
curl.perform()
except pycurl.error as e:
# Handle HTTP error return codes user the assert below, to make it easier to diagnose issues
if e.args[0] != curl.E_HTTP_RETURNED_ERROR:
raise e
http_code = curl.getinfo(pycurl.HTTP_CODE)
assert http_code == 200, "HTTP error %d returned by service" % http_code
curl.close()
signature = buffer.getvalue()
assert len(signature) == (2 * 60), "Signature returned by service has wrong length (%d != %d)" % (len(signature), (2 * 60))
if 'debug' in globals():
print "Service returned signature: %s" % signature
sig = bytearray(binascii.unhexlify(signature))
else:
if 'debug' in globals():
print "Signing using locally stored key"
# Sign the binary, currently using a local key and OpenSSL.
process = subprocess.Popen(["openssl", "dgst", "-sha256", "-sign", signingKey, tempBinFile.name], stdout=subprocess.PIPE)
stdout = process.communicate()[0]
# Extract the signature values from the DER output
res = pyasn1.codec.ber.decoder.decode(stdout)[0]
assert len(res) == 2, "Expected 2 values in generated EC signature, got %d" % len(res)
assert isinstance(res.getComponentByPosition(0), pyasn1.type.univ.Integer), "EC signature result values weren't integers"
assert isinstance(res.getComponentByPosition(1), pyasn1.type.univ.Integer), "EC signature result values weren't integers"
r = int(res.getComponentByPosition(0))
s = int(res.getComponentByPosition(1))
# Generate the signature block.
# The size of this signature block needs to match the size of the signature
# placeholder section that was set aside in the linker script.
# The signature data (r,s) also needs to be 4-byte aligned.
sig = SIGNATURE_ECDSA_SHA256_SECP224R1 + \
b'\x00\x00\x00' + \
int_to_bytearray(r, (224/8)) + \
int_to_bytearray(s, (224/8))
# Dump out the r,s values
if 'debug' in globals():
c_array("signature_r", int_to_bytearray(r, (224/8)))
c_array("signature_s", int_to_bytearray(s, (224/8)))
# Dump out the complete generated signature
if 'debug' in globals():
c_array("signature", sig)
# Update the ELF section with the generated signature data
elf_update_section(elfPath, sectionName, sig)
tempBinFile.close()
if __name__ == "__main__":
main(sys.argv[1:])
|
nestlabs/nlbuild
|
scripts/elf_sign.py
|
Python
|
apache-2.0
| 10,383 | 0.006549 |
__all__ = ['threshold_adaptive',
'threshold_otsu',
'threshold_yen',
'threshold_isodata',
'threshold_li', ]
import numpy as np
from scipy import ndimage as ndi
from ..exposure import histogram
from .._shared.utils import assert_nD
import warnings
def threshold_adaptive(image, block_size, method='gaussian', offset=0,
mode='reflect', param=None):
"""Applies an adaptive threshold to an array.
Also known as local or dynamic thresholding where the threshold value is
the weighted mean for the local neighborhood of a pixel subtracted by a
    constant. Alternatively the threshold can be determined dynamically by a
given function using the 'generic' method.
Parameters
----------
image : (N, M) ndarray
Input image.
block_size : int
        Odd size of pixel neighborhood which is used to calculate the
threshold value (e.g. 3, 5, 7, ..., 21, ...).
method : {'generic', 'gaussian', 'mean', 'median'}, optional
Method used to determine adaptive threshold for local neighbourhood in
weighted mean image.
* 'generic': use custom function (see `param` parameter)
* 'gaussian': apply gaussian filter (see `param` parameter for custom\
sigma value)
* 'mean': apply arithmetic mean filter
* 'median': apply median rank filter
By default the 'gaussian' method is used.
offset : float, optional
Constant subtracted from weighted mean of neighborhood to calculate
the local threshold value. Default offset is 0.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The mode parameter determines how the array borders are handled, where
cval is the value when mode is equal to 'constant'.
Default is 'reflect'.
param : {int, function}, optional
Either specify sigma for 'gaussian' method or function object for
'generic' method. This functions takes the flat array of local
neighbourhood as a single argument and returns the calculated
threshold for the centre pixel.
Returns
-------
threshold : (N, M) ndarray
Thresholded binary image
References
----------
.. [1] http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#adaptivethreshold
Examples
--------
>>> from skimage.data import camera
>>> image = camera()[:50, :50]
>>> binary_image1 = threshold_adaptive(image, 15, 'mean')
>>> func = lambda arr: arr.mean()
>>> binary_image2 = threshold_adaptive(image, 15, 'generic', param=func)
"""
assert_nD(image, 2)
thresh_image = np.zeros(image.shape, 'double')
if method == 'generic':
ndi.generic_filter(image, param, block_size,
output=thresh_image, mode=mode)
elif method == 'gaussian':
if param is None:
# automatically determine sigma which covers > 99% of distribution
sigma = (block_size - 1) / 6.0
else:
sigma = param
ndi.gaussian_filter(image, sigma, output=thresh_image, mode=mode)
elif method == 'mean':
mask = 1. / block_size * np.ones((block_size,))
# separation of filters to speedup convolution
ndi.convolve1d(image, mask, axis=0, output=thresh_image, mode=mode)
ndi.convolve1d(thresh_image, mask, axis=1,
output=thresh_image, mode=mode)
elif method == 'median':
ndi.median_filter(image, block_size, output=thresh_image, mode=mode)
return image > (thresh_image - offset)
def threshold_otsu(image, nbins=256):
"""Return threshold value based on Otsu's method.
Parameters
----------
image : array
Grayscale input image.
nbins : int, optional
Number of bins used to calculate histogram. This value is ignored for
integer arrays.
Returns
-------
threshold : float
        Upper threshold value. All pixel intensities less than or equal to
        this value are assumed to be foreground.
References
----------
.. [1] Wikipedia, http://en.wikipedia.org/wiki/Otsu's_Method
Examples
--------
>>> from skimage.data import camera
>>> image = camera()
>>> thresh = threshold_otsu(image)
>>> binary = image <= thresh
Notes
-----
The input image must be grayscale.
"""
if image.shape[-1] in (3, 4):
msg = "threshold_otsu is expected to work correctly only for " \
"grayscale images; image shape {0} looks like an RGB image"
warnings.warn(msg.format(image.shape))
hist, bin_centers = histogram(image.ravel(), nbins)
hist = hist.astype(float)
# class probabilities for all possible thresholds
weight1 = np.cumsum(hist)
weight2 = np.cumsum(hist[::-1])[::-1]
# class means for all possible thresholds
mean1 = np.cumsum(hist * bin_centers) / weight1
mean2 = (np.cumsum((hist * bin_centers)[::-1]) / weight2[::-1])[::-1]
# Clip ends to align class 1 and class 2 variables:
# The last value of `weight1`/`mean1` should pair with zero values in
# `weight2`/`mean2`, which do not exist.
variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2
idx = np.argmax(variance12)
threshold = bin_centers[:-1][idx]
return threshold
def threshold_yen(image, nbins=256):
"""Return threshold value based on Yen's method.
Parameters
----------
image : array
Input image.
nbins : int, optional
Number of bins used to calculate histogram. This value is ignored for
integer arrays.
Returns
-------
threshold : float
        Upper threshold value. All pixel intensities less than or equal to
        this value are assumed to be foreground.
References
----------
.. [1] Yen J.C., Chang F.J., and Chang S. (1995) "A New Criterion
for Automatic Multilevel Thresholding" IEEE Trans. on Image
Processing, 4(3): 370-378
.. [2] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
Techniques and Quantitative Performance Evaluation" Journal of
Electronic Imaging, 13(1): 146-165,
http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf
.. [3] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold
Examples
--------
>>> from skimage.data import camera
>>> image = camera()
>>> thresh = threshold_yen(image)
>>> binary = image <= thresh
"""
hist, bin_centers = histogram(image.ravel(), nbins)
# On blank images (e.g. filled with 0) with int dtype, `histogram()`
# returns `bin_centers` containing only one value. Speed up with it.
if bin_centers.size == 1:
return bin_centers[0]
# Calculate probability mass function
pmf = hist.astype(np.float32) / hist.sum()
P1 = np.cumsum(pmf) # Cumulative normalized histogram
P1_sq = np.cumsum(pmf ** 2)
# Get cumsum calculated from end of squared array:
P2_sq = np.cumsum(pmf[::-1] ** 2)[::-1]
    # P2_sq indices are shifted by +1; presumably, together with P1[:-1], this helps avoid '-inf'
# in crit. ImageJ Yen implementation replaces those values by zero.
crit = np.log(((P1_sq[:-1] * P2_sq[1:]) ** -1) *
(P1[:-1] * (1.0 - P1[:-1])) ** 2)
return bin_centers[crit.argmax()]
def threshold_isodata(image, nbins=256, return_all=False):
"""Return threshold value(s) based on ISODATA method.
Histogram-based threshold, known as Ridler-Calvard method or inter-means.
Threshold values returned satisfy the following equality:
`threshold = (image[image <= threshold].mean() +`
`image[image > threshold].mean()) / 2.0`
That is, returned thresholds are intensities that separate the image into
two groups of pixels, where the threshold intensity is midway between the
mean intensities of these groups.
For integer images, the above equality holds to within one; for floating-
point images, the equality holds to within the histogram bin-width.
Parameters
----------
image : array
Input image.
nbins : int, optional
Number of bins used to calculate histogram. This value is ignored for
integer arrays.
return_all: bool, optional
If False (default), return only the lowest threshold that satisfies
the above equality. If True, return all valid thresholds.
Returns
-------
threshold : float or int or array
Threshold value(s).
References
----------
.. [1] Ridler, TW & Calvard, S (1978), "Picture thresholding using an
iterative selection method"
.. [2] IEEE Transactions on Systems, Man and Cybernetics 8: 630-632,
http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=4310039
.. [3] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
Techniques and Quantitative Performance Evaluation" Journal of
Electronic Imaging, 13(1): 146-165,
http://www.busim.ee.boun.edu.tr/~sankur/SankurFolder/Threshold_survey.pdf
.. [4] ImageJ AutoThresholder code,
http://fiji.sc/wiki/index.php/Auto_Threshold
Examples
--------
>>> from skimage.data import coins
>>> image = coins()
>>> thresh = threshold_isodata(image)
>>> binary = image > thresh
"""
hist, bin_centers = histogram(image.ravel(), nbins)
# image only contains one unique value
if len(bin_centers) == 1:
if return_all:
return bin_centers
else:
return bin_centers[0]
hist = hist.astype(np.float32)
# csuml and csumh contain the count of pixels in that bin or lower, and
# in all bins strictly higher than that bin, respectively
csuml = np.cumsum(hist)
csumh = np.cumsum(hist[::-1])[::-1] - hist
# intensity_sum contains the total pixel intensity from each bin
intensity_sum = hist * bin_centers
# l and h contain average value of all pixels in that bin or lower, and
# in all bins strictly higher than that bin, respectively.
# Note that since exp.histogram does not include empty bins at the low or
# high end of the range, csuml and csumh are strictly > 0, except in the
# last bin of csumh, which is zero by construction.
# So no worries about division by zero in the following lines, except
# for the last bin, but we can ignore that because no valid threshold
# can be in the top bin. So we just patch up csumh[-1] to not cause 0/0
# errors.
csumh[-1] = 1
l = np.cumsum(intensity_sum) / csuml
h = (np.cumsum(intensity_sum[::-1])[::-1] - intensity_sum) / csumh
# isodata finds threshold values that meet the criterion t = (l + m)/2
# where l is the mean of all pixels <= t and h is the mean of all pixels
# > t, as calculated above. So we are looking for places where
# (l + m) / 2 equals the intensity value for which those l and m figures
# were calculated -- which is, of course, the histogram bin centers.
# We only require this equality to be within the precision of the bin
# width, of course.
all_mean = (l + h) / 2.0
bin_width = bin_centers[1] - bin_centers[0]
# Look only at thresholds that are below the actual all_mean value,
# for consistency with the threshold being included in the lower pixel
# group. Otherwise can get thresholds that are not actually fixed-points
# of the isodata algorithm. For float images, this matters less, since
# there really can't be any guarantees anymore anyway.
distances = all_mean - bin_centers
thresholds = bin_centers[(distances >= 0) & (distances < bin_width)]
if return_all:
return thresholds
else:
return thresholds[0]
def threshold_li(image):
"""Return threshold value based on adaptation of Li's Minimum Cross Entropy method.
Parameters
----------
image : array
Input image.
Returns
-------
threshold : float
        Upper threshold value. All pixel intensities greater than
this value are assumed to be foreground.
References
----------
.. [1] Li C.H. and Lee C.K. (1993) "Minimum Cross Entropy Thresholding"
Pattern Recognition, 26(4): 617-625
.. [2] Li C.H. and Tam P.K.S. (1998) "An Iterative Algorithm for Minimum
Cross Entropy Thresholding" Pattern Recognition Letters, 18(8): 771-776
.. [3] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
Techniques and Quantitative Performance Evaluation" Journal of
Electronic Imaging, 13(1): 146-165
http://citeseer.ist.psu.edu/sezgin04survey.html
.. [4] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold
Examples
--------
>>> from skimage.data import camera
>>> image = camera()
>>> thresh = threshold_li(image)
>>> binary = image > thresh
"""
# Copy to ensure input image is not modified
image = image.copy()
# Requires positive image (because of log(mean))
immin = np.min(image)
image -= immin
imrange = np.max(image)
tolerance = 0.5 * imrange / 256
# Calculate the mean gray-level
mean = np.mean(image)
# Initial estimate
new_thresh = mean
old_thresh = new_thresh + 2 * tolerance
# Stop the iterations when the difference between the
# new and old threshold values is less than the tolerance
while abs(new_thresh - old_thresh) > tolerance:
old_thresh = new_thresh
threshold = old_thresh + tolerance # range
# Calculate the means of background and object pixels
mean_back = image[image <= threshold].mean()
mean_obj = image[image > threshold].mean()
temp = (mean_back - mean_obj) / (np.log(mean_back) - np.log(mean_obj))
if temp < 0:
new_thresh = temp - tolerance
else:
new_thresh = temp + tolerance
return threshold + immin
|
ClinicalGraphics/scikit-image
|
skimage/filters/thresholding.py
|
Python
|
bsd-3-clause
| 14,107 | 0.000496 |
# -*- coding: utf-8 -*-
#
# privacyIDEA is a fork of LinOTP
# May 08, 2014 Cornelius Kölbel
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# 2014-10-17 Fix the empty result problem
# Cornelius Kölbel, <cornelius@privacyidea.org>
#
# Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH
# License: AGPLv3
# contact: http://www.linotp.org
# http://www.lsexperts.de
# linotp@lsexperts.de
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__doc__="""This is the BaseClass for audit trails
The audit is supposed to work like this. First we need to create an audit
object. E.g. this can be done in the before_request:
g.audit_object = getAudit(file_config)
During the request, the g.audit_object can be used to add audit information:
g.audit_object.log({"client": "123.2.3.4", "action": "validate/check"})
Thus at many different places in the code, audit information can be added to
the audit object.
Finally the audit_object needs to be stored to the audit storage. So we call:
g.audit_object.finalize_log()
which creates a signature of the audit data and writes the data to the audit
storage.
"""
import logging
log = logging.getLogger(__name__)
from privacyidea.lib.log import log_with
@log_with(log)
def getAuditClass(packageName, className):
"""
    Helper method to load the Audit class from a given
    package, with both specified as strings.
    Example:
    getAuditClass("privacyidea.lib.auditmodules.sqlaudit", "Audit")
    The function also checks that the loaded class has a ``log`` method;
    if it does not, an error is raised.
"""
mod = __import__(packageName, globals(), locals(), [className])
klass = getattr(mod, className)
log.debug("klass: %s" % klass)
if not hasattr(klass, "log"): # pragma: no cover
raise NameError("Audit AttributeError: " + packageName + "." +
className + " instance has no attribute 'log'")
return klass
@log_with(log)
def getAudit(config):
"""
This wrapper function creates a new audit object based on the config
from the config file. The config file entry could look like this:
PI_AUDIT_MODULE = privacyidea.lib.auditmodules.sqlaudit
Each audit module (at the moment only SQL) has its own additional config
entries.
:param config: The config entries from the file config
:return: Audit Object
"""
audit_module = config.get("PI_AUDIT_MODULE")
audit = getAuditClass(audit_module, "Audit")(config)
return audit
@log_with(log)
def search(config, param=None, user=None):
"""
Returns a list of audit entries, supports pagination
:param config: The config entries from the file config
:return: Audit dictionary with information about the previous and next
pages.
"""
audit = getAudit(config)
sortorder = "desc"
page_size = 15
page = 1
# The filtering dictionary
param = param or {}
# special treatment for:
# sortorder, page, pagesize
if "sortorder" in param:
sortorder = param["sortorder"]
del param["sortorder"]
if "page" in param:
page = param["page"]
del param["page"]
if "page_size" in param:
page_size = param["page_size"]
del param["page_size"]
pagination = audit.search(param, sortorder=sortorder, page=page,
page_size=page_size)
ret = {"auditdata": pagination.auditdata,
"prev": pagination.prev,
"next": pagination.next,
"current": pagination.page,
"count": pagination.total}
return ret
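# Illustrative usage (sketch only; filter keys depend on the audit schema and
# `current_app` is Flask's application proxy):
#   ret = search(current_app.config, param={"action": "*validate*", "page": 1, "page_size": 10})
#   for entry in ret["auditdata"]:
#       print(entry)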
|
woddx/privacyidea
|
privacyidea/lib/audit.py
|
Python
|
agpl-3.0
| 4,198 | 0.000715 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import account_inv
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ksrajkumar/openerp-6.1
|
openerp/addons/itara_multi_payment/__init__.py
|
Python
|
agpl-3.0
| 1,080 | 0.000926 |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from collections import MutableMapping, MutableSequence
from pants.engine.exp.addressable import SuperclassesOf, addressable
from pants.engine.exp.objects import Serializable, SerializableFactory, Validatable, ValidationError
class Configuration(Serializable, SerializableFactory, Validatable):
"""A serializable object describing some bit of build configuration.
All build configuration data is composed of basic python builtin types and higher-level
configuration objects that aggregate configuration data. Configuration objects can carry a name
in which case they become addressable and can be reused.
"""
# Internal book-keeping fields to exclude from hash codes/equality checks.
_SPECIAL_FIELDS = ('extends', 'merges', 'type_alias')
def __init__(self, abstract=False, extends=None, merges=None, **kwargs):
"""Creates a new configuration data blob.
By default configurations are anonymous (un-named), concrete (not `abstract`), and they neither
inherit nor merge another configuration.
    Inheritance is only allowed via one of the `extends` or `merges` channels; it is an error to
specify both. A configuration can be semantically abstract without setting `abstract=True`.
The `abstract` value can serve as documentation, or, for subclasses that provide an
implementation for `validate_concrete`, it allows skipping validation for abstract instances.
:param bool abstract: `True` to mark this configuration item as abstract, in which case no
validation is performed (see `validate_concrete`); `False` by default.
    :param extends: The configuration instance to inherit field values from. Any shared fields are
                    over-written with this instance's values.
    :type extends: An addressed or concrete configuration instance that is a type compatible with
                   this configuration or this configuration's superclasses.
    :param merges: The configuration instance to merge this instance's field values with. Merging is
                   like extension except for containers, which are extended instead of replaced; ie:
                   any `dict` values are updated with this instance's items and any `list` values are
                   extended with this instance's items.
    :type merges: An addressed or concrete configuration instance that is a type compatible with
                  this configuration or this configuration's superclasses.
:param **kwargs: The configuration parameters.
"""
self._kwargs = kwargs
self._kwargs['abstract'] = abstract
self.extends = extends
self.merges = merges
# Allow for configuration items that are directly constructed in memory. These can have an
# address directly assigned (vs. inferred from name + source file location) and we only require
# that if they do, their name - if also assigned, matches the address.
if self.address:
if self.name and self.name != self.address.target_name:
self.report_validation_error('Address and name do not match! address: {}, name: {}'
.format(self.address, self.name))
self._kwargs['name'] = self.address.target_name
self._hashable_key = None
@property
def name(self):
"""Return the name of this object, if any.
In general configuration objects need not be named, in which case they are generally embedded
    objects; ie: attribute values of enclosing named configuration objects. Any top-level
configuration object, though, will carry a unique name (in the configuration object's enclosing
namespace) that can be used to address it.
:rtype: string
"""
return self._kwargs.get('name')
@property
def address(self):
"""Return the address of this object, if any.
In general configuration objects need not be identified by an address, in which case they are
    generally embedded objects; ie: attribute values of enclosing named configuration objects.
Any top-level configuration object, though, will be identifiable via a unique address.
:rtype: :class:`pants.build_graph.address.Address`
"""
return self._kwargs.get('address')
@property
def type_alias(self):
"""Return the type alias this target was constructed via.
For a target read from a BUILD file, this will be target alias, like 'java_library'.
For a target constructed in memory, this will be the simple class name, like 'JavaLibrary'.
The end result is that the type alias should be the most natural way to refer to this target's
type to the author of the target instance.
:rtype: string
"""
return self._kwargs.get('type_alias', type(self).__name__)
@property
def abstract(self):
"""Return `True` if this object has been marked as abstract.
Abstract objects are not validated. See: `validate_concrete`.
:rtype: bool
"""
return self._kwargs['abstract']
# It only makes sense to inherit a subset of our own fields (we should not inherit new fields!),
# our superclasses logically provide fields within this constrained set.
# NB: Since `Configuration` is at base an ~unconstrained struct, a superclass does allow for
# arbitrary and thus more fields to be defined than a subclass might logically support. We
# accept this hole in a trade for generally expected behavior when `Configuration` is subclassed
# in the style of constructors with named parameters representing the full complete set of
# expected parameters leaving **kwargs only for use by 'the system'; ie for `type_alias` and
# `address` plumbing for example.
#
# Of note is the fact that we pass a constraint type and not a concrete constraint value. This
# tells addressable to use `SuperclassesOf([Configuration instance's type])`, which is what we
# want. Aka, for `ConfigurationSubclassA`, the constraint is
# `SuperclassesOf(ConfigurationSubclassA)`.
#
@addressable(SuperclassesOf)
def extends(self):
"""Return the object this object extends, if any.
:rtype: Serializable
"""
@addressable(SuperclassesOf)
def merges(self):
"""Return the object this object merges in, if any.
:rtype: Serializable
"""
def _asdict(self):
return self._kwargs
def _extract_inheritable_attributes(self, serializable):
attributes = serializable._asdict().copy()
# Allow for un-named (embedded) objects inheriting from named objects
attributes.pop('name', None)
attributes.pop('address', None)
# We should never inherit special fields - these are for local book-keeping only.
for field in self._SPECIAL_FIELDS:
attributes.pop(field, None)
return attributes
def create(self):
if self.extends and self.merges:
self.report_validation_error('Can only inherit from one object. Both extension of {} and '
'merging with {} were requested.'
.format(self.extends.address, self.merges.address))
if self.extends:
attributes = self._extract_inheritable_attributes(self.extends)
attributes.update((k, v) for k, v in self._asdict().items()
if k not in self._SPECIAL_FIELDS and v is not None)
configuration_type = type(self)
return configuration_type(**attributes)
elif self.merges:
attributes = self._extract_inheritable_attributes(self.merges)
for k, v in self._asdict().items():
if k not in self._SPECIAL_FIELDS:
if isinstance(v, MutableMapping):
mapping = attributes.get(k) or {}
mapping.update(v)
attributes[k] = mapping
elif isinstance(v, MutableSequence):
sequence = attributes.get(k) or []
sequence.extend(v)
attributes[k] = sequence
elif v is not None:
attributes[k] = v
configuration_type = type(self)
return configuration_type(**attributes)
else:
return self
def validate(self):
if not self.abstract:
self.validate_concrete()
def report_validation_error(self, message):
"""Raises a properly identified validation error.
:param string message: An error message describing the validation error.
:raises: :class:`pants.engine.exp.objects.ValidationError`
"""
raise ValidationError(self.address, message)
def validate_concrete(self):
"""Subclasses can override to implement validation logic.
    The object will be in a fully hydrated state and it is guaranteed to be concrete, i.e.
    not `abstract`. If an error is found in the object's configuration, a validation error should
be raised by calling `report_validation_error`.
:raises: :class:`pants.engine.exp.objects.ValidationError`
"""
def __getattr__(self, item):
return self._kwargs[item]
def _key(self):
if self._hashable_key is None:
self._hashable_key = sorted((k, v) for k, v in self._kwargs.items()
if k not in self._SPECIAL_FIELDS)
return self._hashable_key
def __hash__(self):
return hash(self._key())
def __eq__(self, other):
return isinstance(other, Configuration) and self._key() == other._key()
def __ne__(self, other):
return not (self == other)
def __repr__(self):
# TODO(John Sirois): Do something else here. This is recursive and so printing a Node prints
# its whole closure and will be too expensive for inlined objects and too bewildering past
# simple example debugging.
return '{classname}({args})'.format(classname=type(self).__name__,
args=', '.join(sorted('{}={!r}'.format(k, v)
for k, v in self._kwargs.items())))
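# Hedged usage sketch (added for illustration; `tags` and `opts` are hypothetical
# field names, not part of the pants API). `merges` extends container fields,
# while `extends` overwrites shared fields:
#
#   base = Configuration(tags=['a'], opts={'x': 1})
#   merged = Configuration(merges=base, tags=['b'], opts={'y': 2}).create()
#   merged.tags   # -> ['a', 'b']        (lists are extended)
#   merged.opts   # -> {'x': 1, 'y': 2}  (dicts are updated)
#
#   extended = Configuration(extends=base, tags=['b']).create()
#   extended.tags  # -> ['b']      (shared fields are overwritten)
#   extended.opts  # -> {'x': 1}   (missing fields are inherited)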
|
slyphon/pants
|
src/python/pants/engine/exp/configuration.py
|
Python
|
apache-2.0
| 10,107 | 0.010488 |
#!/usr/bin/env python2
# Rekall Memory Forensics
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Author: Michael Cohen scudette@google.com
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
__author__ = "Michael Cohen <scudette@google.com>"
"""This implements the file finder flow.
This flow is the workhorse of filesystem operations.
"""
import collections
from rekall import plugin
from rekall_agent import flow
from rekall_agent import result_collections
from rekall_agent.client_actions import download
from rekall_agent.client_actions import files
from rekall_agent.flows import collect
from rekall_lib import serializer
from rekall_lib import utils
from rekall_lib.types import agent
class FileFilterCondition(serializer.SerializedObject):
"""Baseclass for all file filter conditions."""
def get_efilter_clause(self):
return "1"
class ModificationTimeCondition(FileFilterCondition):
schema = [
dict(name="min", type="epoch", default=None,
doc="Only select files that have an mtime after "
"this value."),
dict(name="max", type="epoch", default=None,
doc="Only select files that have an mtime before "
"this value."),
]
def get_efilter_clause(self):
result = []
if self.min:
result.append("Path.st_mtime > %s" % self.min)
if self.max:
result.append("Path.st_mtime < %s" % self.max)
return "(" + " and ".join(result) + ")"
class FileFinderFlow(collect.CollectFlow):
_collection_name = "file_finder_{timestamp}"
schema = [
dict(name="globs", repeated=True, user=True,
doc="Globs to search in client."),
dict(name="conditions", type=FileFilterCondition, repeated=True,
doc="One or more filter conditions to restrict results."),
dict(name="download", type="bool", user=True,
doc="Should we download the file?"),
dict(name="path_sep",
doc="Glob path separator"),
]
def validate(self):
super(FileFinderFlow, self).validate()
if not self.globs:
raise plugin.InvalidArgs("Some globs must be provided.")
def create_query(self, collection):
"""Make an efilter query from all the flow parameters.
        Combines the high level FileFinder filter specifications into an
        actionable efilter query.
"""
# This code just saves some typing :-).
column_spec = collections.OrderedDict()
for x in collection.tables[0].columns:
column_spec[x.name] = "path.%s" % x.name
column_spec["dirname"] = "path.filename.dirname"
column_spec["filename"] = "path.filename.basename"
column_spec["st_mode_str"] = "str(path.st_mode)"
column_spec["st_uid"] = "path.st_uid.uid"
column_spec["st_gid"] = "path.st_gid.gid"
columns = ["%s as %s" % (v, k) for k, v in column_spec.items()]
result = (
"select %s from glob({globs}, path_sep: {path_sep})" %
",".join(columns))
# Filter conditions are specified.
if self.conditions:
parts = [x.get_efilter_clause() for x in self.conditions]
result += " where " + " and ".join(parts)
return dict(mode_live=result)
def generate_actions(self):
# Make a collection to store the result.
collection = files.StatEntryCollection.from_keywords(
session=self._session,
location=self.get_location(),
)
location = None
if self.download:
if self.is_hunt():
location = self._config.server.hunt_vfs_path_for_client(
self.flow_id, vfs_type="files",
path_template="{client_id}/{subpath}",
expiration=self.expiration())
else:
location = self._config.server.vfs_prefix_for_client(
self.client_id, vfs_type="files",
expiration=self.expiration())
yield download.GetFiles.from_keywords(
session=self._session,
query=self.create_query(collection),
query_parameters=dict(globs=self.globs,
path_sep=self.path_sep),
collection=collection,
location=location
)
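# Hedged note (added): for a flow with globs=["/tmp/*"], create_query() assembles a
# query of the shape
#   select <columns> from glob({globs}, path_sep: {path_sep}) where <condition clauses>
# wrapped as dict(mode_live=...); the column list is derived from the collection's
# first table definition.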
class VFSIndex(result_collections.GenericSQLiteCollection):
"""The VFS index manages the VFS.
The VFS is constructed by merging one or more different StatEntryCollection
collections into a single coherent view. In order to know which
StatEntryCollection represents which specific directory we need a fast
lookup index - which is managed in this collection.
"""
_tables = [dict(
name="default",
# Each entry represents one StatEntryCollection().
columns=[
# The top level directory contained in this collection.
dict(name="dirname"),
# The end depth of this collection.
dict(name="end_depth", type="int"),
# The age of this collection.
dict(name="timestamp", type="epoch"),
# Where it is.
dict(name="location_path"),
]
)]
class ListDirectory(agent.Flow):
"""Maintain the client VFS view.
Rekall maintains a view of the client's filesystem called the VFS (Virtual
File System). The view is maintained by collecting stat() entries from the
client in many StatEntryCollection() collections and storing them in the
client's bucket namespace.
This flow (ListDirectory) is responsible for creating and managing these
collections into a unified VFS that can be browsed with the `vfs_ls` and
`vfs_cp` plugins.
"""
schema = [
dict(name="path", user=True,
doc="The name of the directory to list."),
dict(name="depth", type="int", default=1, user=True,
doc="If set we recursively list all directories."),
]
def get_location(self):
"""Work out where the agent should store the collection."""
if self.is_hunt():
return self._config.server.hunt_vfs_path_for_client(
self.flow_id, self.path, vfs_type="metadata",
expiration=self.expiration())
return self._config.server.vfs_path_for_client(
self.client_id, "%s/%s" % (self.path, self.flow_id),
expiration=self.expiration(), vfs_type="collections",
mode="w")
def validate(self):
super(ListDirectory, self).validate()
if not self.path:
raise plugin.InvalidArgs("Path must be set")
def generate_actions(self):
yield files.ListDirectoryAction.from_keywords(
session=self._session,
path=self.path,
depth=self.depth,
vfs_location=self.get_location(),
)
def post_process(self, tickets):
"""Post process the list directory collection.
We want to maintain an easier to navigate view of the client's VFS in
the client's namespace. We place a StatEntryCollection at each directory
location and write all the files within that directory.
"""
super(ListDirectory, self).post_process(tickets)
if self.is_hunt():
return
VFSIndex.transaction(
self._config.server.vfs_index_for_server(self.client_id),
self._update_vfs_index,
tickets,
session=self._session)
def _update_vfs_index(self, index_collection, tickets):
"""Extract all the directories and store them in the index."""
path = utils.normpath(self.path)
for ticket in tickets:
for collection in ticket.collections:
index_collection.insert(
dirname=path,
timestamp=ticket.timestamp,
location_path=collection.location.to_path())
|
dsweet04/rekall
|
rekall-agent/rekall_agent/flows/find.py
|
Python
|
gpl-2.0
| 8,647 | 0.000231 |
"""Custom exceptions for ExecutionContext package
"""
from generic_utils.exceptions import GenUtilsException
from generic_utils.exceptions import GenUtilsKeyError
from generic_utils.exceptions import GenUtilsRuntimeError
class ExecutionContextStackEmptyError(GenUtilsException):
"""Raised when stack is empty and blocks proper execution
"""
pass
class ExecutionContextValueDoesNotExist(GenUtilsKeyError):
"""Raised when attempting to get a value that does not exist in a backend"""
message = "Could not get key={key} from ExecutionContext."
key = None
class ExecutionContextRuntimeError(GenUtilsRuntimeError):
"""Raised when ExecutionContextStack can not recover from an unknown problem."""
message = "ExecutionContextStack could not complete operation due to reason={reason}."
reason = None
|
kevinseelbach/generic_utils
|
src/generic_utils/execution_context/exceptions.py
|
Python
|
bsd-3-clause
| 835 | 0.003593 |
# decodex - simple enigma decoder.
#
# Copyright (c) 2013 Paul R. Tagliamonte <tag@pault.ag>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from collections import defaultdict
def cleanup(what):
return what.strip().lower().replace("'", "")
def issubset(superstr, substr):
superstr = list(superstr)
for ch in substr:
if ch not in superstr:
return False
superstr.remove(ch)
return True
def strsub(superstr, substr):
superstr = list(superstr)
substr = list(substr)
for k in substr:
superstr.remove(k)
return "".join(superstr)
class Words(object):
def __init__(self, dictionary):
self.path = "/usr/share/dict/%s" % (dictionary)
self.mapping = defaultdict(set)
self.word_hash = {}
self._build_map()
def _build_map(self):
for line in (cleanup(x) for x in open(self.path, 'r')):
self.word_hash[line] = line
self.mapping["".join(sorted(line))].add(line)
def anagram(self, word, depth=2):
if depth == 0:
return
l_hash = "".join(sorted(word))
# OK. Let's start simple.
if l_hash in self.mapping:
for entry in self.mapping[l_hash]:
yield [entry]
# Meh, Let's do our best and find l_hash in r_hash.
for r_hash, entries in self.mapping.items():
if issubset(l_hash, r_hash):
leftover = strsub(l_hash, r_hash)
# OK. So, this is a word if we can match the rest.
for anagram in self.anagram(leftover, depth=(depth - 1)):
for entry in entries:
yield [entry] + anagram
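# Hedged usage sketch (added, not part of the original module). The helpers are
# pure functions, so they can be exercised without a system dictionary:
if __name__ == "__main__":
    # "dormitory" contains every letter of "dirty"; stripping them leaves "omor".
    assert issubset("dormitory", "dirty")
    assert strsub("dormitory", "dirty") == "omor"
    # Words("words") would read /usr/share/dict/words, and Words.anagram("listen")
    # would then yield anagram word lists (e.g. ["silent"]), depending on the
    # installed dictionary.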
|
paultag/decodex
|
decodex/utils/words.py
|
Python
|
agpl-3.0
| 2,313 | 0.001297 |
# Copyright (c) 2019, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from reviews_manager.models import ClinicalAnnotationStep
from rois_manager.models import Slice, Core, FocusRegion
class SliceAnnotation(models.Model):
author = models.ForeignKey(User, on_delete=models.PROTECT, blank=False)
slice = models.ForeignKey(Slice, on_delete=models.PROTECT, blank=False,
related_name='clinical_annotations')
annotation_step = models.ForeignKey(ClinicalAnnotationStep, on_delete=models.PROTECT,
blank=False, related_name='slice_annotations')
action_start_time = models.DateTimeField(null=True, default=None)
action_complete_time = models.DateTimeField(null=True, default=None)
creation_date = models.DateTimeField(default=timezone.now)
high_grade_pin = models.BooleanField(blank=False, null=False, default=False)
pah = models.BooleanField(blank=False, null=False, default=False)
chronic_inflammation = models.BooleanField(blank=False, null=False, default=False)
acute_inflammation = models.BooleanField(blank=False, null=False, default=False)
periglandular_inflammation = models.BooleanField(blank=False, null=False, default=False)
intraglandular_inflammation = models.BooleanField(blank=False, null=False, default=False)
stromal_inflammation = models.BooleanField(blank=False, null=False, default=False)
class Meta:
unique_together = ('slice', 'annotation_step')
def get_gleason_4_total_area(self):
gleason_4_total_area = 0.0
for focus_region in self.slice.get_focus_regions():
try:
focus_region_annotation = FocusRegionAnnotation.objects.get(
focus_region=focus_region,
annotation_step=self.annotation_step
)
gleason_4_total_area += focus_region_annotation.get_total_gleason_4_area()
except FocusRegionAnnotation.DoesNotExist:
pass
return gleason_4_total_area
def get_total_tumor_area(self):
total_tumor_area = 0.0
for core in self.slice.cores.all():
total_tumor_area += core.get_total_tumor_area()
return total_tumor_area
def get_gleason_4_percentage(self):
gleason_4_total_area = self.get_gleason_4_total_area()
total_tumor_area = self.get_total_tumor_area()
try:
return (gleason_4_total_area / total_tumor_area) * 100.0
except ZeroDivisionError:
return -1
def get_action_duration(self):
if self.action_start_time and self.action_complete_time:
return (self.action_complete_time-self.action_start_time).total_seconds()
else:
return None
class CoreAnnotation(models.Model):
GLEASON_GROUP_WHO_16 = (
('GG1', 'GRADE_GROUP_1'), # gleason score <= 6
('GG2', 'GRADE_GROUP_2'), # gleason score 3+4=7
('GG3', 'GRADE_GROUP_3'), # gleason score 4+3=7
('GG4', 'GRADE_GROUP_4'), # gleason score 4+4=8 || 3+5=8 || 5+3=8
('GG5', 'GRADE_GROUP_5') # gleason score 9 or 10
)
author = models.ForeignKey(User, on_delete=models.PROTECT, blank=False)
core = models.ForeignKey(Core, on_delete=models.PROTECT, blank=False,
related_name='clinical_annotations')
annotation_step = models.ForeignKey(ClinicalAnnotationStep, on_delete=models.PROTECT,
blank=False, related_name='core_annotations')
action_start_time = models.DateTimeField(null=True, default=None)
action_complete_time = models.DateTimeField(null=True, default=None)
creation_date = models.DateTimeField(default=timezone.now)
primary_gleason = models.IntegerField(blank=False)
secondary_gleason = models.IntegerField(blank=False)
gleason_group = models.CharField(
max_length=3, choices=GLEASON_GROUP_WHO_16, blank=False
)
class Meta:
unique_together = ('core', 'annotation_step')
def get_gleason_4_total_area(self):
gleason_4_total_area = 0.0
for focus_region in self.core.focus_regions.all():
try:
focus_region_annotation = FocusRegionAnnotation.objects.get(
annotation_step=self.annotation_step,
focus_region=focus_region
)
gleason_4_total_area += focus_region_annotation.get_total_gleason_4_area()
except FocusRegionAnnotation.DoesNotExist:
pass
return gleason_4_total_area
def get_total_tumor_area(self):
return self.core.get_total_tumor_area()
def get_gleason_4_percentage(self):
gleason_4_total_area = self.get_gleason_4_total_area()
total_tumor_area = self.get_total_tumor_area()
try:
return (gleason_4_total_area / total_tumor_area) * 100.0
except ZeroDivisionError:
return -1
def get_grade_group_text(self):
for choice in self.GLEASON_GROUP_WHO_16:
if choice[0] == self.gleason_group:
return choice[1]
def get_action_duration(self):
if self.action_start_time and self.action_complete_time:
return (self.action_complete_time-self.action_start_time).total_seconds()
else:
return None
class FocusRegionAnnotation(models.Model):
author = models.ForeignKey(User, on_delete=models.PROTECT, blank=False)
focus_region = models.ForeignKey(FocusRegion, on_delete=models.PROTECT,
blank=False, related_name='clinical_annotations')
annotation_step = models.ForeignKey(ClinicalAnnotationStep, on_delete=models.PROTECT,
blank=False, related_name='focus_region_annotations')
action_start_time = models.DateTimeField(null=True, default=None)
action_complete_time = models.DateTimeField(null=True, default=None)
creation_date = models.DateTimeField(default=timezone.now)
# cancerous region fields
perineural_involvement = models.BooleanField(blank=False, null=False, default=False)
intraductal_carcinoma = models.BooleanField(blank=False, null=False, default=False)
ductal_carcinoma = models.BooleanField(blank=False, null=False, default=False)
poorly_formed_glands = models.BooleanField(blank=False, null=False, default=False)
cribriform_pattern = models.BooleanField(blank=False, null=False, default=False)
small_cell_signet_ring = models.BooleanField(blank=False, null=False, default=False)
hypernephroid_pattern = models.BooleanField(blank=False, null=False, default=False)
mucinous = models.BooleanField(blank=False, null=False, default=False)
comedo_necrosis = models.BooleanField(blank=False, null=False, default=False)
# stressed region fields
inflammation = models.BooleanField(blank=False, null=False, default=False)
pah = models.BooleanField(blank=False, null=False, default=False)
atrophic_lesions = models.BooleanField(blank=False, null=False, default=False)
adenosis = models.BooleanField(blank=False, null=False, default=False)
# ---
cellular_density_helper_json = models.TextField(blank=True, null=True)
cellular_density = models.IntegerField(blank=True, null=True)
cells_count = models.IntegerField(blank=True, null=True)
class Meta:
unique_together = ('focus_region', 'annotation_step')
def get_total_gleason_4_area(self):
g4_area = 0
for g4 in self.get_gleason_4_elements():
g4_area += g4.area
return g4_area
def get_gleason_4_elements(self):
return self.gleason_elements.filter(gleason_type='G4')
def get_gleason_4_percentage(self):
g4_area = self.get_total_gleason_4_area()
try:
return (g4_area / self.focus_region.area) * 100.0
except ZeroDivisionError:
return -1
def get_action_duration(self):
if self.action_start_time and self.action_complete_time:
return (self.action_complete_time-self.action_start_time).total_seconds()
else:
return None
class GleasonElement(models.Model):
GLEASON_TYPES = (
('G1', 'GLEASON 1'),
('G2', 'GLEASON 2'),
('G3', 'GLEASON 3'),
('G4', 'GLEASON 4'),
('G5', 'GLEASON 5')
)
focus_region_annotation = models.ForeignKey(FocusRegionAnnotation, related_name='gleason_elements',
blank=False, on_delete=models.CASCADE)
gleason_type = models.CharField(max_length=2, choices=GLEASON_TYPES, blank=False, null=False)
json_path = models.TextField(blank=False, null=False)
area = models.FloatField(blank=False, null=False)
cellular_density_helper_json = models.TextField(blank=True, null=True)
cellular_density = models.IntegerField(blank=True, null=True)
cells_count = models.IntegerField(blank=True, null=True)
action_start_time = models.DateTimeField(null=True, default=None)
action_complete_time = models.DateTimeField(null=True, default=None)
creation_date = models.DateTimeField(default=timezone.now)
def get_gleason_type_label(self):
for choice in self.GLEASON_TYPES:
if choice[0] == self.gleason_type:
return choice[1]
def get_action_duration(self):
if self.action_start_time and self.action_complete_time:
return (self.action_complete_time-self.action_start_time).total_seconds()
else:
return None
|
lucalianas/ProMort
|
promort/clinical_annotations_manager/models.py
|
Python
|
mit
| 10,721 | 0.003638 |
def transform(dataset, XRANGE=None, YRANGE=None, ZRANGE=None):
"""Define this method for Python operators that
transform input scalars"""
import numpy as np
array = dataset.active_scalars
if array is None:
raise RuntimeError("No scalars found!")
# Transform the dataset.
result = np.copy(array)
result[XRANGE[0]:XRANGE[1], YRANGE[0]:YRANGE[1], ZRANGE[0]:ZRANGE[1]] = 0
# Set the result as the new scalars.
dataset.active_scalars = result
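# Illustrative call (added): assuming `ds` is a tomviz dataset wrapper whose
# active_scalars is a 3-D numpy array, transform(ds, XRANGE=(0, 10), YRANGE=(0, 10),
# ZRANGE=(0, 5)) zeroes out that sub-volume and stores the result back as the
# active scalars.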
|
OpenChemistry/tomviz
|
tomviz/python/ClearVolume.py
|
Python
|
bsd-3-clause
| 490 | 0 |
#
# Copyright 2011 Twitter, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Various operations acting on the tuples.
* Select fields from the stream: retain
* Remove fields from the stream: discard (not implemented in Cascading 1.2.*)
* Rename fields: rename
"""
__author__ = 'Gabor Szabo'
import itertools
from cascading.tuple import Fields
from cascading.operation import Identity
import cascading.pipe.assembly.Rename
from pycascading.pipe import SubAssembly, coerce_to_fields
from pycascading.each import Apply
def retain(*fields_to_keep):
"""Retain only the given fields.
The fields can be given in array or by separate parameters.
"""
if len(fields_to_keep) > 1:
fields_to_keep = list(itertools.chain(fields_to_keep))
else:
fields_to_keep = fields_to_keep[0]
return Apply(fields_to_keep, Identity(Fields.ARGS), Fields.RESULTS)
def _discard(fields_to_discard):
# In 2.0 there's a builtin function this, Discard
# In 1.2 there is nothing for this
raise Exception('Discard only works with Cascading 2.0')
def rename(*args):
"""Rename the fields to new names.
If only one argument (a list of names) is given, it is assumed that the
user wants to rename all the fields. If there are two arguments, the first
list is the set of fields to be renamed, and the second is a list of the
new names.
"""
if len(args) == 1:
(fields_from, fields_to) = (Fields.ALL, args[0])
else:
(fields_from, fields_to) = (args[0], args[1])
return SubAssembly(cascading.pipe.assembly.Rename, \
coerce_to_fields(fields_from), \
coerce_to_fields(fields_to))
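# Hedged examples (added); the `pipe | operator` composition style is assumed from
# typical PyCascading usage:
#   pipe | retain('user_id', 'url')                  # keep only these two fields
#   pipe | rename(['ts', 'uid'], ['time', 'user'])   # rename the selected fields
#   pipe | rename(['time', 'user'])                  # single-list form renames all fields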
|
twitter/pycascading
|
python/pycascading/operators.py
|
Python
|
apache-2.0
| 2,199 | 0.00091 |
# -*- coding: utf-8 -*-
"""
@file: tasks.py
@author: lyn
@contact: tonylu716@gmail.com
@python: 3.5
@editor: Vim
@create: 3/29/17 2:20 AM
@description:
    Asynchronous tasks used for anti-crawler protection; they mainly refresh certain temporary records in the data tables.
"""
from __future__ import absolute_import, unicode_literals
from celery import task as celery_task
from .models import Ban, RecentIpActivity
from django.utils import timezone
@celery_task(name="refresh_ban")
def refresh_ban():
clear_bans = []
for ban in Ban.objects.all():
if ban.ban_to < timezone.now():
ban.delete()
print("clear {} from Ban".format(ban.ip))
clear_bans.append(ban.ip)
return clear_bans
@celery_task(name="refresh_ip_activity")
def refresh_ip_activity():
clear_act_ips = []
for ip_activity in RecentIpActivity.objects.all():
if ip_activity.destroy_time < timezone.now():
ip_activity.delete()
print("clear {} acts from activities".format(ip_activity.ip))
clear_act_ips.append(ip_activity.ip)
return clear_act_ips
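# Hedged wiring sketch (added): the tasks are registered by name, so a periodic
# trigger could be configured on the project's Celery app, e.g.
#   app.conf.beat_schedule = {
#       "refresh-ban": {"task": "refresh_ban", "schedule": 60.0},
#       "refresh-ip-activity": {"task": "refresh_ip_activity", "schedule": 60.0},
#   }
# The `app` object and the 60-second interval are assumptions for illustration.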
|
lyn716/deep_stack
|
django_server/RobotKiller/tasks.py
|
Python
|
apache-2.0
| 1,115 | 0 |
import random as rd
class Pile:
def __init__(self, data=None):
if data:
self.data = [i for i in data]
else:
self.data = []
def __repr__(self):
max_sp = len(str(max(self.data)))
out = ""
for i in range(len(self.data) - 1, -1, -1):
out += "|{}|\n".format(self.get_fit(self.data[i], max_sp))
return out + "‾" * (max_sp + 2)
@staticmethod
def get_fit(elem, max_sp):
return str(elem) + ' ' * (max_sp - len(str(elem)))
def empiler(self, e):
self.data.append(e)
def depiler(self):
return self.data.pop()
def taille(self):
return len(self.data)
def __len__(self):
return len(self.data)
def multiplication(self):
p2 = Pile()
output = 1
for i in range(self.taille()):
elem = self.depiler()
output *= elem
p2.empiler(elem)
for i in range(p2.taille()):
self.empiler(p2.depiler())
return output
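# Added example (commented out, matching the file's own style):
# p = Pile([2, 3, 4])
# p.multiplication()  # -> 24; the pile still holds [2, 3, 4] afterwards
# print(p)            # prints the pile top-down between | | bars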
class DeuxPile:
def __init__(self, pile1: Pile, pile2: Pile):
self.p1 = pile1
self.p2 = pile2
def __repr__(self):
if self.p1.data:
max_sp1 = len(str(max(self.p1.data)))
else:
max_sp2 = len(str(max(self.p2.data)))
maxi = len(self.p2)
out = ""
for i in range(maxi - 1, -1, -1):
out += "{} |{}|\n".format(" " * 3, self.p2.get_fit(self.p2.data[i], max_sp2))
return out + "{} {}\n".format("‾" * 3, "‾" * (max_sp2 + 2))
if self.p2.data:
max_sp2 = len(str(max(self.p2.data)))
else:
maxi = len(self.p1)
out = ""
for i in range(maxi - 1, -1, -1):
out += "|{}| {}\n".format(self.p1.get_fit(self.p1.data[i], max_sp1), " " * 3)
return out + "{} {}\n".format("‾" * 3, "‾" * 3)
maxi = max([len(self.p1), len(self.p2)])
out = ""
for i in range(maxi - 1, -1, -1):
if i > len(self.p1) - 1:
out += "{} |{}|\n".format(" " * (max_sp1 + 2), self.p2.get_fit(self.p2.data[i], max_sp2))
elif i > len(self.p2) - 1:
out += "|{}| {}\n".format(self.p1.get_fit(self.p1.data[i], max_sp1), " " * (max_sp2 + 2))
else:
out += "|{}| |{}|\n".format(self.p1.get_fit(self.p1.data[i], max_sp1),
self.p2.get_fit(self.p2.data[i], max_sp2))
return out + "{} {}\n".format("‾" * (max_sp1 + 2), "‾" * (max_sp2 + 2))
def separation(self):
print(self)
temp = Pile()
for i in range(len(self.p1)):
elem = self.p1.depiler()
if elem % 2 == 0:
temp.empiler(elem)
else:
self.p2.empiler(elem)
print(self)
for i in range(len(self.p2)):
elem = self.p2.depiler()
if elem % 2 == 0:
temp.empiler(elem)
else:
self.p1.empiler(elem)
print(self)
for i in range(len(temp)):
self.p2.empiler(temp.depiler())
print(self)
# pile = Pile([1, 2, 3, 4])
# print(multiplication(pile))
# print(pile)
#
# p1 = Pile([rd.randint(0, 9) for _ in range(5)])
# p2 = Pile([rd.randint(0, 9) for _ in range(5)])
# two_pile = DeuxPile(p1, p2)
# two_pile.separation()
#
# def suite_newton(r, n):
# if n == 0:
# return r
# prec = suite_newton(r, n - 1)
# return (prec + (r / prec)) / 2
#
#
# def sqrt_newton(r, error):
# n = 0
# racine = suite_newton(r, n)
# racine_carre = racine * racine
# while not - error < r - racine_carre < error:
# n += 1
# racine = suite_newton(r, n)
# racine_carre = racine * racine
# print("{} -> {}".format(n, racine))
# return racine
#
#
# # print(suite_newton(3, 8))
# # sqrt_newton(3, 0.01)
#
# def dichoto(r, error):
# mini = 0
# maxi = r
# racine = (maxi + mini) / 2
# racine_carre = racine * racine
# while not -error < r - racine_carre < error:
# if racine * racine > r:
# maxi = racine
# if racine * racine < r:
# mini = racine
# print(racine)
# racine = (maxi + mini) / 2
# racine_carre = racine * racine
# return racine
#
#
# dichoto(3, 0.01)
#
#
# def average(reads):
# sum = 0
# for read in reads:
# sum += len(read)
# return sum / len(reads)
#
#
# print(average(["AGGCT", "GGAT", "GGCAAA"]))
#
#
# def threshold(reads):
# moyenne = average(reads)
# output = []
# for read in reads:
# if len(read) >= moyenne:
# output.append(read)
# return output
#
#
# print(threshold(["AGGCT", "GGAT", "GGCAAA"]))
#
#
# def count_nucl(seq: str, symbol: str):
# output = 0
# for nucl in seq:
# if nucl == symbol:
# output += 1
# return output
#
#
# print(count_nucl("AGGCT", "G"))
#
#
# def ratio_gc(reads: list):
# list_gc = []
# for read in reads:
# counter = 0
# for nucl in read:
# if nucl == "G" or nucl == "C":
# counter += 1
# list_gc.append(counter / len(read))
# somme = 0
# for gc in list_gc:
# somme += gc
# return somme / len(list_gc)
#
#
# print(ratio_gc(["AGGCT", "GGAT", "GGCAAA"]))
#
#
# def remove_ends(reads, adaptor: str):
# output = []
# for read in reads:
# if read[:len(adaptor)] == adaptor:
# output.append(read[len(adaptor):])
# if read[-len(adaptor):] == adaptor:
# output.append(read[:-len(adaptor)])
# return output
#
#
# print(remove_ends(["TTTCAGGCT", "GGATTTTC", "TTTCGGCAAA"], "TTTC"))
def pre_sup(center: str):
return ["__"] + [center] * 4 + ["__"]
def tableau():
tab = [pre_sup("-")]
for i in range(3):
tab.append(pre_sup("0"))
tab.append(pre_sup("1"))
tab.append(pre_sup("-"))
for i in tab:
print(" ".join(i))
# tableau()
def molecule(counter):
s = 1
pas = 3
for n in range(pas, counter, pas):
if n % 2 != 0:
s = s - n
print("n =", n)
print(s)
return s
print("Hello")
print("Give a value")
info = int(input())
total = molecule(info)
print(total)
def till_0():
test = 16
liste = []
while test != 0:
test = int(input("n = "))
liste.append(test)
for i in liste[:-2]:
if i > liste[-2]:
print(i)
# till_0()
def add_seq():
dico = {}
print("Identifiant:")
dico["id"] = input()
print("Sequence ADN:")
dico["seq"] = input()
dico["len"] = len(dico["seq"])
print("Liste gene ( format ==> a b c d):")
dico["gene"] = input().split()
data.append(dico)
def show_list_gene():
dico_gene = {}
for seq in data:
for gene in seq["gene"]:
if gene in dico_gene:
dico_gene[gene] += [seq["seq"]]
else:
dico_gene[gene] = [seq["seq"]]
for key in dico_gene:
print("{} : {}".format(key, " ".join(dico_gene[key])))
def map_kinase():
global data
data = []
while True:
print("1 --> Ajouter une séquence")
print("2 --> Afficher info d'une espèce")
print("3 --> Afficher liste des gènes et séquences associées")
print("4 --> Exit")
choix = input()
if choix == "1":
add_seq()
elif choix == "2":
pass
elif choix == "3":
show_list_gene()
elif choix == "4":
exit()
# map_kinase()
|
bros-bioinfo/bros-bioinfo.github.io
|
COURS/M1/SEMESTRE1/ALGO_PROG/ALGO/Eliot/anales.py
|
Python
|
mit
| 7,701 | 0.000781 |
import os
from rbm import RBM
from au import AutoEncoder
import tensorflow as tf
import input_data
from utilsnn import show_image, min_max_scale
import matplotlib.pyplot as plt
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', '/tmp/data/', 'Directory for storing data')
flags.DEFINE_integer('epochs', 50, 'The number of training epochs')
flags.DEFINE_integer('batchsize', 30, 'The batch size')
flags.DEFINE_boolean('restore_rbm', False, 'Whether to restore the RBM weights or not.')
# ensure output dir exists
if not os.path.isdir('out'):
os.mkdir('out')
mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
trX, teX = min_max_scale(trX, teX)
# RBMs
rbmobject1 = RBM(784, 900, ['rbmw1', 'rbvb1', 'rbmhb1'], 0.3)
rbmobject2 = RBM(900, 500, ['rbmw2', 'rbvb2', 'rbmhb2'], 0.3)
rbmobject3 = RBM(500, 250, ['rbmw3', 'rbvb3', 'rbmhb3'], 0.3)
rbmobject4 = RBM(250, 2, ['rbmw4', 'rbvb4', 'rbmhb4'], 0.3)
if FLAGS.restore_rbm:
rbmobject1.restore_weights('./out/rbmw1.chp')
rbmobject2.restore_weights('./out/rbmw2.chp')
rbmobject3.restore_weights('./out/rbmw3.chp')
rbmobject4.restore_weights('./out/rbmw4.chp')
# Autoencoder
autoencoder = AutoEncoder(784, [900, 500, 250, 2], [['rbmw1', 'rbmhb1'],
['rbmw2', 'rbmhb2'],
['rbmw3', 'rbmhb3'],
['rbmw4', 'rbmhb4']], tied_weights=False)
iterations = len(trX) / FLAGS.batchsize
# Train First RBM
print('first rbm')
for i in range(FLAGS.epochs):
for j in range(iterations):
batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batchsize)
rbmobject1.partial_fit(batch_xs)
print(rbmobject1.compute_cost(trX))
show_image("out/1rbm.jpg", rbmobject1.n_w, (28, 28), (30, 30))
rbmobject1.save_weights('./out/rbmw1.chp')
# Train Second RBM2
print('second rbm')
for i in range(FLAGS.epochs):
for j in range(iterations):
batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batchsize)
# Transform features with first rbm for second rbm
batch_xs = rbmobject1.transform(batch_xs)
rbmobject2.partial_fit(batch_xs)
print(rbmobject2.compute_cost(rbmobject1.transform(trX)))
show_image("out/2rbm.jpg", rbmobject2.n_w, (30, 30), (25, 20))
rbmobject2.save_weights('./out/rbmw2.chp')
# Train Third RBM
print('third rbm')
for i in range(FLAGS.epochs):
for j in range(iterations):
# Transform features
batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batchsize)
batch_xs = rbmobject1.transform(batch_xs)
batch_xs = rbmobject2.transform(batch_xs)
rbmobject3.partial_fit(batch_xs)
print(rbmobject3.compute_cost(rbmobject2.transform(rbmobject1.transform(trX))))
show_image("out/3rbm.jpg", rbmobject3.n_w, (25, 20), (25, 10))
rbmobject3.save_weights('./out/rbmw3.chp')
# Train Fourth RBM
print('fourth rbm')
for i in range(FLAGS.epochs):
for j in range(iterations):
batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batchsize)
# Transform features
batch_xs = rbmobject1.transform(batch_xs)
batch_xs = rbmobject2.transform(batch_xs)
batch_xs = rbmobject3.transform(batch_xs)
rbmobject4.partial_fit(batch_xs)
print(rbmobject4.compute_cost(rbmobject3.transform(rbmobject2.transform(rbmobject1.transform(trX)))))
rbmobject4.save_weights('./out/rbmw4.chp')
# Load RBM weights to Autoencoder
autoencoder.load_rbm_weights('./out/rbmw1.chp', ['rbmw1', 'rbmhb1'], 0)
autoencoder.load_rbm_weights('./out/rbmw2.chp', ['rbmw2', 'rbmhb2'], 1)
autoencoder.load_rbm_weights('./out/rbmw3.chp', ['rbmw3', 'rbmhb3'], 2)
autoencoder.load_rbm_weights('./out/rbmw4.chp', ['rbmw4', 'rbmhb4'], 3)
# Train Autoencoder
print('autoencoder')
for i in range(FLAGS.epochs):
cost = 0.0
for j in range(iterations):
batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batchsize)
cost += autoencoder.partial_fit(batch_xs)
print(cost)
autoencoder.save_weights('./out/au.chp')
autoencoder.load_weights('./out/au.chp')
fig, ax = plt.subplots()
print(autoencoder.transform(teX)[:, 0])
print(autoencoder.transform(teX)[:, 1])
plt.scatter(autoencoder.transform(teX)[:, 0], autoencoder.transform(teX)[:, 1], alpha=0.5)
plt.show()
raw_input("Press Enter to continue...")
plt.savefig('out/myfig')
|
Cospel/rbm-ae-tf
|
test-ae-rbm.py
|
Python
|
mit
| 4,382 | 0.005933 |
from django.shortcuts import render
from moth.views.base.vulnerable_template_view import VulnerableTemplateView
class EchoHeadersView(VulnerableTemplateView):
description = title = 'Echoes all request headers'
url_path = 'echo-headers.py'
KNOWN_HEADERS = ('CONTENT_LENGTH',)
def is_http_header(self, hname):
return hname.startswith('HTTP_') or hname in self.KNOWN_HEADERS
def translate_header(self, hname):
hname = hname.replace('HTTP_', '')
hname = hname.replace('_', '-')
hname = hname.lower()
hname = hname.title()
return hname
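    # Added note: translate_header maps WSGI-style names back to HTTP header form,
    # e.g. "HTTP_X_FORWARDED_FOR" -> "X-Forwarded-For" (illustrative value).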
def get(self, request, *args, **kwds):
context = self.get_context_data()
html = ''
msg_fmt = 'Header "%s" with value "%s" <br/>\n'
for hname in request.META:
if self.is_http_header(hname):
html += msg_fmt % (self.translate_header(hname),
request.META[hname])
context['html'] = html
return render(request, self.template_name, context)
|
andresriancho/django-moth
|
moth/views/vulnerabilities/core/headers.py
|
Python
|
gpl-2.0
| 1,103 | 0.00544 |
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# for py2/py3 compatibility
from __future__ import print_function
import signal
from . import base
from testrunner.local import utils
class SignalProc(base.TestProcObserver):
def __init__(self):
super(SignalProc, self).__init__()
self.exit_code = utils.EXIT_CODE_PASS
def setup(self, *args, **kwargs):
super(SignalProc, self).setup(*args, **kwargs)
    # It should be called after processors are chained together so that a
    # caught signal is not lost.
signal.signal(signal.SIGINT, self._on_ctrlc)
signal.signal(signal.SIGTERM, self._on_sigterm)
def _on_ctrlc(self, _signum, _stack_frame):
print('>>> Ctrl-C detected, early abort...')
self.exit_code = utils.EXIT_CODE_INTERRUPTED
self.stop()
def _on_sigterm(self, _signum, _stack_frame):
print('>>> SIGTERM received, early abort...')
self.exit_code = utils.EXIT_CODE_TERMINATED
self.stop()
|
weolar/miniblink49
|
v8_7_5/tools/testrunner/testproc/sigproc.py
|
Python
|
apache-2.0
| 1,059 | 0.003777 |
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class SeriesActors(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
SeriesActors - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'data': 'list[SeriesActorsData]'
}
self.attribute_map = {
'data': 'data'
}
self._data = None
@property
def data(self):
"""
Gets the data of this SeriesActors.
:return: The data of this SeriesActors.
:rtype: list[SeriesActorsData]
"""
return self._data
@data.setter
def data(self, data):
"""
Sets the data of this SeriesActors.
:param data: The data of this SeriesActors.
:type: list[SeriesActorsData]
"""
self._data = data
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
FireBladeNooT/Medusa_1_6
|
lib/tvdbapiv2/models/series_actors.py
|
Python
|
gpl-3.0
| 3,002 | 0.000666 |
import random
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
_largesize = 300
def __init__(self, head):
self.head = head
self.lsize = 0
while head.next:
head = head.next
self.lsize += 1
self.m1_idx = None
self.m2_idx = None
if self.lsize > self._largesize:
self.m1_idx = self.lsize / 3 # start from 1/3
self.m1 = self._getN(self.m1_idx)
self.m2_idx = self.m1_idx * 2 # start from 2/3
self.m2 = self._getN(self.m2_idx)
def _getN(self, n):
n -= 1
p = self.head
while n:
p = p.next
n -= 1
return p
def getRandom(self):
def _get(delta, start):
p = start
while delta:
p = p.next
delta -= 1
return p.val
nextpos = random.randint(0, self.lsize)
if not self.m1_idx:
return _get(nextpos, self.head)
if nextpos < self.m1_idx:
val = _get(nextpos, self.head)
elif nextpos < self.m2_idx:
val = _get(nextpos - self.m1_idx, self.m1)
else:
val = _get(nextpos - self.m2_idx, self.m2)
return val
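# Hedged usage sketch (added), using the commented-out ListNode definition above:
#   head = ListNode(1); head.next = ListNode(2); head.next.next = ListNode(3)
#   s = Solution(head)
#   s.getRandom()   # returns 1, 2 or 3 uniformly at random
# The 1/3 and 2/3 anchor nodes only come into play for lists longer than _largesize.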
|
daicang/Leetcode-solutions
|
382-linked-list-random-node.py
|
Python
|
mit
| 1,372 | 0 |
from setuptools import setup
from os import path, environ
from sys import argv
here = path.abspath(path.dirname(__file__))
try:
if argv[1] == "test":
environ['PYTHONPATH'] = here
except IndexError:
pass
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='libfs',
version='0.1',
description='Library Filesystem',
long_description=long_description,
author='Christof Hanke',
author_email='christof.hanke@induhviduals.de',
url='https://github.com/ya-induhvidual/libfs',
packages=['Libfs'],
license='MIT',
install_requires=['llfuse', 'mutagenx'],
test_suite="test/test_all.py",
scripts=['scripts/libfs.py'],
keywords='fuse multimedia',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: Linux',
'Topic :: System :: Filesystems'
],
)
|
ya-induhvidual/libfs
|
setup.py
|
Python
|
mit
| 1,167 | 0 |
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Sale Require Purchase Order Number',
'version': '13.0.1.1.0',
'category': 'Sales',
'sequence': 14,
'summary': '',
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'license': 'AGPL-3',
'images': [
],
'depends': [
'sale_stock'
],
'data': [
'views/sale_order_views.xml',
'views/res_partner_views.xml',
'views/account_move_views.xml',
'views/stock_picking_views.xml'
],
'demo': [
],
'installable': False,
'auto_install': False,
'application': False,
}
|
ingadhoc/sale
|
sale_require_purchase_order_number/__manifest__.py
|
Python
|
agpl-3.0
| 1,515 | 0 |
# coding=utf-8
import unittest
from types import MethodType
from parameterized import parameterized
from six import StringIO
from conans.client.output import ConanOutput, colorama_initialize
from mock import mock
class ConanOutputTest(unittest.TestCase):
def test_blocked_output(self):
# https://github.com/conan-io/conan/issues/4277
stream = StringIO()
def write_raise(self, data):
write_raise.counter = getattr(write_raise, "counter", 0) + 1
if write_raise.counter < 2:
raise IOError("Stdout locked")
self.super_write(data)
stream.super_write = stream.write
stream.write = MethodType(write_raise, stream)
out = ConanOutput(stream)
with mock.patch("time.sleep") as sleep:
out.write("Hello world")
sleep.assert_any_call(0.02)
self.assertEqual("Hello world", stream.getvalue())
@parameterized.expand([(False, {}),
(False, {"CONAN_COLOR_DISPLAY": "0"}),
(True, {"CONAN_COLOR_DISPLAY": "0"}),
(False, {"PYCHARM_HOSTED": "1"}),
(True, {"PYCHARM_HOSTED": "1", "CONAN_COLOR_DISPLAY": "0"}),
(True, {"NO_COLOR": ""}),
(True, {"CLICOLOR": "0"}),
(True, {"CLICOLOR": "0", "CONAN_COLOR_DISPLAY": "1"}),
(False, {"CLICOLOR": "1"}),
(False, {"CLICOLOR_FORCE": "0"}),
(True,
{"CLICOLOR": "1", "CLICOLOR_FORCE": "1", "CONAN_COLOR_DISPLAY": "1",
"PYCHARM_HOSTED": "1", "NO_COLOR": "1"})])
def test_output_no_color(self, isatty, env):
with mock.patch("colorama.init") as init:
with mock.patch("sys.stdout.isatty", return_value=isatty), \
mock.patch.dict("os.environ", env, clear=True):
assert not colorama_initialize()
init.assert_not_called()
@parameterized.expand([(True, {}),
(False, {"CONAN_COLOR_DISPLAY": "1"}),
(True, {"CONAN_COLOR_DISPLAY": "1"}),
(True, {"CLICOLOR": "1"}),
(True, {"CLICOLOR_FORCE": "0"})])
def test_output_color(self, isatty, env):
with mock.patch("colorama.init") as init:
with mock.patch("sys.stdout.isatty", return_value=isatty), \
mock.patch.dict("os.environ", env, clear=True):
assert colorama_initialize()
init.assert_called_once_with()
@parameterized.expand([(False, {"PYCHARM_HOSTED": "1", "CONAN_COLOR_DISPLAY": "1"}),
(True, {"PYCHARM_HOSTED": "1"}),
(False, {"CLICOLOR_FORCE": "1"}),
(True, {"CLICOLOR_FORCE": "1", "CLICOLOR": "0"}),
(True, {"CLICOLOR_FORCE": "1", "CONAN_COLOR_DISPLAY": "0"})])
def test_output_color_prevent_strip(self, isatty, env):
with mock.patch("colorama.init") as init:
with mock.patch("sys.stdout.isatty", return_value=isatty), \
mock.patch.dict("os.environ", env, clear=True):
assert colorama_initialize()
init.assert_called_once_with(convert=False, strip=False)
|
conan-io/conan
|
conans/test/unittests/client/conan_output_test.py
|
Python
|
mit
| 3,440 | 0.001453 |
# GUI frame for the hpsModel_function.py
try:
# for Python2
from Tkinter import * ## notice capitalized T in Tkinter
import tkFileDialog, tkMessageBox
except ImportError:
# for Python3
from tkinter import * ## notice lowercase 't' in tkinter here
from tkinter import filedialog as tkFileDialog
from tkinter import messagebox as tkMessageBox
import sys, os
from scipy.io.wavfile import read
import hpsModel_function
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
import utilFunctions as UF
class HpsModel_frame:
def __init__(self, parent):
self.parent = parent
self.initUI()
def initUI(self):
choose_label = "Input file (.wav, mono and 44100 sampling rate):"
Label(self.parent, text=choose_label).grid(row=0, column=0, sticky=W, padx=5, pady=(10,2))
#TEXTBOX TO PRINT PATH OF THE SOUND FILE
self.filelocation = Entry(self.parent)
self.filelocation.focus_set()
self.filelocation["width"] = 25
self.filelocation.grid(row=1,column=0, sticky=W, padx=10)
self.filelocation.delete(0, END)
self.filelocation.insert(0, '../../sounds/sax-phrase-short.wav')
#BUTTON TO BROWSE SOUND FILE
self.open_file = Button(self.parent, text="Browse...", command=self.browse_file) #see: def browse_file(self)
self.open_file.grid(row=1, column=0, sticky=W, padx=(220, 6)) #put it beside the filelocation textbox
#BUTTON TO PREVIEW SOUND FILE
self.preview = Button(self.parent, text=">", command=lambda:UF.wavplay(self.filelocation.get()), bg="gray30", fg="white")
self.preview.grid(row=1, column=0, sticky=W, padx=(306,6))
## HARMONIC MODEL
#ANALYSIS WINDOW TYPE
wtype_label = "Window type:"
Label(self.parent, text=wtype_label).grid(row=2, column=0, sticky=W, padx=5, pady=(10,2))
self.w_type = StringVar()
self.w_type.set("blackman") # initial value
window_option = OptionMenu(self.parent, self.w_type, "rectangular", "hanning", "hamming", "blackman", "blackmanharris")
window_option.grid(row=2, column=0, sticky=W, padx=(95,5), pady=(10,2))
#WINDOW SIZE
M_label = "Window size (M):"
Label(self.parent, text=M_label).grid(row=4, column=0, sticky=W, padx=5, pady=(10,2))
self.M = Entry(self.parent, justify=CENTER)
self.M["width"] = 5
self.M.grid(row=4,column=0, sticky=W, padx=(115,5), pady=(10,2))
self.M.delete(0, END)
self.M.insert(0, "601")
#FFT SIZE
N_label = "FFT size (N) (power of two bigger than M):"
Label(self.parent, text=N_label).grid(row=5, column=0, sticky=W, padx=5, pady=(10,2))
self.N = Entry(self.parent, justify=CENTER)
self.N["width"] = 5
self.N.grid(row=5,column=0, sticky=W, padx=(270,5), pady=(10,2))
self.N.delete(0, END)
self.N.insert(0, "1024")
#THRESHOLD MAGNITUDE
t_label = "Magnitude threshold (t) (in dB):"
Label(self.parent, text=t_label).grid(row=6, column=0, sticky=W, padx=5, pady=(10,2))
self.t = Entry(self.parent, justify=CENTER)
self.t["width"] = 5
self.t.grid(row=6, column=0, sticky=W, padx=(205,5), pady=(10,2))
self.t.delete(0, END)
self.t.insert(0, "-100")
#MIN DURATION SINUSOIDAL TRACKS
minSineDur_label = "Minimum duration of sinusoidal tracks:"
Label(self.parent, text=minSineDur_label).grid(row=7, column=0, sticky=W, padx=5, pady=(10,2))
self.minSineDur = Entry(self.parent, justify=CENTER)
self.minSineDur["width"] = 5
self.minSineDur.grid(row=7, column=0, sticky=W, padx=(250,5), pady=(10,2))
self.minSineDur.delete(0, END)
self.minSineDur.insert(0, "0.1")
#MAX NUMBER OF HARMONICS
nH_label = "Maximum number of harmonics:"
Label(self.parent, text=nH_label).grid(row=8, column=0, sticky=W, padx=5, pady=(10,2))
self.nH = Entry(self.parent, justify=CENTER)
self.nH["width"] = 5
self.nH.grid(row=8, column=0, sticky=W, padx=(215,5), pady=(10,2))
self.nH.delete(0, END)
self.nH.insert(0, "100")
#MIN FUNDAMENTAL FREQUENCY
minf0_label = "Minimum fundamental frequency:"
Label(self.parent, text=minf0_label).grid(row=9, column=0, sticky=W, padx=5, pady=(10,2))
self.minf0 = Entry(self.parent, justify=CENTER)
self.minf0["width"] = 5
self.minf0.grid(row=9, column=0, sticky=W, padx=(220,5), pady=(10,2))
self.minf0.delete(0, END)
self.minf0.insert(0, "350")
#MAX FUNDAMENTAL FREQUENCY
maxf0_label = "Maximum fundamental frequency:"
Label(self.parent, text=maxf0_label).grid(row=10, column=0, sticky=W, padx=5, pady=(10,2))
self.maxf0 = Entry(self.parent, justify=CENTER)
self.maxf0["width"] = 5
self.maxf0.grid(row=10, column=0, sticky=W, padx=(220,5), pady=(10,2))
self.maxf0.delete(0, END)
self.maxf0.insert(0, "700")
#MAX ERROR ACCEPTED
f0et_label = "Maximum error in f0 detection algorithm:"
Label(self.parent, text=f0et_label).grid(row=11, column=0, sticky=W, padx=5, pady=(10,2))
self.f0et = Entry(self.parent, justify=CENTER)
self.f0et["width"] = 5
self.f0et.grid(row=11, column=0, sticky=W, padx=(265,5), pady=(10,2))
self.f0et.delete(0, END)
self.f0et.insert(0, "5")
#ALLOWED DEVIATION OF HARMONIC TRACKS
harmDevSlope_label = "Max frequency deviation in harmonic tracks:"
Label(self.parent, text=harmDevSlope_label).grid(row=12, column=0, sticky=W, padx=5, pady=(10,2))
self.harmDevSlope = Entry(self.parent, justify=CENTER)
self.harmDevSlope["width"] = 5
self.harmDevSlope.grid(row=12, column=0, sticky=W, padx=(285,5), pady=(10,2))
self.harmDevSlope.delete(0, END)
self.harmDevSlope.insert(0, "0.01")
#DECIMATION FACTOR
stocf_label = "Stochastic approximation factor:"
Label(self.parent, text=stocf_label).grid(row=13, column=0, sticky=W, padx=5, pady=(10,2))
self.stocf = Entry(self.parent, justify=CENTER)
self.stocf["width"] = 5
self.stocf.grid(row=13, column=0, sticky=W, padx=(210,5), pady=(10,2))
self.stocf.delete(0, END)
self.stocf.insert(0, "0.2")
#BUTTON TO COMPUTE EVERYTHING
self.compute = Button(self.parent, text="Compute", command=self.compute_model, bg="dark red", fg="white")
self.compute.grid(row=14, column=0, padx=5, pady=(10,2), sticky=W)
#BUTTON TO PLAY SINE OUTPUT
output_label = "Sinusoidal:"
Label(self.parent, text=output_label).grid(row=15, column=0, sticky=W, padx=5, pady=(10,0))
self.output = Button(self.parent, text=">", command=lambda:UF.wavplay('output_sounds/' + os.path.basename(self.filelocation.get())[:-4] + '_hpsModel_sines.wav'), bg="gray30", fg="white")
self.output.grid(row=15, column=0, padx=(80,5), pady=(10,0), sticky=W)
#BUTTON TO PLAY STOCHASTIC OUTPUT
output_label = "Stochastic:"
Label(self.parent, text=output_label).grid(row=16, column=0, sticky=W, padx=5, pady=(5,0))
self.output = Button(self.parent, text=">", command=lambda:UF.wavplay('output_sounds/' + os.path.basename(self.filelocation.get())[:-4] + '_hpsModel_stochastic.wav'), bg="gray30", fg="white")
self.output.grid(row=16, column=0, padx=(80,5), pady=(5,0), sticky=W)
#BUTTON TO PLAY OUTPUT
output_label = "Output:"
Label(self.parent, text=output_label).grid(row=17, column=0, sticky=W, padx=5, pady=(5,15))
self.output = Button(self.parent, text=">", command=lambda:UF.wavplay('output_sounds/' + os.path.basename(self.filelocation.get())[:-4] + '_hpsModel.wav'), bg="gray30", fg="white")
self.output.grid(row=17, column=0, padx=(80,5), pady=(5,15), sticky=W)
# define options for opening file
self.file_opt = options = {}
options['defaultextension'] = '.wav'
options['filetypes'] = [('All files', '.*'), ('Wav files', '.wav')]
options['initialdir'] = '../../sounds/'
options['title'] = 'Open a mono audio file .wav with sample frequency 44100 Hz'
def browse_file(self):
self.filename = tkFileDialog.askopenfilename(**self.file_opt)
#set the text of the self.filelocation
self.filelocation.delete(0, END)
self.filelocation.insert(0,self.filename)
def compute_model(self):
try:
inputFile = self.filelocation.get()
window = self.w_type.get()
M = int(self.M.get())
N = int(self.N.get())
t = int(self.t.get())
minSineDur = float(self.minSineDur.get())
nH = int(self.nH.get())
minf0 = int(self.minf0.get())
maxf0 = int(self.maxf0.get())
f0et = int(self.f0et.get())
harmDevSlope = float(self.harmDevSlope.get())
stocf = float(self.stocf.get())
hpsModel_function.main(inputFile, window, M, N, t, minSineDur, nH, minf0, maxf0, f0et, harmDevSlope, stocf)
except ValueError as errorMessage:
tkMessageBox.showerror("Input values error", errorMessage)
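# --- Illustrative usage sketch (not part of the original file) ---
# The methods above belong to a Tkinter frame class defined earlier in this file;
# its exact name is not shown here, so "HpsModelFrame" below is a hypothetical
# placeholder. A minimal sketch of how such a frame is typically mounted, assuming
# the Python 2 Tkinter used by the rest of this module:
#
#     from Tkinter import Tk
#     root = Tk()
#     root.title("sms-tools HPS model")
#     app = HpsModelFrame(root)   # hypothetical class name
#     root.mainloop()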
|
MTG/sms-tools
|
software/models_interface/hpsModel_GUI_frame.py
|
Python
|
agpl-3.0
| 8,438 | 0.03425 |
number = input()
number_array = [int(x) for x in raw_input().split()]
total = 0
for i in range(1, number):
for j in range(i):
ii = number_array[i]
jj = number_array[j]
if ii < jj:
total += i - j
number_array = number_array[:j] + [ii] + [jj] + number_array[j+1:i] + number_array[i+1:]
break
print total
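# Hedged aside (not from the original submission): the loop above computes, for each
# element, how far it moves when it is inserted into its sorted position, so the running
# total is the number of shifts insertion sort performs (equivalently, the inversion
# count). A small self-contained restatement of the same count, for illustration only:
def insertion_shift_count(arr):
    """Count adjacent swaps made by insertion sort on a copy of arr."""
    arr = list(arr)
    total = 0
    for i in range(1, len(arr)):
        j = i
        while j > 0 and arr[j] < arr[j - 1]:
            arr[j], arr[j - 1] = arr[j - 1], arr[j]
            total += 1
            j -= 1
    return total
# insertion_shift_count([2, 1, 3, 1, 2]) == 4, matching the loop above on that input.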
|
xbfool/hackerrank_xbfool
|
src/algorithms/arrays_and_sorting/running_time_of_algorithms.py
|
Python
|
mit
| 325 | 0.036923 |
import json
import requests
import time
import csv
from datetime import datetime
#----------------------------------------------------------------------
def login(client_id, client_secret, username, password,
          user_agent="ChicagoSchool's scraper"):
    """logs into reddit using OAuth2"""
client_auth = requests.auth.HTTPBasicAuth(client_id, client_secret)
post_data = {"grant_type": "password", "username": username,
"password": password}
response = requests.post("https://www.reddit.com/api/v1/access_token",
auth=client_auth, data=post_data)
print response
token_json = response.json()
headers = {"Authorization": "%s %s" % (token_json["token_type"],
token_json["access_token"]),
"User-Agent": user_agent}
return headers
#----------------------------------------------------------------------
def subredditInfo(sr, limit=100, sorting='top',
user_agent="ChicagoSchool's scraper", **kwargs):
"""retrieves X (max 100) amount of stories in a subreddit
    'sorting' selects the listing to request (e.g. 'top', 'new'); extra
    params/queries such as t=hour, week, month, year or all are accepted via kwargs"""
#query to send
parameters = {'limit': limit,}
parameters.update(kwargs)
url = 'http://www.reddit.com/r/%s/%s.json?limit=%d' % (sr, sorting, limit)
r = requests.get(url, headers={"user-agent": user_agent})
j = json.loads(r.text)
#return list of stories
stories = []
for story in j['data']['children']:
stories.append(story)
return stories
#----------------------------------------------------------------------
def collapseComments(comment):
"""takes in a comment json object and collapses the text for children
into a list"""
if "body" in comment["data"].keys():
comment_list = [comment["data"]["body"].replace(",", "").replace("\n", " ").encode("utf-8")]
if type(comment["data"]["replies"]) is not unicode:
replies = comment["data"]["replies"]["data"]["children"]
for r in replies:
comment_list.extend(collapseComments(r))
return comment_list
else:
return []
#----------------------------------------------------------------------
def threadComments(link, limit=100, sorting='',
user_agent="ChicagoSchool's scraper", **kwargs):
"""gets X (max 100) amount of comments for a given thread."""
#query to send
parameters = {'limit': limit,}
parameters.update(kwargs)
url = 'http://www.reddit.com/%s/%s.json?limit=%d' % (link, sorting, limit)
r = requests.get(url, headers={"user-agent": user_agent})
j = json.loads(r.text)
#return list of comments
if len(j) > 1:
comments = []
for c in j[1]["data"]["children"]:
comments.extend(collapseComments(c))
return comments
#----------------------------------------------------------------------
def fullRun(sr_list, ind, out_f):
"""iterates through a list of subreddits, gets their top threads and
the comments for the top posts"""
t_1 = datetime.now()
sr_comment_list = []
sr_ln = len(sr_list)
for i, sr in enumerate(sr_list[ind:]):
time.sleep(2)
try:
sr_info = subredditInfo(sr)
sr_comments = []
sr_info_ln = len(sr_info)
except Exception as e:
print e
time.sleep(300)
sr_info = subredditInfo(sr)
sr_comments = []
sr_info_ln = len(sr_info)
for j, l in enumerate(sr_info):
try:
sr_comments.extend(threadComments(l["data"]["permalink"]))
print ((i + ind) * 100.) / sr_ln, (j * 100.) / sr_info_ln, datetime.now() - t_1, i + ind, j, sr_info_ln
time.sleep(2)
except Exception as e:
print e
time.sleep(60)
try:
sr_comments.extend(threadComments(l["data"]["permalink"]))
print ((i + ind) * 100.) / sr_ln, (j * 100.) / sr_info_ln, datetime.now() - t_1, i + ind, j, sr_info_ln
time.sleep(2)
except Exception as e:
print e
time.sleep(300)
sr_comments.extend(threadComments(l["data"]["permalink"]))
print ((i + ind) * 100.) / sr_ln, (j * 100.) / sr_info_ln, datetime.now() - t_1, i + ind, j, sr_info_ln
time.sleep(2)
sr_str = " ".join(sr_comments)
out_d = open(out_f, "ab")
out_w = csv.writer(out_d)
out_w.writerow([sr, sr_str])
out_d.close()
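# --- Illustrative usage sketch (not part of the original module) ---
# The helpers above are library-style; a driver would look roughly like the lines
# below. The credential strings, subreddit names and output file are placeholders.
# Note that the headers returned by login() are not consumed by the GET helpers
# above (they send only a User-Agent), so login() is optional for read-only scraping.
#
#     user_agent = "ChicagoSchool's scraper"
#     # headers = login("CLIENT_ID", "CLIENT_SECRET", "USERNAME", "PASSWORD", user_agent)
#     stories = subredditInfo("python", limit=25)
#     for s in stories:
#         print s["data"]["title"]
#     fullRun(["python", "learnpython"], 0, "comments.csv")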
|
lbybee/reddit_spelling_index
|
reddit_spelling_scraper.py
|
Python
|
gpl-2.0
| 4,746 | 0.006532 |
a_str1 = "Craig McBean"
a_str2 = "Sheree-Annm Lewis-McBean"
a_str3 = 'Sheyenne Lewis'
a_str4 = raw_input("Enter fourth Name: ")
print "{:>30}".format(a_str1)
print "{:>30}".format(a_str2)
print "{:>30}".format(a_str3)
print "{:>30}".format(a_str4)
|
cmcbean/pynet_test
|
my-str-ex1.py
|
Python
|
apache-2.0
| 251 | 0.003984 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from openstack.tests.unit import base
from openstack.identity.v2 import extension
IDENTIFIER = 'IDENTIFIER'
EXAMPLE = {
'alias': '1',
'description': '2',
'links': '3',
'name': '4',
'namespace': '5',
'updated': '2015-03-09T12:14:57.233772',
}
class TestExtension(base.TestCase):
def test_basic(self):
sot = extension.Extension()
self.assertEqual('extension', sot.resource_key)
self.assertEqual('extensions', sot.resources_key)
self.assertEqual('/extensions', sot.base_path)
self.assertFalse(sot.allow_create)
self.assertTrue(sot.allow_fetch)
self.assertFalse(sot.allow_commit)
self.assertFalse(sot.allow_delete)
self.assertTrue(sot.allow_list)
def test_make_it(self):
sot = extension.Extension(**EXAMPLE)
self.assertEqual(EXAMPLE['alias'], sot.alias)
self.assertEqual(EXAMPLE['description'], sot.description)
self.assertEqual(EXAMPLE['links'], sot.links)
self.assertEqual(EXAMPLE['name'], sot.name)
self.assertEqual(EXAMPLE['namespace'], sot.namespace)
self.assertEqual(EXAMPLE['updated'], sot.updated_at)
def test_list(self):
resp = mock.Mock()
resp.body = {
"extensions": {
"values": [
{"name": "a"},
{"name": "b"},
]
}
}
resp.json = mock.Mock(return_value=resp.body)
session = mock.Mock()
session.get = mock.Mock(return_value=resp)
sot = extension.Extension(**EXAMPLE)
result = sot.list(session)
self.assertEqual(next(result).name, 'a')
self.assertEqual(next(result).name, 'b')
self.assertRaises(StopIteration, next, result)
|
ctrlaltdel/neutrinator
|
vendor/openstack/tests/unit/identity/v2/test_extension.py
|
Python
|
gpl-3.0
| 2,340 | 0 |
#!/usr/bin/env python
"""
We treat each silica atom as its own residue. As a consequence,
we have more than 10000 residues in the system.
The silly PDB format only allows up to 9999 residues. We solve
this issue by manually creating a .gro file, which allows for
up to 99999 residues
"""
from __future__ import print_function
import re
import MDAnalysis as mda
pdbf = 'SiO2carved_ovl1.5_protein_0.17.pdb'
# retrieve atom info
u = mda.Universe(pdbf)
GRO_FMT = ('{resid:>5d}{resname:<5s}{name:>5s}{id:>5d}'
'{pos[0]:8.3f}{pos[1]:8.3f}{pos[2]:8.3f}'
'\n')
gro = 'confined BSA, t= 0.0\n'
#natoml = '{:5d}\n'.format(len(u.atoms))
atoml = ''
iat=0
vals = dict()
last_resid = 0
for atom in u.atoms:
iat += 1
vals['id'] = atom.id
vals['name'] = atom.name
# residue name
vals['resname'] = atom.resname
if atom.name in ('SIO', 'OSI', 'OA'):
vals['resname'] = atom.name
elif atom.resname == 'SPC':
vals['resname'] = 'SOL'
# residue number
vals['resid'] = atom.resid
if vals['resname'] in ('SIO', 'OSI', 'OA'):
last_resid += 1
vals['resid'] = last_resid
else:
last_resid = atom.resid
vals['pos'] = atom.position/10.0 # from Angstroms to nm
atoml += GRO_FMT.format(**vals)
gro += '{:5d}\n'.format(iat)
gro += atoml
#retrieve the box size
pdb = open(pdbf).read()
RE_BOX = re.compile('CRYST1\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)')
xyz = [float(xi)/10.0 for xi in RE_BOX.search(pdb).groups()]
gro += ' {:9.4f} {:9.4f} {:9.4f}\n'.format(*xyz)
open('confinedBSA_0.gro', 'w').write(gro)
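# --- Illustrative note (not part of the original script) ---
# GRO_FMT above emits the fixed-width .gro atom line; per the GROMACS format the
# fields are roughly:
#   residue number  %5d   (this is why .gro tolerates up to 99999 residues)
#   residue name    %-5s
#   atom name       %5s
#   atom number     %5d
#   x, y, z         %8.3f each, in nm (hence the /10.0 conversion from Angstroms)
# A quick sanity check after writing the file might be, reusing the MDAnalysis
# import above:
#
#     u2 = mda.Universe('confinedBSA_0.gro')
#     print len(u2.atoms), len(u2.residues)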
|
jmborr/confinedBSA
|
simulation/silica/cristobalite/confineBSA/poretop/nobonds/adaptPDB.py
|
Python
|
mit
| 1,589 | 0.010069 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic arithmetic operators.
See the @{$python/math_ops} guide.
@@add
@@subtract
@@multiply
@@scalar_mul
@@div
@@divide
@@truediv
@@floordiv
@@realdiv
@@truncatediv
@@floor_div
@@truncatemod
@@floormod
@@mod
@@cross
@@add_n
@@abs
@@negative
@@sign
@@reciprocal
@@square
@@round
@@sqrt
@@rsqrt
@@pow
@@exp
@@expm1
@@log
@@log1p
@@sinh
@@cosh
@@asinh
@@acosh
@@atanh
@@ceil
@@floor
@@maximum
@@minimum
@@cos
@@sin
@@lbeta
@@tan
@@acos
@@asin
@@atan
@@atan2
@@lgamma
@@digamma
@@erf
@@erfc
@@squared_difference
@@igamma
@@igammac
@@zeta
@@polygamma
@@betainc
@@rint
@@diag
@@diag_part
@@trace
@@transpose
@@eye
@@matrix_diag
@@matrix_diag_part
@@matrix_band_part
@@matrix_set_diag
@@matrix_transpose
@@matmul
@@norm
@@matrix_determinant
@@matrix_inverse
@@cholesky
@@cholesky_solve
@@matrix_solve
@@matrix_triangular_solve
@@matrix_solve_ls
@@qr
@@self_adjoint_eig
@@self_adjoint_eigvals
@@svd
@@tensordot
@@complex
@@conj
@@imag
@@angle
@@real
@@fft
@@ifft
@@fft2d
@@ifft2d
@@fft3d
@@ifft3d
@@reduce_sum
@@reduce_prod
@@reduce_min
@@reduce_max
@@reduce_mean
@@reduce_all
@@reduce_any
@@reduce_logsumexp
@@count_nonzero
@@accumulate_n
@@einsum
@@bincount
@@cumsum
@@cumprod
@@segment_sum
@@segment_prod
@@segment_min
@@segment_max
@@segment_mean
@@unsorted_segment_sum
@@unsorted_segment_max
@@sparse_segment_sum
@@sparse_segment_mean
@@sparse_segment_sqrt_n
@@argmin
@@argmax
@@setdiff1d
@@where
@@unique
@@edit_distance
@@invert_permutation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import state_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.deprecation import deprecated_args
# Aliases for some automatically-generated names.
linspace = gen_math_ops.lin_space
arg_max = deprecated(None, "Use `argmax` instead")(arg_max) # pylint: disable=used-before-assignment
arg_min = deprecated(None, "Use `argmin` instead")(arg_min) # pylint: disable=used-before-assignment
def _set_doc(doc):
def _decorator(func):
func.__doc__ = doc
return func
return _decorator
# pylint: disable=redefined-builtin
@deprecated_args(None, "Use the `axis` argument instead", "dimension")
@_set_doc(gen_math_ops.arg_max.__doc__
.replace("dimensions", "axes")
.replace("dimension", "axis"))
def argmax(input,
axis=None,
name=None,
dimension=None,
output_type=dtypes.int64):
if dimension is not None:
if axis is not None:
raise ValueError("Cannot specify both 'axis' and 'dimension'")
axis = dimension
elif axis is None:
axis = 0
return gen_math_ops.arg_max(input, axis, name=name, output_type=output_type)
@deprecated_args(None, "Use the `axis` argument instead", "dimension")
@_set_doc(gen_math_ops.arg_min.__doc__
.replace("dimensions", "axes")
.replace("dimension", "axis"))
def argmin(input,
axis=None,
name=None,
dimension=None,
output_type=dtypes.int64):
if dimension is not None:
if axis is not None:
raise ValueError("Cannot specify both 'axis' and 'dimension'")
axis = dimension
elif axis is None:
axis = 0
return gen_math_ops.arg_min(input, axis, name=name, output_type=output_type)
# pylint: enable=redefined-builtin
# pylint: disable=anomalous-backslash-in-string,protected-access
# pylint: disable=g-docstring-has-escape
def abs(x, name=None):
r"""Computes the absolute value of a tensor.
Given a tensor `x` of complex numbers, this operation returns a tensor of type
`float32` or `float64` that is the absolute value of each element in `x`. All
elements in `x` must be complex numbers of the form \\(a + bj\\). The
absolute value is computed as \\( \sqrt{a^2 + b^2}\\). For example:
```python
x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])
tf.abs(x) # [5.25594902, 6.60492229]
```
Args:
x: A `Tensor` or `SparseTensor` of type `float32`, `float64`, `int32`,
`int64`, `complex64` or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` the same size and type as `x` with absolute
values.
  Note, for `complex64` or `complex128` input, the returned `Tensor` will be
  of type `float32` or `float64`, respectively.
"""
with ops.name_scope(name, "Abs", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
if x.values.dtype in (dtypes.complex64, dtypes.complex128):
x_abs = gen_math_ops._complex_abs(
x.values, Tout=x.values.dtype.real_dtype, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_abs, dense_shape=x.dense_shape)
x_abs = gen_math_ops._abs(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_abs, dense_shape=x.dense_shape)
else:
x = ops.convert_to_tensor(x, name="x")
if x.dtype in (dtypes.complex64, dtypes.complex128):
return gen_math_ops._complex_abs(x, Tout=x.dtype.real_dtype, name=name)
return gen_math_ops._abs(x, name=name)
# pylint: enable=g-docstring-has-escape
# pylint: disable=redefined-builtin
def _bucketize(input, boundaries, name=None):
return gen_math_ops._bucketize(input=input, boundaries=boundaries, name=name)
# pylint: enable=redefined-builtin
class DivideDelegateWithName(object):
"""Use Python2/Python3 division delegation to implement divide for tensors."""
def __init__(self, x, name):
"""Construct DivideDelegateWithName.
Args:
x: Tensor to use as left operand in operator overloads
name: The name that is preferred for the op created.
"""
self.x = x
self.name = name
def __truediv__(self, y):
return _truediv_python3(self.x, y, self.name)
def __floordiv__(self, y):
return floordiv(self.x, y, self.name)
def __div__(self, y):
return _div_python2(self.x, y, self.name)
def divide(x, y, name=None):
"""Computes Python style division of `x` by `y`."""
if name is not None:
# Cannot use tensors operator overload, because it has no way to track
# override names. Use a dummy class to track the runtime division behavior
return DivideDelegateWithName(x, name) / y
else:
return x / y
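# Illustrative (not part of the original docstring): with integer tensor inputs,
# divide() follows Python 3 semantics and promotes to floating point, e.g.
#   tf.divide(tf.constant(6), tf.constant(4))   # -> 1.5 (float64)
# whereas tf.div() (defined below) keeps flooring integer semantics.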
def multiply(x, y, name=None):
return gen_math_ops._mul(x, y, name)
multiply.__doc__ = gen_math_ops._mul.__doc__.replace("Mul", "`tf.multiply`")
# TODO(aselle): put deprecation in after another round of global code changes
@deprecated(
"2016-12-30",
"`tf.mul(x, y)` is deprecated, please use `tf.multiply(x, y)` or `x * y`")
def _mul(x, y, name=None):
return gen_math_ops._mul(x, y, name)
_mul.__doc__ = (gen_math_ops._mul.__doc__ +
("" if _mul.__doc__ is None else _mul.__doc__))
def subtract(x, y, name=None):
return gen_math_ops._sub(x, y, name)
subtract.__doc__ = gen_math_ops._sub.__doc__.replace("`Sub`", "`tf.subtract`")
# TODO(aselle): put deprecation in after another round of global code changes
@deprecated(
"2016-12-30",
"`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`")
def _sub(x, y, name=None):
return gen_math_ops._sub(x, y, name)
_sub.__doc__ = (gen_math_ops._sub.__doc__ +
("" if _sub.__doc__ is None else _sub.__doc__))
# pylint: disable=g-docstring-has-escape
def negative(x, name=None):
"""Computes numerical negative value element-wise.
I.e., \\(y = -x\\).
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Neg", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_neg = gen_math_ops._neg(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_neg, dense_shape=x.dense_shape)
else:
return gen_math_ops._neg(x, name=name)
# pylint: enable=g-docstring-has-escape
# pylint: disable=g-docstring-has-escape
@deprecated("2016-12-30",
"`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`")
def _neg(x, name=None):
"""Computes numerical negative value element-wise.
I.e., \\(y = -x\\).
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
return negative(x, name)
# pylint: enable=g-docstring-has-escape
def sign(x, name=None):
"""Returns an element-wise indication of the sign of a number.
`y = sign(x) = -1` if `x < 0`; 0 if `x == 0` or `tf.is_nan(x)`; 1 if `x > 0`.
Zero is returned for NaN inputs.
For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
@compatibility(numpy)
Equivalent to numpy.sign except for the behavior for input values of NaN.
@end_compatibility
"""
with ops.name_scope(name, "Sign", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_sign = gen_math_ops.sign(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_sign, dense_shape=x.dense_shape)
else:
return gen_math_ops.sign(x, name=name)
def square(x, name=None):
r"""Computes square of x element-wise.
I.e., \\(y = x * x = x^2\\).
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Square", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_square = gen_math_ops.square(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_square, dense_shape=x.dense_shape)
else:
return gen_math_ops.square(x, name=name)
def sqrt(x, name=None):
r"""Computes square root of x element-wise.
I.e., \\(y = \sqrt{x} = x^{1/2}\\).
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Sqrt", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_sqrt = gen_math_ops.sqrt(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_sqrt, dense_shape=x.dense_shape)
else:
return gen_math_ops.sqrt(x, name=name)
def erf(x, name=None):
"""Computes the Gauss error function of `x` element-wise.
Args:
x: A `Tensor` of `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Erf", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_erf = gen_math_ops.erf(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_erf, dense_shape=x.dense_shape)
else:
return gen_math_ops.erf(x, name=name)
def scalar_mul(scalar, x):
"""Multiplies a scalar times a `Tensor` or `IndexedSlices` object.
Intended for use in gradient code which might deal with `IndexedSlices`
objects, which are easy to multiply by a scalar but more expensive to
multiply with arbitrary tensors.
Args:
scalar: A 0-D scalar `Tensor`. Must have known shape.
x: A `Tensor` or `IndexedSlices` to be scaled.
Returns:
`scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.
Raises:
ValueError: if scalar is not a 0-D `scalar`.
"""
scalar = ops.convert_to_tensor(
scalar, dtype=x.dtype.base_dtype, name="scalar")
shape = scalar.get_shape()
if shape.ndims == 0:
if isinstance(x, ops.IndexedSlices):
return ops.IndexedSlices(scalar * x.values, x.indices, x.dense_shape)
else:
return scalar * x
else:
raise ValueError("Only scalar multiply works, got shape %s" % shape)
def pow(x, y, name=None):
r"""Computes the power of one value to another.
Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
corresponding elements in `x` and `y`. For example:
```python
x = tf.constant([[2, 2], [3, 3]])
y = tf.constant([[8, 16], [2, 3]])
tf.pow(x, y) # [[256, 65536], [9, 27]]
```
Args:
x: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
or `complex128`.
y: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`.
"""
with ops.name_scope(name, "Pow", [x]) as name:
return gen_math_ops._pow(x, y, name=name)
# pylint: disable=redefined-builtin,redefined-outer-name
def complex(real, imag, name=None):
r"""Converts two real numbers to a complex number.
Given a tensor `real` representing the real part of a complex number, and a
tensor `imag` representing the imaginary part of a complex number, this
operation returns complex numbers elementwise of the form \\(a + bj\\), where
*a* represents the `real` part and *b* represents the `imag` part.
The input tensors `real` and `imag` must have the same shape.
For example:
```python
real = tf.constant([2.25, 3.25])
imag = tf.constant([4.75, 5.75])
tf.complex(real, imag) # [[2.25 + 4.75j], [3.25 + 5.75j]]
```
Args:
real: A `Tensor`. Must be one of the following types: `float32`,
`float64`.
imag: A `Tensor`. Must have the same type as `real`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `complex64` or `complex128`.
"""
real = ops.convert_to_tensor(real, name="real")
imag = ops.convert_to_tensor(imag, name="imag")
with ops.name_scope(name, "Complex", [real, imag]) as name:
input_types = (real.dtype, imag.dtype)
if input_types == (dtypes.float64, dtypes.float64):
Tout = dtypes.complex128
elif input_types == (dtypes.float32, dtypes.float32):
Tout = dtypes.complex64
else:
raise TypeError("real and imag have incorrect types: "
"{} {}".format(real.dtype.name, imag.dtype.name))
return gen_math_ops._complex(real, imag, Tout=Tout, name=name)
def real(input, name=None):
r"""Returns the real part of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float32` or `float64` that is the real part of each element in `input`.
All elements in `input` must be complex numbers of the form \\(a + bj\\),
where *a* is the real part returned by this operation and *b* is the
imaginary part.
For example:
```python
x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
tf.real(x) # [-2.25, 3.25]
```
If `input` is already real, it is returned unchanged.
Args:
input: A `Tensor`. Must have numeric type.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Real", [input]) as name:
real_dtype = input.dtype.real_dtype
if input.dtype.base_dtype == real_dtype:
return input
return gen_math_ops.real(input, Tout=real_dtype, name=name)
def imag(input, name=None):
r"""Returns the imaginary part of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float` that is the argument of each element in `input`. All elements in
`input` must be complex numbers of the form \\(a + bj\\), where *a*
is the real part and *b* is the imaginary part returned by the operation.
For example:
```python
x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
tf.imag(x) # [4.75, 5.75]
```
Args:
input: A `Tensor`. Must be one of the following types: `complex64`,
`complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Imag", [input]) as name:
return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
def angle(input, name=None):
r"""Returns the argument of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float32` or `float64` that is the argument of each element in `input`.
All elements in `input` must be complex numbers of the form \\(a + bj\\),
where *a* is the real part and *b* is the imaginary part.
The argument returned by this function is of the form \\(atan2(b, a)\\).
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.angle(input) ==> [2.0132, 1.056]
```
Args:
input: A `Tensor`. Must be one of the following types: `complex64`,
`complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Angle", [input]) as name:
return gen_math_ops.angle(input, Tout=input.dtype.real_dtype, name=name)
# pylint: enable=redefined-outer-name,redefined-builtin
def round(x, name=None):
"""Rounds the values of a tensor to the nearest integer, element-wise.
Rounds half to even. Also known as bankers rounding. If you want to round
according to the current system rounding mode use tf::cint.
For example:
```python
x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5])
tf.round(x) # [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
```
Args:
x: A `Tensor` of type `float32` or `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as `x`.
"""
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_integer:
return x
else:
return gen_math_ops.round(x, name=name)
def cast(x, dtype, name=None):
"""Casts a tensor to a new type.
The operation casts `x` (in case of `Tensor`) or `x.values`
(in case of `SparseTensor`) to `dtype`.
For example:
```python
x = tf.constant([1.8, 2.2], dtype=tf.float32)
tf.cast(x, tf.int32) # [1, 2], dtype=tf.int32
```
Args:
x: A `Tensor` or `SparseTensor`.
dtype: The destination type.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x`.
Raises:
TypeError: If `x` cannot be cast to the `dtype`.
"""
base_type = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "Cast", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
values_cast = cast(x.values, base_type, name=name)
return sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)
else:
# TODO(touts): Handle what Josh said.
#
# Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
# allows some conversions that cast() can't do, e.g. casting numbers to
# strings.
x = ops.convert_to_tensor(x, name="x")
if x.dtype.base_dtype == base_type:
return x
return gen_math_ops.cast(x, base_type, name=name)
def saturate_cast(value, dtype, name=None):
"""Performs a safe saturating cast of `value` to `dtype`.
This function casts the input to `dtype` without applying any scaling. If
there is a danger that values would over or underflow in the cast, this op
applies the appropriate clamping before the cast.
Args:
value: A `Tensor`.
dtype: The desired output `DType`.
name: A name for the operation (optional).
Returns:
`value` safely cast to `dtype`.
"""
# When casting to a type with smaller representable range, clamp.
# Note that this covers casting to unsigned types as well.
with ops.name_scope(name, "saturate_cast", [value]) as name:
value = ops.convert_to_tensor(value, name="value")
dtype = dtypes.as_dtype(dtype).base_dtype
if value.dtype.min < dtype.min:
value = gen_math_ops.maximum(value,
ops.convert_to_tensor(
dtype.min, dtype=value.dtype,
name="min"))
if value.dtype.max > dtype.max:
value = gen_math_ops.minimum(value,
ops.convert_to_tensor(
dtype.max, dtype=value.dtype,
name="max"))
return cast(value, dtype, name=name)
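# Illustrative (not part of the original docstring): values outside the target
# range are clamped before the cast, e.g.
#   saturate_cast(tf.constant([-1.0, 300.0]), tf.uint8)   # -> [0, 255]
# while a plain cast() gives no such guarantee for out-of-range values.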
def to_float(x, name="ToFloat"):
"""Casts a tensor to type `float32`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`.
Raises:
TypeError: If `x` cannot be cast to the `float32`.
"""
return cast(x, dtypes.float32, name=name)
def to_double(x, name="ToDouble"):
"""Casts a tensor to type `float64`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `float64`.
Raises:
TypeError: If `x` cannot be cast to the `float64`.
"""
return cast(x, dtypes.float64, name=name)
def to_int32(x, name="ToInt32"):
"""Casts a tensor to type `int32`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `int32`.
Raises:
TypeError: If `x` cannot be cast to the `int32`.
"""
return cast(x, dtypes.int32, name=name)
def to_int64(x, name="ToInt64"):
"""Casts a tensor to type `int64`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `int64`.
Raises:
TypeError: If `x` cannot be cast to the `int64`.
"""
return cast(x, dtypes.int64, name=name)
def to_bfloat16(x, name="ToBFloat16"):
"""Casts a tensor to type `bfloat16`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `bfloat16`.
Raises:
TypeError: If `x` cannot be cast to the `bfloat16`.
"""
return cast(x, dtypes.bfloat16, name=name)
ops.Tensor._override_operator("__neg__", gen_math_ops._neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator. Here we follow the numpy convention
# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean
# tensors and will throw a TypeError if used on nonboolean arrays
ops.Tensor._override_operator("__invert__", gen_math_ops.logical_not)
def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
"""Register operators with different tensor and scalar versions.
If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.
Args:
func: the operator
op_name: name of the operator being overridden
clazz_object: class to override for. Either `Tensor` or `SparseTensor`.
"""
def binary_op_wrapper(x, y):
with ops.name_scope(None, op_name, [x, y]) as name:
if not isinstance(y, sparse_tensor.SparseTensor):
try:
y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
except TypeError:
# If the RHS is not a tensor, it might be a tensor aware object
# that can implement the operator with knowledge of itself
# and the tensor.
if hasattr(type(y), "__r%s__" % op_name):
return NotImplemented
else:
raise
return func(x, y, name=name)
def binary_op_wrapper_sparse(sp_x, y):
with ops.name_scope(None, op_name, [sp_x, y]) as name:
y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
return sparse_tensor.SparseTensor(sp_x.indices,
func(
sp_x.indices,
sp_x.values,
sp_x.dense_shape,
y,
name=name), sp_x.dense_shape)
def r_binary_op_wrapper(y, x):
with ops.name_scope(None, op_name, [x, y]) as name:
x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
return func(x, y, name=name)
# Propagate func.__doc__ to the wrappers
try:
doc = func.__doc__
except AttributeError:
doc = None
binary_op_wrapper.__doc__ = doc
r_binary_op_wrapper.__doc__ = doc
binary_op_wrapper_sparse.__doc__ = doc
if clazz_object is ops.Tensor:
clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
del binary_op_wrapper
clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
del r_binary_op_wrapper
else:
clazz_object._override_operator("__%s__" % op_name,
binary_op_wrapper_sparse)
del binary_op_wrapper_sparse
# Conversion table for __truediv__. None entries mean no conversion required.
_TRUEDIV_TABLE = {
dtypes.uint8: dtypes.float32,
dtypes.int8: dtypes.float32,
dtypes.uint16: dtypes.float32,
dtypes.int16: dtypes.float32,
dtypes.int32: dtypes.float64,
dtypes.int64: dtypes.float64,
dtypes.float16: None,
dtypes.float32: None,
dtypes.float64: None,
dtypes.complex64: None,
dtypes.complex128: None,
}
# NOTE: the support of "sparse (true)div dense" is currently not baked in into
# "tf.(true_)div()". Until such an API decision is made, the supported usage is
# to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
"""Internal helper function for 'sp_t / dense_t'."""
with ops.name_scope(name, "truediv", [sp_indices, sp_values, sp_shape,
y]) as name:
sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
y = ops.convert_to_tensor(y, name="y")
x_dtype = sp_values.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
try:
dtype = _TRUEDIV_TABLE[x_dtype]
except KeyError:
raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
if dtype is not None:
sp_values = cast(sp_values, dtype)
y = cast(y, dtype)
return gen_sparse_ops.sparse_dense_cwise_div(
sp_indices, sp_values, sp_shape, y, name=name)
def _truediv_python3(x, y, name=None):
with ops.name_scope(name, "truediv", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y")
x_dtype = x.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
try:
dtype = _TRUEDIV_TABLE[x_dtype]
except KeyError:
raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
if dtype is not None:
x = cast(x, dtype)
y = cast(y, dtype)
return gen_math_ops._real_div(x, y, name=name)
def _div_python2(x, y, name=None):
"""Divide two values using Python 2 semantics. Used for Tensor.__div__.
Args:
x: `Tensor` numerator of real numeric type.
y: `Tensor` denominator of real numeric type.
name: A name for the operation (optional).
Returns:
`x / y` returns the quotient of x and y.
"""
with ops.name_scope(name, "div", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
x_dtype = x.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
if x_dtype.is_floating or x_dtype.is_complex:
return gen_math_ops._real_div(x, y, name=name)
else:
return gen_math_ops._floor_div(x, y, name=name)
def truediv(x, y, name=None):
"""Divides x / y elementwise (using Python 3 division operator semantics).
NOTE: Prefer using the Tensor operator or tf.divide which obey Python
division operator semantics.
This function forces Python 3 division operator semantics where all integer
arguments are cast to floating types first. This op is generated by normal
`x / y` division in Python 3 and in Python 2.7 with
`from __future__ import division`. If you want integer division that rounds
down, use `x // y` or `tf.floordiv`.
`x` and `y` must have the same numeric type. If the inputs are floating
point, the output will have the same type. If the inputs are integral, the
inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
and `int64` (matching the behavior of Numpy).
Args:
x: `Tensor` numerator of numeric type.
y: `Tensor` denominator of numeric type.
name: A name for the operation (optional).
Returns:
`x / y` evaluated in floating point.
Raises:
TypeError: If `x` and `y` have different dtypes.
"""
return _truediv_python3(x, y, name)
def div(x, y, name=None):
"""Divides x / y elementwise (using Python 2 division operator semantics).
NOTE: Prefer using the Tensor division operator or tf.divide which obey Python
division operator semantics.
This function divides `x` and `y`, forcing Python 2.7 semantics. That is,
if one of `x` or `y` is a float, then the result will be a float.
Otherwise, the output will be an integer type. Flooring semantics are used
for integer division.
Args:
x: `Tensor` numerator of real numeric type.
y: `Tensor` denominator of real numeric type.
name: A name for the operation (optional).
Returns:
`x / y` returns the quotient of x and y.
"""
return _div_python2(x, y, name)
# TODO(aselle): This should be removed
mod = gen_math_ops._floor_mod
# TODO(aselle): Deprecate this once all internal functionality uses
# tf.truncatediv
def floordiv(x, y, name=None):
"""Divides `x / y` elementwise, rounding toward the most negative integer.
The same as `tf.div(x,y)` for integers, but uses `tf.floor(tf.div(x,y))` for
floating point arguments so that the result is always an integer (though
possibly an integer represented as floating point). This op is generated by
`x // y` floor division in Python 3 and in Python 2.7 with
`from __future__ import division`.
Note that for efficiency, `floordiv` uses C semantics for negative numbers
(unlike Python and Numpy).
`x` and `y` must have the same type, and the result will have the same type
as well.
Args:
x: `Tensor` numerator of real numeric type.
y: `Tensor` denominator of real numeric type.
name: A name for the operation (optional).
Returns:
`x / y` rounded down (except possibly towards zero for negative integers).
Raises:
TypeError: If the inputs are complex.
"""
with ops.name_scope(name, "floordiv", [x, y]) as name:
return gen_math_ops._floor_div(x, y, name=name)
realdiv = gen_math_ops._real_div
truncatediv = gen_math_ops._truncate_div
# TODO(aselle): Rename this to floordiv when we can.
floor_div = gen_math_ops._floor_div
truncatemod = gen_math_ops._truncate_mod
floormod = gen_math_ops._floor_mod
def _mul_dispatch(x, y, name=None):
"""Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
is_tensor_y = isinstance(y, ops.Tensor)
if is_tensor_y:
return gen_math_ops._mul(x, y, name=name)
else:
assert isinstance(y, sparse_tensor.SparseTensor) # Case: Dense * Sparse.
new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
y.dense_shape, x, name)
return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)
# NOTE(aselle): When integer division is added for sparse_dense_cwise,
# div, truediv, and floordiv should be delegated appropriately for
# Python semantics, analogous to dense cwise tensor operations.
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_math_ops.add, "add")
_OverrideBinaryOperatorHelper(gen_math_ops._sub, "sub")
_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
_OverrideBinaryOperatorHelper(_div_python2, "div")
_OverrideBinaryOperatorHelper(_truediv_python3, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
_OverrideBinaryOperatorHelper(gen_math_ops._floor_mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")
def logical_xor(x, y, name="LogicalXor"):
"""x ^ y = (x | y) & ~(x & y)."""
# TODO(alemi) Make this a cwise op if people end up relying on it.
return gen_math_ops.logical_and(
gen_math_ops.logical_or(x, y),
gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)),
name=name)
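# Illustrative (not part of the original code): logical_xor is built from and/or/not
# above, so elementwise it behaves like ^ on booleans, e.g.
#   logical_xor(tf.constant([True, True, False, False]),
#               tf.constant([True, False, True, False]))
#   # -> [False, True, True, False]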
_OverrideBinaryOperatorHelper(gen_math_ops.logical_and, "and")
_OverrideBinaryOperatorHelper(gen_math_ops.logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")
ops.Tensor._override_operator("__lt__", gen_math_ops.less)
ops.Tensor._override_operator("__le__", gen_math_ops.less_equal)
ops.Tensor._override_operator("__gt__", gen_math_ops.greater)
ops.Tensor._override_operator("__ge__", gen_math_ops.greater_equal)
def range(start, limit=None, delta=1, dtype=None, name="range"):
"""Creates a sequence of numbers.
Creates a sequence of numbers that begins at `start` and extends by
increments of `delta` up to but not including `limit`.
The dtype of the resulting tensor is inferred from the inputs unless
it is provided explicitly.
Like the Python builtin `range`, `start` defaults to 0, so that
`range(n) = range(0, n)`.
For example:
```python
start = 3
limit = 18
delta = 3
tf.range(start, limit, delta) # [3, 6, 9, 12, 15]
start = 3
limit = 1
delta = -0.5
tf.range(start, limit, delta) # [3, 2.5, 2, 1.5]
limit = 5
tf.range(limit) # [0, 1, 2, 3, 4]
```
Args:
start: A 0-D `Tensor` (scalar). Acts as first entry in the range if
`limit` is not None; otherwise, acts as range limit and first entry
defaults to 0.
limit: A 0-D `Tensor` (scalar). Upper limit of sequence,
exclusive. If None, defaults to the value of `start` while the first
entry of the range defaults to 0.
delta: A 0-D `Tensor` (scalar). Number that increments
`start`. Defaults to 1.
dtype: The type of the elements of the resulting tensor.
name: A name for the operation. Defaults to "range".
Returns:
An 1-D `Tensor` of type `dtype`.
@compatibility(numpy)
Equivalent to np.arange
@end_compatibility
"""
if limit is None:
start, limit = 0, start
with ops.name_scope(name, "Range", [start, limit, delta]) as name:
start = ops.convert_to_tensor(start, dtype=dtype, name="start")
limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")
# infer dtype if not explicitly provided
if dtype is None:
dtype_hierarchy = [
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64
]
assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta])
inferred_dtype = max(
[arg.dtype for arg in [start, limit, delta]],
key=dtype_hierarchy.index)
start = cast(start, inferred_dtype)
limit = cast(limit, inferred_dtype)
delta = cast(delta, inferred_dtype)
return gen_math_ops._range(start, limit, delta, name=name)
# Reduction operations
def _ReductionDims(x, axis, reduction_indices):
"""Returns range(0, rank(x)) if reduction_indices is None."""
# TODO(aselle): Remove this after deprecation
if reduction_indices is not None:
if axis is not None:
raise ValueError("Can't specify both axis' and 'reduction_indices'.")
axis = reduction_indices
if axis is not None:
return axis
else:
# Fast path: avoid creating Rank and Range ops if ndims is known.
if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
return constant_op.constant(
np.arange(x.get_shape().ndims), dtype=dtypes.int32)
if (isinstance(x, sparse_tensor.SparseTensor) and
x.dense_shape.get_shape().is_fully_defined()):
rank = x.dense_shape.get_shape()[0].value # sparse.dense_shape is 1-D.
return constant_op.constant(np.arange(rank), dtype=dtypes.int32)
# Otherwise, we rely on Range and Rank to do the right thing at run-time.
return range(0, array_ops.rank(x))
def reduce_sum(input_tensor,
axis=None,
keep_dims=False,
name=None,
reduction_indices=None):
"""Computes the sum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[1, 1, 1], [1, 1, 1]])
tf.reduce_sum(x) # 6
tf.reduce_sum(x, 0) # [2, 2, 2]
tf.reduce_sum(x, 1) # [3, 3]
tf.reduce_sum(x, 1, keep_dims=True) # [[3], [3]]
tf.reduce_sum(x, [0, 1]) # 6
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.sum
@end_compatibility
"""
return gen_math_ops._sum(
input_tensor,
_ReductionDims(input_tensor, axis, reduction_indices),
keep_dims,
name=name)
def count_nonzero(input_tensor,
axis=None,
keep_dims=False,
dtype=dtypes.int64,
name=None,
reduction_indices=None):
"""Computes number of nonzero elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
**NOTE** Floating point comparison to zero is done by exact floating point
equality check. Small values are **not** rounded to zero for purposes of
the nonzero check.
For example:
```python
x = tf.constant([[0, 1, 0], [1, 1, 0]])
tf.count_nonzero(x) # 3
tf.count_nonzero(x, 0) # [1, 2, 0]
tf.count_nonzero(x, 1) # [1, 2]
tf.count_nonzero(x, 1, keep_dims=True) # [[1], [2]]
tf.count_nonzero(x, [0, 1]) # 3
```
Args:
input_tensor: The tensor to reduce. Should be of numeric type, or `bool`.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keep_dims: If true, retains reduced dimensions with length 1.
dtype: The output dtype; defaults to `tf.int64`.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
Returns:
The reduced tensor (number of nonzero values).
"""
with ops.name_scope(name, "count_nonzero", [input_tensor]):
input_tensor = ops.convert_to_tensor(input_tensor, name="input_tensor")
zero = input_tensor.dtype.as_numpy_dtype()
return cast(
reduce_sum(
# int64 reduction happens on GPU
to_int64(gen_math_ops.not_equal(input_tensor, zero)),
axis=axis,
keep_dims=keep_dims,
reduction_indices=reduction_indices),
dtype=dtype)
def reduce_mean(input_tensor,
axis=None,
keep_dims=False,
name=None,
reduction_indices=None):
"""Computes the mean of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[1., 1.], [2., 2.]])
tf.reduce_mean(x) # 1.5
tf.reduce_mean(x, 0) # [1.5, 1.5]
tf.reduce_mean(x, 1) # [1., 2.]
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.mean
@end_compatibility
"""
return gen_math_ops._mean(
input_tensor,
_ReductionDims(input_tensor, axis, reduction_indices),
keep_dims,
name=name)
def reduce_prod(input_tensor,
axis=None,
keep_dims=False,
name=None,
reduction_indices=None):
"""Computes the product of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.prod
@end_compatibility
"""
return gen_math_ops._prod(
input_tensor,
_ReductionDims(input_tensor, axis, reduction_indices),
keep_dims,
name=name)
def reduce_min(input_tensor,
axis=None,
keep_dims=False,
name=None,
reduction_indices=None):
"""Computes the minimum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.min
@end_compatibility
"""
return gen_math_ops._min(
input_tensor,
_ReductionDims(input_tensor, axis, reduction_indices),
keep_dims,
name=name)
def reduce_max(input_tensor,
axis=None,
keep_dims=False,
name=None,
reduction_indices=None):
"""Computes the maximum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.max
@end_compatibility
"""
return gen_math_ops._max(
input_tensor,
_ReductionDims(input_tensor, axis, reduction_indices),
keep_dims,
name=name)
def reduce_all(input_tensor,
axis=None,
keep_dims=False,
name=None,
reduction_indices=None):
"""Computes the "logical and" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[True, True], [False, False]])
tf.reduce_all(x) # False
tf.reduce_all(x, 0) # [False, False]
tf.reduce_all(x, 1) # [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.all
@end_compatibility
"""
return gen_math_ops._all(
input_tensor,
_ReductionDims(input_tensor, axis, reduction_indices),
keep_dims,
name=name)
def reduce_any(input_tensor,
axis=None,
keep_dims=False,
name=None,
reduction_indices=None):
"""Computes the "logical or" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[True, True], [False, False]])
tf.reduce_any(x) # True
tf.reduce_any(x, 0) # [True, True]
tf.reduce_any(x, 1) # [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.any
@end_compatibility
"""
return gen_math_ops._any(
input_tensor,
_ReductionDims(input_tensor, axis, reduction_indices),
keep_dims,
name=name)
def reduce_logsumexp(input_tensor,
axis=None,
keep_dims=False,
name=None,
reduction_indices=None):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than log(sum(exp(input))). It avoids
overflows caused by taking the exp of large inputs and underflows caused by
taking the log of small inputs.
For example:
```python
x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
tf.reduce_logsumexp(x) # log(6)
tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)]
tf.reduce_logsumexp(x, 1) # [log(3), log(3)]
tf.reduce_logsumexp(x, 1, keep_dims=True) # [[log(3)], [log(3)]]
tf.reduce_logsumexp(x, [0, 1]) # log(6)
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default),
reduces all dimensions. Must be in the range
`[-rank(input_tensor), rank(input_tensor))`.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
Returns:
The reduced tensor.
"""
with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
raw_max = reduce_max(
input_tensor,
axis=axis,
reduction_indices=reduction_indices,
keep_dims=True)
my_max = array_ops.stop_gradient(
array_ops.where(
gen_math_ops.is_finite(raw_max),
raw_max,
array_ops.zeros_like(raw_max)))
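# Shift by the (finite) per-axis max before exponentiating so exp() cannot
# overflow; the same max is added back after the log below.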
result = gen_math_ops.log(
reduce_sum(
gen_math_ops.exp(input_tensor - my_max),
axis,
keep_dims=True,
reduction_indices=reduction_indices)) + my_max
if not keep_dims:
if isinstance(axis, int):
axis = [axis]
result = array_ops.squeeze(result, axis)
return result
def trace(x, name=None):
"""Compute the trace of a tensor `x`.
`trace(x)` returns the sum along the main diagonal of each inner-most matrix
in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output
is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where
`output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])`
For example:
```python
x = tf.constant([[1, 2], [3, 4]])
tf.trace(x) # 5
x = tf.constant([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
tf.trace(x) # 15
x = tf.constant([[[1, 2, 3],
[4, 5, 6],
[7, 8, 9]],
[[-1, -2, -3],
[-4, -5, -6],
[-7, -8, -9]]])
tf.trace(x) # [15, -15]
```
Args:
x: tensor.
name: A name for the operation (optional).
Returns:
The trace of input tensor.
"""
with ops.name_scope(name, "Trace", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)
def matmul(a,
b,
transpose_a=False,
transpose_b=False,
adjoint_a=False,
adjoint_b=False,
a_is_sparse=False,
b_is_sparse=False,
name=None):
"""Multiplies matrix `a` by matrix `b`, producing `a` * `b`.
The inputs must, following any transpositions, be tensors of rank >= 2
where the inner 2 dimensions specify valid matrix multiplication arguments,
and any further outer dimensions match.
Both matrices must be of the same type. The supported types are:
`float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.
Either matrix can be transposed or adjointed (conjugated and transposed) on
the fly by setting one of the corresponding flags to `True`. These are `False`
by default.
If one or both of the matrices contain a lot of zeros, a more efficient
multiplication algorithm can be used by setting the corresponding
`a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
This optimization is only available for plain matrices (rank-2 tensors) with
datatypes `bfloat16` or `float32`.
For example:
```python
# 2-D tensor `a`
# [[1, 2, 3],
# [4, 5, 6]]
a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
# 2-D tensor `b`
# [[ 7, 8],
# [ 9, 10],
# [11, 12]]
b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])
# `a` * `b`
# [[ 58, 64],
# [139, 154]]
c = tf.matmul(a, b)
# 3-D tensor `a`
# [[[ 1, 2, 3],
# [ 4, 5, 6]],
# [[ 7, 8, 9],
# [10, 11, 12]]]
a = tf.constant(np.arange(1, 13, dtype=np.int32),
shape=[2, 2, 3])
# 3-D tensor `b`
# [[[13, 14],
# [15, 16],
# [17, 18]],
# [[19, 20],
# [21, 22],
# [23, 24]]]
b = tf.constant(np.arange(13, 25, dtype=np.int32),
shape=[2, 3, 2])
# `a` * `b`
# [[[ 94, 100],
# [229, 244]],
# [[508, 532],
# [697, 730]]]
c = tf.matmul(a, b)
# Since Python >= 3.5, the @ operator is supported (see PEP 465).
# In TensorFlow, it simply calls the `tf.matmul()` function, so the
# following lines are equivalent:
d = a @ b @ [[10.], [11.]]
d = tf.matmul(tf.matmul(a, b), [[10.], [11.]])
```
Args:
a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,
`complex128` and rank > 1.
b: `Tensor` with same type and rank as `a`.
transpose_a: If `True`, `a` is transposed before multiplication.
transpose_b: If `True`, `b` is transposed before multiplication.
adjoint_a: If `True`, `a` is conjugated and transposed before
multiplication.
adjoint_b: If `True`, `b` is conjugated and transposed before
multiplication.
a_is_sparse: If `True`, `a` is treated as a sparse matrix.
b_is_sparse: If `True`, `b` is treated as a sparse matrix.
name: Name for the operation (optional).
Returns:
A `Tensor` of the same type as `a` and `b` where each inner-most matrix is
the product of the corresponding matrices in `a` and `b`, e.g. if all
transpose or adjoint attributes are `False`:
`output`[..., i, j] = sum_k (`a`[..., i, k] * `b`[..., k, j]),
for all indices i, j.
Note: This is matrix product, not element-wise product.
Raises:
ValueError: If transpose_a and adjoint_a, or transpose_b and adjoint_b
are both set to True.
"""
with ops.name_scope(name, "MatMul", [a, b]) as name:
if transpose_a and adjoint_a:
raise ValueError("Only one of transpose_a and adjoint_a can be True.")
if transpose_b and adjoint_b:
raise ValueError("Only one of transpose_b and adjoint_b can be True.")
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
a_shape = a.get_shape()
b_shape = b.get_shape()
if (not a_is_sparse and not b_is_sparse) and (
(a_shape.ndims is None or a_shape.ndims > 2) and
(b_shape.ndims is None or b_shape.ndims > 2)):
# BatchMatmul does not support transpose, so we conjugate the matrix and
# use adjoint instead. Conj() is a noop for real matrices.
if transpose_a:
a = conj(a)
adjoint_a = True
if transpose_b:
b = conj(b)
adjoint_b = True
return gen_math_ops._batch_mat_mul(
a, b, adj_x=adjoint_a, adj_y=adjoint_b, name=name)
# Neither matmul nor sparse_matmul support adjoint, so we conjugate
# the matrix and use transpose instead. Conj() is a noop for real
# matrices.
if adjoint_a:
a = conj(a)
transpose_a = True
if adjoint_b:
b = conj(b)
transpose_b = True
sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
use_sparse_matmul = (a.dtype in sparse_matmul_types and
b.dtype in sparse_matmul_types and
(a_is_sparse or b_is_sparse))
if dtypes.bfloat16 in (a.dtype, b.dtype):
# matmul currently doesn't handle bfloat16 inputs.
use_sparse_matmul = True
if use_sparse_matmul:
return sparse_matmul(
a,
b,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=a_is_sparse,
b_is_sparse=b_is_sparse,
name=name)
else:
return gen_math_ops._mat_mul(
a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
_OverrideBinaryOperatorHelper(matmul, "matmul")
sparse_matmul = gen_math_ops._sparse_mat_mul
@ops.RegisterStatistics("MatMul", "flops")
def _calc_mat_mul_flops(graph, node):
"""Calculates the compute resources needed for MatMul."""
transpose_a = node.attr["transpose_a"].b
a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
a_shape.assert_is_fully_defined()
if transpose_a:
k = int(a_shape[0])
else:
k = int(a_shape[1])
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
output_count = np.prod(output_shape.as_list())
return ops.OpStats("flops", (k * output_count * 2))
def _as_indexed_slices(x, optimize=True):
"""Convert 'x' to IndexedSlices.
Convert a dense Tensor to a block-sparse IndexedSlices.
Args:
x: Either a Tensor object, or an IndexedSlices object.
optimize: if true, attempt to optimize the conversion of 'x'.
Returns:
An IndexedSlices object.
Raises:
TypeError: If 'x' is not a Tensor or an IndexedSlices object.
"""
# TODO(touts): op_scope
if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):
raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
if isinstance(x, ops.IndexedSlices):
return x
x_shape = array_ops.shape_internal(x, optimize=optimize)
return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs, optimize=True):
"""Convert all elements of 'inputs' to IndexedSlices.
Additionally, homogenize the types of all the indices to
either int32 or int64.
Args:
inputs: List containing either Tensor or IndexedSlices objects.
optimize: if true, attempt to optimize the conversion of each input.
Returns:
A list of IndexedSlices objects.
Raises:
TypeError: If 'inputs' is not a list or a tuple.
"""
if not isinstance(inputs, (list, tuple)):
raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
with_int32_index = [
o.indices for o in outputs if o.indices.dtype == dtypes.int32
]
if not with_int32_index or len(with_int32_index) == len(outputs):
return outputs
casted_outputs = []
for o in outputs:
if o.indices.dtype == dtypes.int32:
casted_outputs.append(
ops.IndexedSlices(o.values,
cast(o.indices, dtypes.int64), o.dense_shape))
else:
casted_outputs.append(o)
return casted_outputs
def add_n(inputs, name=None):
"""Adds all input tensors element-wise.
Args:
inputs: A list of `Tensor` objects, each with same shape and type.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as the elements of `inputs`.
Raises:
ValueError: If `inputs` don't all have same shape and dtype or the shape
cannot be inferred.
"""
if not inputs or not isinstance(inputs, (list, tuple)):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
if not all(isinstance(x, ops.Tensor) for x in inputs):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
if len(inputs) == 1:
if name:
return array_ops.identity(inputs[0], name=name)
return inputs[0]
return gen_math_ops._add_n(inputs, name=name)
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
"""Returns the element-wise sum of a list of tensors.
Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
otherwise, these are inferred.
NOTE: This operation is not differentiable and cannot be used if inputs depend
on trainable variables. Please use `tf.add_n` for such cases.
Aside from differentiability, `tf.accumulate_n` performs the same operation as
`tf.add_n`, but does not wait for all of its inputs to be ready before
beginning to sum. This can save memory if inputs are ready at different times,
since minimum temporary storage is proportional to the output size rather than
the combined size of the inputs.
For example:
```python
a = tf.constant([[1, 2], [3, 4]])
b = tf.constant([[5, 0], [0, 6]])
tf.accumulate_n([a, b, a]) # [[7, 4], [6, 14]]
# Explicitly pass shape and type
tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32) # [[7, 4],
# [6, 14]]
```
Args:
inputs: A list of `Tensor` objects, each with same shape and type.
shape: Shape of elements of `inputs`.
tensor_dtype: The type of `inputs`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as the elements of `inputs`.
Raises:
ValueError: If `inputs` don't all have same shape and dtype or the shape
cannot be inferred.
"""
if not inputs or not isinstance(inputs, (list, tuple)):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
if not all(isinstance(x, ops.Tensor) for x in inputs):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
if not all(x.dtype == inputs[0].dtype for x in inputs):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
if shape is not None:
shape = tensor_shape.as_shape(shape)
else:
shape = tensor_shape.unknown_shape()
for input_tensor in inputs:
if isinstance(input_tensor, ops.Tensor):
shape = shape.merge_with(input_tensor.get_shape())
if tensor_dtype is None:
tensor_dtype = inputs[0].dtype
if tensor_dtype != inputs[0].dtype:
raise TypeError("tensor_dtype is {}, but input is of type {}"
.format(tensor_dtype, inputs[0].dtype))
if len(inputs) == 1:
return inputs[0]
with ops.name_scope(name, "AccumulateN", inputs) as name:
var = gen_state_ops._temporary_variable(
shape=tensor_shape.vector(0), dtype=tensor_dtype)
with ops.colocate_with(var):
zeros = array_ops.zeros_like(gen_control_flow_ops._merge(inputs)[0])
zeros.set_shape(shape)
ref = state_ops.assign(var, zeros, validate_shape=False)
update_ops = [
state_ops.assign_add(ref, input_tensor, use_locking=True)
for input_tensor in inputs
]
with ops.control_dependencies(update_ops):
return gen_state_ops._destroy_temporary_variable(
ref, var_name=var.op.name, name=name)
def sigmoid(x, name=None):
"""Computes sigmoid of `x` element-wise.
Specifically, `y = 1 / (1 + exp(-x))`.
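For example (an illustrative sketch):
```python
x = tf.constant([-1.0, 0.0, 1.0])
tf.sigmoid(x)  # approximately [0.2689, 0.5, 0.7311]
```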
Args:
x: A Tensor with type `float32`, `float64`, `int32`, `complex64`, `int64`,
or `qint32`.
name: A name for the operation (optional).
Returns:
A Tensor with the same type as `x` if `x.dtype != qint32`
otherwise the return type is `quint8`.
@compatibility(numpy)
Equivalent to np.scipy.special.expit
@end_compatibility
"""
with ops.name_scope(name, "Sigmoid", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops._sigmoid(x, name=name)
def log_sigmoid(x, name=None):
"""Computes log sigmoid of `x` element-wise.
Specifically, `y = log(1 / (1 + exp(-x)))`. For numerical stability,
we use `y = -tf.nn.softplus(-x)`.
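For example (an illustrative sketch):
```python
x = tf.constant([0.0, 1.0])
tf.log_sigmoid(x)  # approximately [-0.6931, -0.3133]
```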
Args:
x: A Tensor with type `float32` or `float64`.
name: A name for the operation (optional).
Returns:
A Tensor with the same type as `x`.
"""
with ops.name_scope(name, "LogSigmoid", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops._neg(gen_nn_ops.softplus(-x), name=name)
def tanh(x, name=None):
"""Computes hyperbolic tangent of `x` element-wise.
Args:
x: A Tensor or SparseTensor with type `float`, `double`, `int32`,
`complex64`, or `int64`.
name: A name for the operation (optional).
Returns:
A Tensor or SparseTensor respectively with the same type as `x`.
"""
with ops.name_scope(name, "Tanh", [x]) as name:
if isinstance(x, sparse_tensor.SparseTensor):
x_tanh = gen_math_ops._tanh(x.values, name=name)
return sparse_tensor.SparseTensor(
indices=x.indices, values=x_tanh, dense_shape=x.dense_shape)
else:
return gen_math_ops._tanh(x, name=name)
def bincount(arr,
weights=None,
minlength=None,
maxlength=None,
dtype=dtypes.int32):
"""Counts the number of occurrences of each value in an integer array.
If `minlength` and `maxlength` are not given, returns a vector with length
`tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise.
If `weights` is non-None, then index `i` of the output stores the sum of the
value in `weights` at each index where the corresponding value in `arr` is
`i`.
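For example (an illustrative sketch):
```python
values = tf.constant([1, 1, 2, 3, 2, 4, 4, 5])
tf.bincount(values)  # [0, 2, 2, 1, 2, 1]
tf.bincount(values, minlength=8)  # [0, 2, 2, 1, 2, 1, 0, 0]
```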
Args:
arr: An int32 tensor of non-negative values.
weights: If non-None, must be the same shape as arr. For each value in
`arr`, the bin will be incremented by the corresponding weight instead
of 1.
minlength: If given, ensures the output has length at least `minlength`,
padding with zeros at the end if necessary.
maxlength: If given, skips values in `arr` that are equal to or greater than
`maxlength`, ensuring that the output has length at most `maxlength`.
dtype: If `weights` is None, determines the type of the output bins.
Returns:
A vector with the same dtype as `weights` or the given `dtype`. The bin
values.
"""
arr = ops.convert_to_tensor(arr, name="arr", dtype=dtypes.int32)
array_is_nonempty = reduce_prod(array_ops.shape(arr)) > 0
output_size = cast(array_is_nonempty, dtypes.int32) * (reduce_max(arr) + 1)
if minlength is not None:
minlength = ops.convert_to_tensor(
minlength, name="minlength", dtype=dtypes.int32)
output_size = gen_math_ops.maximum(minlength, output_size)
if maxlength is not None:
maxlength = ops.convert_to_tensor(
maxlength, name="maxlength", dtype=dtypes.int32)
output_size = gen_math_ops.minimum(maxlength, output_size)
weights = (ops.convert_to_tensor(weights, name="weights")
if weights is not None else constant_op.constant([], dtype))
return gen_math_ops.bincount(arr, output_size, weights)
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
"""Compute the cumulative sum of the tensor `x` along `axis`.
By default, this op performs an inclusive cumsum, which means that the first
element of the input is identical to the first element of the output:
```python
tf.cumsum([a, b, c]) # [a, a + b, a + b + c]
```
By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed
instead:
```python
tf.cumsum([a, b, c], exclusive=True) # [0, a, a + b]
```
By setting the `reverse` kwarg to `True`, the cumsum is performed in the
opposite direction:
```python
tf.cumsum([a, b, c], reverse=True) # [a + b + c, b + c, c]
```
This is more efficient than using separate `tf.reverse` ops.
The `reverse` and `exclusive` kwargs can also be combined:
```python
tf.cumsum([a, b, c], exclusive=True, reverse=True) # [b + c, c, 0]
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
`complex128`, `qint8`, `quint8`, `qint32`, `half`.
axis: A `Tensor` of type `int32` (default: 0). Must be in the range
`[-rank(x), rank(x))`.
exclusive: If `True`, perform exclusive cumsum.
reverse: A `bool` (default: False).
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Cumsum", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.cumsum(
x, axis, exclusive=exclusive, reverse=reverse, name=name)
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
"""Compute the cumulative product of the tensor `x` along `axis`.
By default, this op performs an inclusive cumprod, which means that the
first element of the input is identical to the first element of the output:
```python
tf.cumprod([a, b, c]) # [a, a * b, a * b * c]
```
By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
performed
instead:
```python
tf.cumprod([a, b, c], exclusive=True) # [1, a, a * b]
```
By setting the `reverse` kwarg to `True`, the cumprod is performed in the
opposite direction:
```python
tf.cumprod([a, b, c], reverse=True) # [a * b * c, b * c, c]
```
This is more efficient than using separate `tf.reverse` ops.
The `reverse` and `exclusive` kwargs can also be combined:
```python
tf.cumprod([a, b, c], exclusive=True, reverse=True) # [b * c, c, 1]
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
`complex128`, `qint8`, `quint8`, `qint32`, `half`.
axis: A `Tensor` of type `int32` (default: 0). Must be in the range
`[-rank(x), rank(x))`.
exclusive: If `True`, perform exclusive cumprod.
reverse: A `bool` (default: False).
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Cumprod", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.cumprod(
x, axis, exclusive=exclusive, reverse=reverse, name=name)
def conj(x, name=None):
r"""Returns the complex conjugate of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
complex numbers that are the complex conjugate of each element in `input`. The
complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
real part and *b* is the imaginary part.
The complex conjugate returned by this operation is of the form \\(a - bj\\).
For example:
```python
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.conj(input)  # [-2.25 - 4.75j, 3.25 - 5.75j]
```
If `x` is real, it is returned unchanged.
Args:
x: `Tensor` to conjugate. Must have numeric type.
name: A name for the operation (optional).
Returns:
A `Tensor` that is the conjugate of `x` (with the same type).
Raises:
TypeError: If `x` is not a numeric tensor.
"""
with ops.name_scope(name, "Conj", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_complex:
return gen_math_ops._conj(x, name=name)
elif x.dtype.is_floating or x.dtype.is_integer:
return x
else:
raise TypeError("Expected numeric tensor, got dtype %r" % x.dtype)
def _BroadcastShape(op):
"""Common shape function for binary operators that broadcast their inputs."""
return [
common_shapes.broadcast_shape(op.inputs[0].get_shape(),
op.inputs[1].get_shape())
]
def reduced_shape(input_shape, axes):
"""Helper function for reduction ops.
Args:
input_shape: 1-D Tensor, the shape of the Tensor being reduced.
axes: 1-D Tensor, the reduction axes.
Returns:
A 1-D Tensor, the output shape as if keep_dims were set to True.
"""
# Example:
# cast needed for SparseTensor reductions
input_shape = to_int32(input_shape) # [2, 3, 5, 7]
axes = to_int32(axes) # [1, 2]
input_rank = array_ops.size(input_shape) # 4
axes = (axes + input_rank) % input_rank
axes_shape = array_ops.shape(axes) # [2]
return gen_data_flow_ops.dynamic_stitch( # [2, 1, 1, 7]
[
range(input_rank), # [0, 1, 2, 3]
axes
], # [1, 2]
[
input_shape, # [2, 3, 5, 7]
array_ops.fill(axes_shape, 1)
]) # [1, 1]
def tensordot(a, b, axes, name=None):
r"""Tensor contraction of a and b along specified axes.
Tensordot (also known as tensor contraction) sums the product of elements
from `a` and `b` over the indices specified by `a_axes` and `b_axes`.
The lists `a_axes` and `b_axes` specify those pairs of axes along which to
contract the tensors. The axis `a_axes[i]` of `a` must have the same dimension
as axis `b_axes[i]` of `b` for all `i` in `range(0, len(a_axes))`. The lists
`a_axes` and `b_axes` must have identical length and consist of unique
integers that specify valid axes for each of the tensors.
This operation corresponds to `numpy.tensordot(a, b, axes)`.
Example 1: When `a` and `b` are matrices (order 2), the case `axes = 1`
is equivalent to matrix multiplication.
Example 2: When `a` and `b` are matrices (order 2), the case
`axes = [[1], [0]]` is equivalent to matrix multiplication.
Example 3: Suppose that \\(a_{ijk}\\) and \\(b_{lmn}\\) represent two
tensors of order 3. Then, `contract(a, b, [[0], [2]])` is the order 4 tensor
\\(c_{jklm}\\) whose entry
corresponding to the indices \\((j,k,l,m)\\) is given by:
\\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\).
In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.
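For example (an illustrative sketch):
```python
a = tf.constant([[1., 2.], [3., 4.]])
b = tf.constant([[5., 6.], [7., 8.]])
tf.tensordot(a, b, 1)  # same as tf.matmul(a, b): [[19., 22.], [43., 50.]]
tf.tensordot(a, b, [[0], [0]])  # contract the first axes: [[26., 30.], [38., 44.]]
```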
Args:
a: `Tensor` of type `float32` or `float64`.
b: `Tensor` with the same type as `a`.
axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].
If axes is a scalar, sum over the last N axes of a and the first N axes
of b in order.
If axes is a list or `Tensor` the first and second row contain the set of
unique integers specifying axes along which the contraction is computed,
for `a` and `b`, respectively. The number of axes for `a` and `b` must
be equal.
name: A name for the operation (optional).
Returns:
A `Tensor` with the same type as `a`.
Raises:
ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.
IndexError: If the values in axes exceed the rank of the corresponding
tensor.
"""
def _tensordot_reshape(a, axes, flipped=False):
"""Helper method to perform transpose and reshape for contraction op.
This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`
using `array_ops.transpose` and `array_ops.reshape`. The method takes a
tensor and performs the correct transpose and reshape operation for a given
set of indices. It returns the reshaped tensor as well as a list of indices
necessary to reshape the tensor again after matrix multiplication.
Args:
a: `Tensor`.
axes: List or `int32` `Tensor` of unique indices specifying valid axes of
`a`.
flipped: An optional `bool`. Defaults to `False`. If `True`, the method
assumes that `a` is the second argument in the contraction operation.
Returns:
A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is
the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is
either a list of integers or an `int32` `Tensor`, depending on whether
the shape of a is fully specified, and free_dims_static is either a list
of integers and None values, or None, representing the inferred
static shape of the free dimensions
"""
if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):
shape_a = a.get_shape().as_list()
axes = [i if i >= 0 else i + len(shape_a) for i in axes]
free = [i for i in xrange(len(shape_a)) if i not in axes]
free_dims = [shape_a[i] for i in free]
prod_free = int(np.prod([shape_a[i] for i in free]))
prod_axes = int(np.prod([shape_a[i] for i in axes]))
perm = list(axes) + free if flipped else free + list(axes)
new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes]
reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
return reshaped_a, free_dims, free_dims
else:
if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)):
shape_a = a.get_shape().as_list()
axes = [i if i >= 0 else i + len(shape_a) for i in axes]
free = [i for i in xrange(len(shape_a)) if i not in axes]
free_dims_static = [shape_a[i] for i in free]
else:
free_dims_static = None
shape_a = array_ops.shape(a)
rank_a = array_ops.rank(a)
axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
axes = cast(axes >= 0, dtypes.int32) * axes + cast(
axes < 0, dtypes.int32) * (axes + rank_a)
free, _ = array_ops.setdiff1d(range(rank_a), axes)
free_dims = array_ops.gather(shape_a, free)
axes_dims = array_ops.gather(shape_a, axes)
prod_free_dims = reduce_prod(free_dims)
prod_axes_dims = reduce_prod(axes_dims)
if flipped:
perm = array_ops.concat([axes, free], 0)
new_shape = array_ops.stack([prod_axes_dims, prod_free_dims])
else:
perm = array_ops.concat([free, axes], 0)
new_shape = array_ops.stack([prod_free_dims, prod_axes_dims])
reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
return reshaped_a, free_dims, free_dims_static
def _tensordot_axes(a, axes):
"""Generates two sets of contraction axes for the two tensor arguments."""
a_shape = a.get_shape()
if isinstance(axes, compat.integral_types):
if axes < 1:
raise ValueError("'axes' must be at least 1.")
if a_shape.ndims is not None:
return range(a_shape.ndims - axes, a_shape.ndims), range(axes)
else:
rank = array_ops.rank(a)
return (range(rank - axes, rank, dtype=dtypes.int32), range(
axes, dtype=dtypes.int32))
elif isinstance(axes, (list, tuple)):
if len(axes) != 2:
raise ValueError("'axes' must be an integer or have length 2.")
a_axes = axes[0]
b_axes = axes[1]
if isinstance(a_axes, compat.integral_types) and \
isinstance(b_axes, compat.integral_types):
a_axes = [a_axes]
b_axes = [b_axes]
if len(a_axes) != len(b_axes):
raise ValueError(
"Different number of contraction axes 'a' and 'b', %s != %s.",
len(a_axes), len(b_axes))
return a_axes, b_axes
else:
axes = ops.convert_to_tensor(axes, name="axes", dtype=dtypes.int32)
return axes[0], axes[1]
with ops.name_scope(name, "Tensordot", [a, b, axes]) as name:
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
a_axes, b_axes = _tensordot_axes(a, axes)
a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes)
b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape(b, b_axes,
True)
ab_matmul = matmul(a_reshape, b_reshape)
if isinstance(a_free_dims, list) and isinstance(b_free_dims, list):
return array_ops.reshape(ab_matmul, a_free_dims + b_free_dims, name=name)
else:
a_free_dims = ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32)
b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32)
product = array_ops.reshape(
ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name)
if a_free_dims_static is not None and b_free_dims_static is not None:
product.set_shape(a_free_dims_static + b_free_dims_static)
return product
# FFT ops were moved to tf.spectral. tf.fft symbols were part of the TensorFlow
# 1.0 API so we leave these here for backwards compatibility.
fft = gen_spectral_ops.fft
ifft = gen_spectral_ops.ifft
fft2d = gen_spectral_ops.fft2d
ifft2d = gen_spectral_ops.ifft2d
fft3d = gen_spectral_ops.fft3d
ifft3d = gen_spectral_ops.ifft3d
|
xuleiboy1234/autoTitle
|
tensorflow/tensorflow/python/ops/math_ops.py
|
Python
|
mit
| 83,640 | 0.004639 |
#
# This file is part of CONCUSS, https://github.com/theoryinpractice/concuss/,
# and is Copyright (C) North Carolina State University, 2015. It is licensed
# under the three-clause BSD license; see LICENSE.
#
from collections import deque
from dp import KPattern, DPTable
from double_count import InclusionExclusion
from lib.util.misc import clear_output_line
from lib.decomposition import CombinationsSweep
from lib.graph.treedepth import treedepth
class PatternCounter(object):
"""
Run the decompose, count, and combine parts of the pipeline
The PatternCounter is responsible for creating a CountCombiner and a
DecompGenerator, then running the DecompGenerator, getting DPTable
objects from the CountCombiner for the decompositions, and returning
the final count from the whole graph.
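Typical use (an illustrative sketch; assumes a host graph G, a list of
pattern graphs, a matching list of treedepths and a coloring have already
been computed):
counter = PatternCounter(G, patterns, td_list, coloring)
counts = counter.count_patterns()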
"""
def __init__(self, G, multi, td_list, coloring, pattern_class=KPattern,
table_hints={}, decomp_class=CombinationsSweep,
combiner_class=InclusionExclusion, verbose=False,
big_component_file=None, tdd_file=None, dp_table_file=None,
colset_count_file=None):
"""
Create the CountCombiner and DecompGenerator objects
Arguments:
G: Host graph
multi: List of pattern graphs to count
td_list: List of treedepth values, one per pattern in multi
coloring: A (p+1)-centered coloring of G, where p is the order of the largest pattern
pattern_class: The k-pattern class to use in dynamic programming
table_hints: probably-respected options for the DP table
decomp_class: DecompGenerator subclass
combiner_class: CountCombiner subclass
verbose: whether or not to print debugging information
"""
self.G = G
self.multi = multi
self.coloring = coloring
self.pattern_class = pattern_class
self.verbose = verbose
self.big_component_file = big_component_file
self.big_component = None
self.tdd_file = tdd_file
self.dp_table_file = dp_table_file
self.dp_table = None
self.colset_count_file = colset_count_file
self.combiners = [combiner_class(len(multi[idx]), coloring, table_hints, td=td_list[idx],
execdata_file=colset_count_file) for idx in range(len(multi))]
before_color_set_callbacks = [combiner.before_color_set for combiner in self.combiners]
after_color_set_callbacks = [combiner.after_color_set for combiner in self.combiners]
# TODO: calculate a lower bound on treedepth
self.decomp_generator = decomp_class(G, coloring, len(max(multi, key=len)),
min(td_list), len(min(multi, key=len)),
before_color_set_callbacks,
after_color_set_callbacks,
self.verbose)
def count_patterns_from_TDD(self, decomp, pat, idx):
"""
Count the number of occurrences of our pattern in the given treedepth
decomposition.
Arguments:
decomp: Treedepth decomposition of a graph
pat: The pattern that we are counting
idx: The index of our pattern in the multi-pattern list
"""
# Keep this table if the big component is the current component
keep_table = (self.big_component is decomp)
# Get a table object for this decomposition from the CountCombiner
table = self.combiners[idx].table(decomp)
# create a post order traversal ordering with a DFS to use in the DP
ordering = []
q = deque([decomp.root])
# print decomp.root, len(decomp),
# print [(i+1,self.coloring[i]) for i in decomp]
while q:
curr = q.pop()
ordering.append(curr)
if not decomp.hasLeaf(curr):
q.extend(reversed(decomp.children(curr)))
ordering.reverse()
# Perform dynamic programming on the treedepth decomposition in the
# post order traversal
computeLeaf = table.computeLeaf
computeInnerVertexSet = table.computeInnerVertexSet
computeInnerVertexSetCleanup = table.computeInnerVertexSetCleanup
computeInnerVertex = table.computeInnerVertex
pattern_class = self.pattern_class
# For each vertex in the TDD:
for v in ordering:
# If the vertex is a leaf
if decomp.hasLeaf(v):
for pattern in pattern_class.allPatterns(pat,
decomp.depth()):
# print " Pattern: ", pattern
computeLeaf(v, pattern, pat)
# If the vertex is internal:
else:
# Get counts for tuples of its children (join case)
for c_idx in range(2, len(decomp.children(v))+1):
leftChildren = tuple(decomp.children(v)[:c_idx])
for pattern in pattern_class.allPatterns(pat,
decomp.depth()):
# print " Pattern: ", pattern
computeInnerVertexSet(leftChildren, pattern, pat)
# Possibly clean up some unneeded data structures
computeInnerVertexSetCleanup(leftChildren, pat)
# Combine child counts (forget case)
for pattern in pattern_class.allPatterns(pat,
decomp.depth()):
computeInnerVertex(v, pattern, pat)
# leaf = G.leaves().pop()
# for pattern in patternClass.allPatterns(H, G.depth()):
# print G.isIsomorphism(leaf, pattern), pattern
# Get the total count for the whole TDD
trivialPattern = pattern_class(pat.nodes, None, pat)
retVal = table.lookup((decomp.root,), trivialPattern)
# if retVal > 0:
# print "Return value", retVal
# print table
# Keep the table if this tdd is the big component
if keep_table:
self.dp_table = table
return retVal
def count_patterns(self):
"""Count the number of occurrences of our pattern in our host graph."""
# Make a list to store counts of patterns specified
final_count = [0]*len(self.multi)
# For every TDD given to us by the decomposition generator
for tdd in self.decomp_generator:
# Remember the largest component we've seen if we're making
# visualization output
if self.big_component_file is not None:
if self.big_component is None:
self.big_component = tdd
elif len(self.big_component) < len(tdd):
self.big_component = tdd
# Count patterns in that TDD
for idx, pat in enumerate(self.multi):
count = self.count_patterns_from_TDD(tdd, pat, idx)
# Combine the count from the TDD
self.combiners[idx].combine_count(count)
# Populate the list of counts that will be returned
for idx in range(len(self.multi)):
final_count[idx] += self.combiners[idx].get_count()
# Write the largest component to a file
if self.big_component_file is not None:
from lib.graph.graphformats import write_edgelist
write_edgelist(self.big_component, self.big_component_file)
# Write the TDD of the largest component to a file
if self.tdd_file is not None:
for v in self.big_component.nodes:
parent = self.big_component.vertexRecords[v].parent
if parent is not None:
print >> self.tdd_file, v, parent
# Write the DP table for the largest component to a file
if self.dp_table_file is not None:
# Write the table in a machine-readable format
dp_table = self.dp_table.table
for v_tup in sorted(dp_table.keys()):
self.dp_table_file.write(str([v for v in v_tup]) + " {\n")
for pattern, count in sorted(dp_table[v_tup].iteritems()):
if count > 0:
self.dp_table_file.write("\t" + str(count) + "; ")
vString = [v for v in pattern.vertices]
bString = [str(v) + ":" + str(i) for v, i in
pattern.boundary.iteritems()]
bString = '[' + ', '.join(bString) + ']'
self.dp_table_file.write(
str(vString) + "; " + str(bString) + "\n")
self.dp_table_file.write("}\n")
# Return the totals for the whole graph
return final_count
|
TheoryInPractice/CONCUSS
|
lib/pattern_counting/pattern_counter.py
|
Python
|
bsd-3-clause
| 8,882 | 0.000788 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-10 18:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0028_auto_20170113_2133'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='address1',
),
migrations.RemoveField(
model_name='profile',
name='address2',
),
migrations.RemoveField(
model_name='profile',
name='address3',
),
migrations.AddField(
model_name='profile',
name='address',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
|
mitodl/micromasters
|
profiles/migrations/0029_merge_address_fields.py
|
Python
|
bsd-3-clause
| 794 | 0 |
__version__ = '1.2.0'
from .oss_authorizers import OssAuthorizer
from .oss_file_operation import OssFileOperation
from .oss_fs import OssFS
from .oss_fs_impl import OssFsImpl
|
aliyun/oss-ftp
|
ossftp/__init__.py
|
Python
|
mit
| 176 | 0 |
import hashlib
import mimetypes
import os
import posixpath
import re
from time import time
from urlparse import urlsplit, urlunsplit
from werkzeug.exceptions import NotFound
from werkzeug.http import is_resource_modified, http_date
from spa.static.handlers import StaticHandler
from spa.utils import clean_path
class HashCache(object):
def __init__(self):
self.path_hashes = {}
self.contents = {}
def get_path_hash(self, path):
return self.path_hashes.get(path)
def set_path_hash(self, path, path_hash):
self.path_hashes[path] = path_hash
def get_contents(self, path):
return self.contents.get(path)
def set_contents(self, path, contents):
self.contents[path] = contents
class CacheBustingStaticHandler(StaticHandler):
css_url_patterns = (
(re.compile(r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""", re.IGNORECASE),
"""url("{hashed_url}")"""),
(re.compile(r"""(@import\s*["']\s*(.*?)["'])""", re.IGNORECASE),
"""@import url("{hashed_url}")"""),
)
def __init__(self, app, req, params, directory, hash_cache, **kwargs):
self.hash_cache = hash_cache
return super(CacheBustingStaticHandler, self).__init__(
app, req, params, directory, **kwargs
)
def get(self, filepath):
unhashed_path, path_hash = parse_hashed_filepath(filepath)
if unhashed_path is None:
return NotFound()
if self.hash_cache.get_path_hash(unhashed_path) is None:
# compute hash, and cache it.
file = self.get_file(unhashed_path)
if file is None:
return NotFound()
try:
hash_str = get_hash(file.handle)
self.hash_cache.set_path_hash(unhashed_path, hash_str)
finally:
file.handle.close()
# If hash we were passed doesn't equal the one we've computed and
# cached, then 404.
if path_hash != self.hash_cache.get_path_hash(unhashed_path):
return NotFound()
# For CSS stylesheets only, we'll rewrite content so that url()
# functions will point to hashed filenames instead of unhashed. The
# rewritten CSS content will be kept in memory.
if mimetypes.guess_type(filepath)[0] == 'text/css':
return self.make_css_response(unhashed_path)
return super(CacheBustingStaticHandler, self).get(unhashed_path)
def make_css_response(self, filepath):
def resp(environ, start_response):
file = self.get_file(filepath)
try:
headers = [('Date', http_date())]
if self.cache:
timeout = self.cache_timeout
etag = self.generate_etag(file.mtime, file.size, file.name)
headers += [
('Etag', '"%s"' % etag),
('Cache-Control', 'max-age=%d, public' % timeout)
]
if not is_resource_modified(environ, etag, last_modified=file.mtime):
start_response('304 Not Modified', headers)
return []
headers.append(('Expires', http_date(time() + timeout)))
else:
headers.append(('Cache-Control', 'public'))
contents = self.hash_cache.get_contents(filepath)
if contents is None:
contents = file.handle.read()
for pat, tpl in self.css_url_patterns:
converter = self.get_converter(tpl)
contents = pat.sub(converter, contents)
self.hash_cache.set_contents(filepath, contents)
headers.extend((
('Content-Type', file.mimetype),
('Content-Length', len(contents)),
('Last-Modified', http_date(file.mtime))
))
start_response('200 OK', headers)
return [contents]
finally:
file.handle.close()
return resp
def get_converter(self, tpl):
def converter(matchobj):
matched, url = matchobj.groups()
if url.startswith(('#', 'http:', 'https:', 'data:', '//')):
return url
return tpl.format(hashed_url=self.convert_css_url(url))
return converter
def convert_css_url(self, css_url):
split_url = urlsplit(css_url)
url_path = split_url.path
if not url_path.startswith('/'):
abs_url_path = self.make_path_absolute(url_path)
else:
abs_url_path = posixpath.realpath(url_path)
prefix = self.get_url_prefix()
# now make the path as it would be passed in to this handler when
# requested from the web. From there we can use existing methods on the
# class to resolve to a real file.
_, _, content_filepath = abs_url_path.partition(prefix)
content_filepath = clean_path(content_filepath)
content_file_hash = self.hash_cache.get_path_hash(content_filepath)
if content_file_hash is None:
content_file = self.get_file(content_filepath)
if content_file is None:
return 'NOT FOUND: "%s"' % url_path
try:
content_file_hash = get_hash(content_file.handle)
finally:
content_file.handle.close()
parts = list(split_url)
parts[2] = add_hash_to_filepath(url_path, content_file_hash)
url = urlunsplit(parts)
# Special casing for a @font-face hack, like url(myfont.eot?#iefix")
# http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
if '?#' in css_url:
parts = list(urlsplit(url))
if not parts[3]:
parts[2] += '?'
url = urlunsplit(parts)
return url
def get_url_prefix(self):
"""
Return the mount point for this handler. So if you had a route like
this:
('/foo/bar/static/<path:filepath>', 'foo', Handler)
Then this function should return '/foo/bar/static/'
"""
env = self.request.environ
filepath = self.params['filepath']
prefix, _, _ = (env['SCRIPT_NAME'] +
env['PATH_INFO']).rpartition(filepath)
return prefix
def make_path_absolute(self, path):
"""
Given a relative url found inside the CSS file we're currently serving,
return an absolute form of that URL.
"""
env = self.request.environ
pinfo = posixpath.dirname(env['PATH_INFO'])
return posixpath.realpath(env['SCRIPT_NAME'] + pinfo + '/' + path)
def parse_hashed_filepath(filename, hash_len=12):
"""
Given a name like '/static/my_file.deadbeef1234.txt', return a tuple of the file name
without the hash, and the hash itself, like this:
('/static/my_file.txt', 'deadbeef1234')
If no hash part is found, then return (None, None).
"""
pat = '^(?P<before>.*)\.(?P<hash>[0-9a-f]{%s})(?P<after>.*?)$' % hash_len
m = re.match(pat, filename)
if m is None:
return None, None
parts = m.groupdict()
return '{before}{after}'.format(**parts), parts['hash']
def add_hash_to_filepath(filepath, hash_str):
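"""
Insert the hash before the file extension (illustrative):
add_hash_to_filepath('/static/my_file.txt', 'deadbeef1234')
-> '/static/my_file.deadbeef1234.txt'
"""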
path, filename = os.path.split(filepath)
root, ext = os.path.splitext(filename)
return os.path.join(path, "%s.%s%s" % (root, hash_str, ext))
def get_hash(lines, hash_len=12):
md5 = hashlib.md5()
for line in lines:
md5.update(line)
return md5.hexdigest()[:hash_len]
class SmartStatic(object):
"""
A factory for making CacheBustingStaticHandler instances that share a cache
instance.
"""
def __init__(self, directory):
self.directory = directory
self.hash_cache = HashCache()
def __call__(self, app, req, params, **kwargs):
return CacheBustingStaticHandler(app, req, params,
directory=self.directory,
hash_cache=self.hash_cache,
**kwargs)
|
dmonroy/spa
|
spa/static/smart.py
|
Python
|
bsd-3-clause
| 8,271 | 0.000484 |
from __future__ import unicode_literals
import frappe, unittest
from werkzeug.wrappers import Request
from werkzeug.test import EnvironBuilder
from frappe.website import render
def set_request(**kwargs):
builder = EnvironBuilder(**kwargs)
frappe.local.request = Request(builder.get_environ())
class TestWebsite(unittest.TestCase):
def test_page_load(self):
set_request(method='POST', path='login')
response = render.render()
self.assertTrue(response.status_code, 200)
html = response.get_data()
self.assertTrue('/* login-css */' in html)
self.assertTrue('// login.js' in html)
self.assertTrue('<!-- login.html -->' in html)
|
maxtorete/frappe
|
frappe/tests/test_website.py
|
Python
|
mit
| 649 | 0.020031 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2009 Gary Burton
# Copyright (C) 2009 Benny Malengier
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Python classes
#
#-------------------------------------------------------------------------
import logging
_LOG = logging.getLogger(".gui.editors.EditNote")
#-------------------------------------------------------------------------
#
# GTK libraries
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
from gi.repository import GObject
from gi.repository import Pango
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
from gramps.gen.config import config
from .editprimary import EditPrimary
from .displaytabs import GrampsTab, NoteBackRefList
from ..widgets import (MonitoredDataType, MonitoredCheckbox,
MonitoredEntry, PrivacyButton, MonitoredTagList)
from gramps.gen.lib import Note
from gramps.gen.db import DbTxn
from ..dialog import ErrorDialog
from ..glade import Glade
from gramps.gen.const import URL_MANUAL_SECT2
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
WIKI_HELP_PAGE = URL_MANUAL_SECT2
WIKI_HELP_SEC = _('manual|Editing_information_about_notes')
#-------------------------------------------------------------------------
#
# NoteTab
#
#-------------------------------------------------------------------------
class NoteTab(GrampsTab):
"""
This class provides the tabpage of the note
"""
def __init__(self, dbstate, uistate, track, name, widget):
"""
@param dbstate: The database state. Contains a reference to
the database, along with other state information. The GrampsTab
uses this to access the database and to pass to any created
child windows (such as edit dialogs).
@type dbstate: L{DbState.DbState}
@param uistate: The UI state. Used primarily to pass to any created
subwindows.
@type uistate: L{DisplayState.DisplayState}
@param track: The window tracking mechanism used to manage windows.
This is only used to pass to generated child windows.
@type track: list
@param name: Notebook label name
@type name: str/unicode
@param widget: widget to be shown in the tab
@type widget: gtk widget
"""
GrampsTab.__init__(self, dbstate, uistate, track, name)
eventbox = Gtk.EventBox()
eventbox.add(widget)
self.pack_start(eventbox, True, True, 0)
self._set_label(show_image=False)
eventbox.connect('key_press_event', self.key_pressed)
self.show_all()
def is_empty(self):
"""
Override base class
"""
return False
#-------------------------------------------------------------------------
#
# EditNote
#
#-------------------------------------------------------------------------
class EditNote(EditPrimary):
def __init__(self, dbstate, uistate, track, note, callback=None,
callertitle = None, extratype = None):
"""Create an EditNote window. Associate a note with the window.
@param callertitle: Text passed by calling object to add to title
@type callertitle: str
@param extratype: Extra L{NoteType} values to add to the default types.
They are removed from the ignorelist of L{NoteType}.
@type extratype: list of int
"""
self.callertitle = callertitle
self.extratype = extratype
EditPrimary.__init__(self, dbstate, uistate, track, note,
dbstate.db.get_note_from_handle,
dbstate.db.get_note_from_gramps_id, callback)
def empty_object(self):
"""Return an empty Note object for comparison for changes.
It is used by the base class L{EditPrimary}.
"""
empty_note = Note()
if self.extratype:
empty_note.set_type(self.extratype[0])
return empty_note
def get_menu_title(self):
if self.obj.get_handle():
if self.callertitle :
title = _('Note: %(id)s - %(context)s') % {
'id' : self.obj.get_gramps_id(),
'context' : self.callertitle
}
else :
title = _('Note: %s') % self.obj.get_gramps_id()
else:
if self.callertitle :
title = _('New Note - %(context)s') % {
'context' : self.callertitle
}
else :
title = _('New Note')
return title
def get_custom_notetypes(self):
return self.dbstate.db.get_note_types()
def _local_init(self):
"""Local initialization function.
Perform basic initialization, including setting up widgets
and the glade interface. It is called by the base class L{EditPrimary},
and overridden here.
"""
self.top = Glade()
win = self.top.toplevel
self.set_window(win, None, self.get_menu_title())
self.setup_configs('interface.note', 700, 500)
vboxnote = self.top.get_object('vbox131')
notebook = self.top.get_object('note_notebook')
#recreate start page as GrampsTab
notebook.remove_page(0)
self.ntab = NoteTab(self.dbstate, self.uistate, self.track,
_('_Note'), vboxnote)
self.track_ref_for_deletion("ntab")
self.build_interface()
def _setup_fields(self):
"""Get control widgets and attach them to Note's attributes."""
self.type_selector = MonitoredDataType(
self.top.get_object('type'),
self.obj.set_type,
self.obj.get_type,
self.db.readonly,
custom_values=self.get_custom_notetypes(),
ignore_values=self.obj.get_type().get_ignore_list(self.extratype))
self.check = MonitoredCheckbox(
self.obj,
self.top.get_object('format'),
self.obj.set_format,
self.obj.get_format,
readonly = self.db.readonly)
self.gid = MonitoredEntry(
self.top.get_object('id'),
self.obj.set_gramps_id,
self.obj.get_gramps_id,
self.db.readonly)
self.tags = MonitoredTagList(
self.top.get_object("tag_label"),
self.top.get_object("tag_button"),
self.obj.set_tag_list,
self.obj.get_tag_list,
self.db,
self.uistate, self.track,
self.db.readonly)
self.priv = PrivacyButton(
self.top.get_object("private"),
self.obj, self.db.readonly)
def _connect_signals(self):
"""Connects any signals that need to be connected.
Called by the init routine of the base class L{EditPrimary}.
"""
self.define_ok_button(self.top.get_object('ok'), self.save)
self.define_cancel_button(self.top.get_object('cancel'))
self.define_help_button(self.top.get_object('help'),
WIKI_HELP_PAGE, WIKI_HELP_SEC)
def _connect_db_signals(self):
"""
Connect any signals that need to be connected.
Called by the init routine of the base class (_EditPrimary).
"""
self._add_db_signal('note-rebuild', self._do_close)
self._add_db_signal('note-delete', self.check_for_close)
def _create_tabbed_pages(self):
"""Create the notebook tabs and inserts them into the main window."""
notebook = self.top.get_object("note_notebook")
self._add_tab(notebook, self.ntab)
handles = self.dbstate.db.find_backlink_handles(self.obj.handle)
self.rlist = NoteBackRefList(self.dbstate,
self.uistate,
self.track,
handles)
self.backref_tab = self._add_tab(notebook, self.rlist)
self.track_ref_for_deletion("rlist")
self.track_ref_for_deletion("backref_tab")
self._setup_notebook_tabs(notebook)
def build_interface(self):
self.texteditor = self.top.get_object('texteditor')
self.texteditor.set_editable(not self.dbstate.db.readonly)
self.texteditor.set_wrap_mode(Gtk.WrapMode.WORD)
# create a formatting toolbar
if not self.dbstate.db.readonly:
vbox = self.top.get_object('container')
toolbar, self.action_group = self.texteditor.create_toolbar(
self.uistate.uimanager, self.window)
vbox.pack_start(toolbar, False, False, 0)
self.texteditor.set_transient_parent(self.window)
# setup initial values for textview and textbuffer
if self.obj:
self.empty = False
with self.texteditor.undo_disabled():
self.texteditor.set_text(self.obj.get_styledtext())
# Reset the undoable buffer:
self.texteditor.reset()
_LOG.debug("Initial Note: %s" % str(self.texteditor.get_text()))
else:
self.empty = True
def build_menu_names(self, person):
"""
Provide the information needed by the base class to define the
window management menu entries.
"""
return (_('Edit Note'), self.get_menu_title())
def _post_init(self):
self.texteditor.grab_focus()
def update_note(self):
"""Update the Note object with current value."""
if self.obj:
text = self.texteditor.get_text()
self.obj.set_styledtext(text)
_LOG.debug(str(text))
def close(self, *obj):
"""Called when cancel button clicked."""
self.update_note()
super().close()
def save(self, *obj):
"""Save the data."""
self.ok_button.set_sensitive(False)
self.update_note()
if self.object_is_empty():
ErrorDialog(_("Cannot save note"),
_("No data exists for this note. Please "
"enter data or cancel the edit."),
parent=self.window)
self.ok_button.set_sensitive(True)
return
(uses_dupe_id, id) = self._uses_duplicate_id()
if uses_dupe_id:
msg1 = _("Cannot save note. ID already exists.")
msg2 = _("You have attempted to use the existing Gramps ID with "
"value %(id)s. This value is already used. Please "
"enter a different ID or leave "
"blank to get the next available ID value.") % {
'id' : id }
ErrorDialog(msg1, msg2, parent=self.window)
self.ok_button.set_sensitive(True)
return
if not self.obj.handle:
with DbTxn(_("Add Note"),
self.db) as trans:
self.db.add_note(self.obj, trans)
else:
if self.data_has_changed():
with DbTxn(_("Edit Note"),
self.db) as trans:
if not self.obj.get_gramps_id():
self.obj.set_gramps_id(self.db.find_next_note_gramps_id())
self.db.commit_note(self.obj, trans)
if self.callback:
self.callback(self.obj.get_handle())
self._do_close()
class DeleteNoteQuery:
def __init__(self, dbstate, uistate, note, the_lists):
self.note = note
self.db = dbstate.db
self.uistate = uistate
self.the_lists = the_lists
def query_response(self):
with DbTxn(_("Delete Note (%s)") % self.note.get_gramps_id(),
self.db) as trans:
self.db.disable_signals()
(person_list, family_list, event_list, place_list, source_list,
citation_list, media_list, repo_list) = self.the_lists
note_handle = self.note.get_handle()
for handle in person_list:
person = self.db.get_person_from_handle(handle)
if person:
person.remove_note(note_handle)
self.db.commit_person(person, trans)
for handle in family_list:
family = self.db.get_family_from_handle(handle)
if family:
family.remove_note(note_handle)
self.db.commit_family(family, trans)
for handle in event_list:
event = self.db.get_event_from_handle(handle)
if event:
event.remove_note(note_handle)
self.db.commit_event(event, trans)
for handle in place_list:
place = self.db.get_place_from_handle(handle)
if place:
place.remove_note(note_handle)
self.db.commit_place(place, trans)
for handle in source_list:
source = self.db.get_source_from_handle(handle)
if source:
source.remove_note(note_handle)
self.db.commit_source(source, trans)
for handle in citation_list:
citation = self.db.get_citation_from_handle(handle)
if citation:
citation.remove_note(note_handle)
self.db.commit_citation(citation, trans)
for handle in media_list:
media = self.db.get_media_from_handle(handle)
if media:
media.remove_note(note_handle)
self.db.commit_media(media, trans)
for handle in repo_list:
repo = self.db.get_repository_from_handle(handle)
if repo:
repo.remove_note(note_handle)
self.db.commit_repository(repo, trans)
self.db.enable_signals()
self.db.remove_note(note_handle, trans)
|
dermoth/gramps
|
gramps/gui/editors/editnote.py
|
Python
|
gpl-2.0
| 15,225 | 0.00335 |
#!/usr/bin/python
#======================================================================
#
# Project : hpp_IOStressTest
# File : Libs/IOST_WAboutDialog/IOST_AboutDialog.py
# Date : Sep 21, 2016
# Author : HuuHoang Nguyen
# Contact : hhnguyen@apm.com
# : hoangnh.hpp@gmail.com
# License : MIT License
# Copyright : 2016
# Description: The hpp_IOStressTest is under the MIT License, a copy of license which may be found in LICENSE
#
#======================================================================
import io
import os
import re
import operator
import sys
import base64
import time
from IOST_Basic import *
from IOST_Config import *
import gtk
import gtk.glade
import gobject
#======================================================================
class IOST_AboutDialog():
def __init__(self, glade_filename, window_name, object_name ,main_builder=None):
"This is a function get of Diaglog Help -> About Window"
self.IOST_AboutDialog_WindowName = window_name
self.IOST_AboutDialog_ObjectName = object_name
if not main_builder:
self.IOST_AboutDialog_Builder = gtk.Builder()
self.IOST_AboutDialog_Builder.add_from_file(glade_filename)
self.IOST_AboutDialog_Builder.connect_signals(self)
else:
self.IOST_AboutDialog_Builder = main_builder
# self.IOST_Objs[window_name][window_name+ object_name] = self.IOST_AboutDialog_Builder.get_object(window_name+object_name)
# self.IOST_Objs[window_name][window_name+ object_name].set_version(self.IOST_Data["ProjectVersion"])
self.CreateObjsDictFromDict(self.IOST_AboutDialog_WindowName,
self.IOST_Objs[self.IOST_AboutDialog_WindowName],
self.IOST_AboutDialog_Builder,
0)
self.IOST_Objs[self.IOST_AboutDialog_WindowName][self.IOST_AboutDialog_ObjectName].set_version(self.IOST_Data["ProjectVersion"])
def Run(self, window_name, object_name):
self.IOST_Objs[window_name][object_name].run()
self.IOST_Objs[window_name][object_name].hide()
def ActiveLink(self, object_name):
self.IOST_Objs[self.IOST_AboutDialog_WindowName][self.IOST_AboutDialog_ObjectName].hide()
def on_IOST_WHelpAbout_destroy(self, object, data=None):
""
self.IOST_Objs[self.IOST_AboutDialog_WindowName][self.IOST_AboutDialog_ObjectName].hide()
def on_IOST_WHelpAbout_DialogActionArea_destroy(self, object, data=None):
""
self.IOST_Objs[self.IOST_AboutDialog_WindowName][self.IOST_AboutDialog_ObjectName].hide()
def on_IOST_WHelpAbout_button_press_event(self, widget, event, data=None):
""
self.IOST_Objs[self.IOST_AboutDialog_WindowName][self.IOST_AboutDialog_ObjectName].hide()
def on_IOST_WHelpAbout_DialogVB_button_press_event(self, widget, event, data=None):
""
self.IOST_Objs[self.IOST_AboutDialog_WindowName][self.IOST_AboutDialog_objectt_name].hide()
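    # Hedged note (not part of the original file): this class also reads
    # self.IOST_Objs, self.IOST_Data and self.CreateObjsDictFromDict, which are
    # not defined here, so it is expected to be combined with the classes that
    # provide them. On such a combined object, showing the dialog amounts to:
    #
    #   self.Run(self.IOST_AboutDialog_WindowName, self.IOST_AboutDialog_ObjectName)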
| HPPTECH/hpp_IOSTressTest | IOST_0.23/Libs/IOST_AboutDialog/IOST_AboutDialog.py | Python | mit | 3,089 | 0.00777 |
'''
Copyright 2010 - Greg Hellings
This file is part of the Automated FTP Dominator.
The Automated FTP Dominator is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
The Automated FTP Dominator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with the Automated FTP Dominator. If not, see
<http://www.gnu.org/licenses/>.
'''
from PyQt4 import QtGui, QtCore
from gui.dialogs.DomEditEntry import DomEditEntryDialog
class Config(QtGui.QMainWindow):
changes = None
configObject = None
_configname = 'default'
def __init__(self, config):
QtGui.QMainWindow.__init__(self, None)
self._config = config
self.setGeometry(100, 150, 600, 400)
self.setWindowTitle('Configure Destinations')
# Let's make ourselves a nice little layout
conf_hbox = self.getChooser(self)
# Let's make the menubar
menubar = self.makeMenubar()
self.setMenuBar(menubar)
# Save changes?
save = self.makeSaveButton()
self.siteList = QtGui.QListWidget()
self.siteList.setSortingEnabled(True)
#self.siteList.setSelectionMode(QtGui.QAbstractItemView.MultiSelection)
# Main vertical layout
vbox = QtGui.QVBoxLayout()
vbox.addLayout(conf_hbox)
vbox.addWidget(self.siteList)
#vbox.addStretch(1) # This allows us to not occupy the entire vertical space of the window.
vbox.addLayout(save)
centralWidget = QtGui.QWidget(self)
centralWidget.setLayout(vbox)
self.setCentralWidget(centralWidget)
if len(self._config._list) > 0: self.activateConfig(self.conf_list.currentText())
else: self.newConfig()
#################################################################################################################################
#################################################### UI Helpers #################################################################
#################################################################################################################################
# Makes the top line chooser/new box
def getChooser(self, widget):
conf_hbox = QtGui.QHBoxLayout()
conf_hbox.addWidget(QtGui.QLabel('Select Configuration'))
# First we create the label with all the configurations currently available
self.conf_list = QtGui.QComboBox(widget)
for this_config in self._config._list:
self.conf_list.addItem(this_config)
conf_hbox.addWidget(self.conf_list)
self.connect(self.conf_list, QtCore.SIGNAL('currentIndexChanged(const QString&)'), self.activateConfig)
# Populate the first config available
# And an "Add New" box
self.conf_newbutton = QtGui.QPushButton('New')
widget.connect(self.conf_newbutton, QtCore.SIGNAL('clicked()'), self.newConfig) #self, QtCore.SLOT('newConfig()'))
conf_hbox.addWidget(self.conf_newbutton)
conf_hbox.addStretch(1) # This makes the line not take up the entire width of the application
return conf_hbox
# Creates a menu bar and returns it to the caller - sadly they only look right if we're working with a QMainWindow
def makeMenubar(self):
# Make a new menubar
menubar = QtGui.QMenuBar()
# First menu entry - File, as always
file = menubar.addMenu('&File')
# Last file entry - as always
file.addSeparator()
exit = QtGui.QAction('E&xit', self)
exit.setShortcut('Ctrl+Q')
self.connect(exit, QtCore.SIGNAL('triggered()'), QtCore.SLOT('close()'))
file.addAction(exit)
return menubar
# Returns a layout that contains a "Save Contents" button
def makeSaveButton(self):
hbox = QtGui.QHBoxLayout()
# The + and - buttons
addButton = QtGui.QPushButton(QtGui.QIcon('icons/add_16x16.png'), 'Add Entry')
self.connect(addButton, QtCore.SIGNAL('clicked()'), self.addEntry)
editButton = QtGui.QPushButton(QtGui.QIcon('icons/edit_16x16.png'), 'Edit Entry')
self.connect(editButton, QtCore.SIGNAL('clicked()'), self.editEntry)
delButton = QtGui.QPushButton(QtGui.QIcon('icons/delete_16x16.png'), 'Delete Entry')
self.connect(delButton, QtCore.SIGNAL('clicked()'), self.delEntry)
hbox.addWidget(addButton)
hbox.addWidget(editButton)
hbox.addWidget(delButton)
# Now the save button
hbox.addStretch(1)
saveButton = QtGui.QPushButton('Save Changes')
self.connect(saveButton, QtCore.SIGNAL('clicked()'), self.saveConfig)
hbox.addWidget(saveButton)
return hbox
# Listens for changes in the active configuration and will update the UI to reflect that
def activateConfig(self, config):
# Confirm that we want to discard the changes
if not self._confirmDiscardChanges():
return None
# Having attained that permission, let us proceed onward with great haste
try:
self.configObject = self._config.getConfig(str(config))
except Exception:
QtGui.QMessageBox.critical(self, 'Error', 'Error opening config file.')
self.configObject = None
self.siteList.clear()
if self.configObject != None:
for entry in self.configObject:
QtGui.QListWidgetItem(entry['name'] + '\t' + entry['destination'], self.siteList)
else:
self.configObject = []
# We don't have changes anymore
self.changes = False
self._configname = config
# We like sortings!
self.siteList.sortItems()
###############################################################################################################################
################################################### Listeners #################################################################
###############################################################################################################################
# Slot where the new button signal is connected
def newConfig(self):
# Confirm that it's OK for us to discard changes
if not self._confirmDiscardChanges(): return None
name, ok = QtGui.QInputDialog.getText(self, 'New Config', 'Name of new configuration', QtGui.QLineEdit.Normal, 'default')
name = name.simplified()
if ok and name != '':
self._configname = name
self.configObject = []
self.conf_list.addItem(name)
def saveConfig(self):
self._config.saveConfig(self._configname, self.configObject)
QtGui.QMessageBox.information(self, 'Saved', 'Configuration saved')
self.changes = False
# Displays a dialog that will allow the user to
# create a new element in the current configuration
def addEntry(self):
dialog = DomEditEntryDialog(self, None)
value = dialog.exec_()
# Only if the user really pushed the 'OK' or 'Enter' button/key
if value == QtGui.QDialog.Accepted:
name = dialog.getSiteName()
value = dialog.getSiteURL()
user = dialog.getUser()
pw = dialog.getPassword()
# Makes sure it doesn't duplicate the name of another site
duplicate = False
for element in self.configObject:
if element['name'] == name: duplicate = True
# Only proceed if we are in a valid place
if not duplicate:
self.configObject.append({'name' : str(name), 'destination' : str(value), 'user' : str(user), 'pw' : str(pw)})
# Displays in the dialog
QtGui.QListWidgetItem(name + '\t' + value, self.siteList)
# Flag the current entry as changed
self.changes = True
# Sorting is fun!
self.siteList.sortItems()
else:
print 'Duplicate detected'
QtGui.QMessageBox.warning(self, 'Duplicate Detected', 'That entry already exists, ignoring.')
else:
print 'Rejecting'
def delEntry(self):
item = self.siteList.takeItem(self.siteList.currentRow())
text = str(item.text())
name, trash, url = text.partition('\t')
# Remove from our list
for obj in self.configObject:
if obj['name'] == name: self.configObject.remove(obj)
# Make sure we know there are changes pending
self.changes = True
def editEntry(self):
# Find out which one we're on
item = self.siteList.currentItem()
name, trash, url = str(item.text()).partition('\t')
entry = None
for obj in self.configObject:
if obj['name'] == name: entry = obj
# Create & show the dialog
dialog = DomEditEntryDialog(self, entry)
value = dialog.exec_()
# Process answers
if value == QtGui.QDialog.Accepted:
# Iterate over the configs
for obj in self.configObject:
if obj['name'] == name:
idx = self.configObject.index(obj)
self.configObject[idx]['name'] = str(dialog.getSiteName())
self.configObject[idx]['destination'] = str(dialog.getSiteURL())
self.configObject[idx]['user'] = str(dialog.getUser())
self.configObject[idx]['pw'] = str(dialog.getPassword())
item.setText(self.configObject[idx]['name'] + '\t' + self.configObject[idx]['destination'])
break
#########################################################################################################################################
##################################################### Other Helper Functions ############################################################
#########################################################################################################################################
def _confirmDiscardChanges(self):
# This is the first execution
if self.changes == None:
self.changes = False
return True
elif self.changes == True:
# Ask the user if they wish to discard the changes
ok = QtGui.QMessageBox.question(self, 'Confirm Discard', 'Are you sure you wish to discard unsaved changes?', 'Yes', 'No')
if ok == 0:
return True
else:
return False
else:
# There are no changes, we can proceed
return True
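# Hedged usage sketch (not part of the original file): the Config window expects
# a config helper exposing _list, getConfig(name) and saveConfig(name, entries),
# as used above; "dominator_config" below is a hypothetical instance of it.
#
#   app = QtGui.QApplication([])
#   window = Config(dominator_config)
#   window.show()
#   app.exec_()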
| greg-hellings/Automated-FTP-Dominator | gui/window.py | Python | gpl-3.0 | 9,748 | 0.029442 |
from collections import OrderedDict
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.translation import ugettext_lazy as _
from oscar.apps.search import facets
FACET_COUNTS = {
u'dates': {},
u'fields': {
'category': [('Fiction', 12), ('Horror', 6), ('Comedy', 3)],
'product_class': [('Book', 18), ('DVD', 3)],
'rating': [],
},
u'queries': {
u'price_exact:[0 TO 20]': 15,
u'price_exact:[20 TO 40]': 5,
u'price_exact:[40 TO 60]': 1,
u'price_exact:[60 TO *]': 0,
}
}
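# The second fixture below mirrors FACET_COUNTS after the [20 TO 40] price range
# has been selected: only that query bucket reports matches (21), so the other
# price buckets come back empty while the field facets are unchanged.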
FACET_COUNTS_WITH_PRICE_RANGE_SELECTED = {
u'dates': {},
u'fields': {
'category': [('Fiction', 12), ('Horror', 6), ('Comedy', 3)],
'product_class': [('Book', 18), ('DVD', 3)],
'rating': [],
},
u'queries': {
u'price_exact:[0 TO 20]': 0,
u'price_exact:[20 TO 40]': 21,
u'price_exact:[40 TO 60]': 0,
u'price_exact:[60 TO *]': 0,
}
}
SEARCH_FACETS = {
'fields': OrderedDict([
('product_class', {'name': _('Type'), 'field': 'product_class'}),
('rating', {'name': _('Rating'), 'field': 'rating'}),
('category', {'name': _('Category'), 'field': 'category'}),
]),
'queries': OrderedDict([
('price_range',
{
'name': _('Price range'),
'field': 'price',
'queries': [
(_('0 to 20'), u'[0 TO 20]'),
(_('20 to 40'), u'[20 TO 40]'),
(_('40 to 60'), u'[40 TO 60]'),
(_('60+'), u'[60 TO *]'),
]
}),
]),
}
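# Illustrative sketch (an assumption, mirroring the calls exercised in the tests
# below): FacetMunger combines the raw facet counts above with the
# OSCAR_SEARCH_FACETS configuration and any selected facets to produce
# template-ready facet data keyed by facet name.
#
#   munger = facets.FacetMunger(path='/search?q=test',
#                               selected_multi_facets={},
#                               facet_counts=FACET_COUNTS)
#   data = munger.facet_data()  # keys include 'category' and 'price_range'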
@override_settings(OSCAR_SEARCH_FACETS=SEARCH_FACETS)
class TestFacetMunger(TestCase):
def test_with_no_facets_selected(self):
munger = facets.FacetMunger(
path='/search?q=test',
selected_multi_facets={},
facet_counts=FACET_COUNTS)
data = munger.facet_data()
self.assertTrue('category' in data)
self.assertEqual(3, len(data['category']['results']))
# Check a sample facet dict has the right keys
datum = data['category']['results'][0]
for key in ('count', 'disabled', 'name', 'select_url',
'selected', 'show_count'):
self.assertTrue(key in datum)
self.assertEqual(datum['count'], 12)
self.assertEqual(datum['name'], 'Fiction')
self.assertFalse(datum['selected'])
def test_pagination_params_are_reset(self):
munger = facets.FacetMunger(
path='/search?q=test&page=2',
selected_multi_facets={},
facet_counts=FACET_COUNTS)
data = munger.facet_data()
# Check a sample facet dict has the right keys
for facet_data in data.values():
for result in facet_data['results']:
self.assertTrue('page' not in result['select_url'])
def test_with_price_facets_selected(self):
munger = facets.FacetMunger(
path='/search?q=test&selected_facets=price_exact%3A%5B20+TO+40%5D',
selected_multi_facets={'price_exact': [u'[20 TO 40]']},
facet_counts=FACET_COUNTS_WITH_PRICE_RANGE_SELECTED)
data = munger.facet_data()
self.assertTrue('price_range' in data)
self.assertEqual(4, len(data['price_range']['results']))
# Check a sample facet dict has the right keys
datum = data['price_range']['results'][1]
for key in ('count', 'disabled', 'name', 'deselect_url',
'selected', 'show_count'):
self.assertTrue(key in datum)
self.assertEqual(datum['count'], 21)
self.assertTrue(datum['selected'])
| QLGu/django-oscar | tests/unit/search/munger_tests.py | Python | bsd-3-clause | 3,715 | 0.000269 |
#
# Autogenerated by Thrift Compiler (0.8.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException
from ttypes import *
| humangeo/rawes | rawes/thrift_elasticsearch/constants.py | Python | apache-2.0 | 220 | 0 |
"""
The :mod:`sklearn.lda` module implements Linear Discriminant Analysis (LDA).
"""
# Authors: Matthieu Perrot
# Mathieu Blondel
import warnings
import numpy as np
from scipy import linalg
from .base import BaseEstimator, ClassifierMixin, TransformerMixin
from .utils.extmath import logsumexp
from .utils.fixes import unique
from .utils import check_arrays, array2d
__all__ = ['LDA']
class LDA(BaseEstimator, ClassifierMixin, TransformerMixin):
"""
Linear Discriminant Analysis (LDA)
A classifier with a linear decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that
all classes share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality
of the input, by projecting it to the most discriminative
directions.
Parameters
----------
n_components: int
Number of components (< n_classes - 1) for dimensionality reduction
priors : array, optional, shape = [n_classes]
Priors on classes
Attributes
----------
`means_` : array-like, shape = [n_classes, n_features]
Class means
`xbar_` : float, shape = [n_features]
Over all mean
`priors_` : array-like, shape = [n_classes]
Class priors (sum to 1)
`covariance_` : array-like, shape = [n_features, n_features]
Covariance matrix (shared by all classes)
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.qda.QDA: Quadratic discriminant analysis
"""
def __init__(self, n_components=None, priors=None):
self.n_components = n_components
self.priors = np.asarray(priors) if priors is not None else None
if self.priors is not None:
if (self.priors < 0).any():
raise ValueError('priors must be non-negative')
if self.priors.sum() != 1:
print 'warning: the priors do not sum to 1. Renormalizing'
self.priors = self.priors / self.priors.sum()
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""
Fit the LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
store_covariance : boolean
If True the covariance matrix (shared by all classes) is computed
and stored in `self.covariance_` attribute.
"""
X, y = check_arrays(X, y, sparse_format='dense')
self.classes_, y = unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = np.bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
# Group means n_classes*n_features matrix
means = []
Xc = []
cov = None
if store_covariance:
cov = np.zeros((n_features, n_features))
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
# centered group data
Xgc = Xg - meang
Xc.append(Xgc)
if store_covariance:
cov += np.dot(Xgc.T, Xgc)
if store_covariance:
cov /= (n_samples - n_classes)
self.covariance_ = cov
self.means_ = np.asarray(means)
Xc = np.concatenate(Xc, 0)
# ----------------------------
# 1) within (univariate) scaling by with classes std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = float(1) / (n_samples - n_classes)
# ----------------------------
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear")
# Scaling of within covariance is: V' 1/S
scaling = (V[:rank] / std).T / S[:rank]
## ----------------------------
## 3) Between variance scaling
# Overall mean
xbar = np.dot(self.priors_, self.means_)
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(means - xbar).T).T, scaling)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use svd to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
# compose the scalings
self.scaling = np.dot(scaling, V.T[:, :rank])
self.xbar_ = xbar
# weight vectors / centroids
self.coef_ = np.dot(self.means_ - self.xbar_, self.scaling)
self.intercept_ = (-0.5 * np.sum(self.coef_ ** 2, axis=1) +
np.log(self.priors_))
return self
@property
def classes(self):
warnings.warn("LDA.classes is deprecated and will be removed in 0.14. "
"Use LDA.classes_ instead.", DeprecationWarning,
stacklevel=2)
return self.classes_
def _decision_function(self, X):
X = array2d(X)
# center and scale data
X = np.dot(X - self.xbar_, self.scaling)
return np.dot(X, self.coef_.T) + self.intercept_
def decision_function(self, X):
"""
This function return the decision function values related to each
class on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def transform(self, X):
"""
Project the data so as to maximize class separation (large separation
between projected class means and small variance within each class).
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
X_new : array, shape = [n_samples, n_components]
"""
X = array2d(X)
# center and scale data
X = np.dot(X - self.xbar_, self.scaling)
n_comp = X.shape[1] if self.n_components is None else self.n_components
return np.dot(X, self.coef_[:n_comp].T)
def predict(self, X):
"""
This function does classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""
This function return posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes]
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""
This function return posterior log-probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples, n_classes]
"""
values = self._decision_function(X)
loglikelihood = (values - values.max(axis=1)[:, np.newaxis])
normalization = logsumexp(loglikelihood, axis=1)
return loglikelihood - normalization[:, np.newaxis]
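# Hedged usage sketch (not part of the original module): using a fitted model for
# dimensionality reduction, as described in the class docstring. With two classes
# at most one discriminative component is available.
#
#   import numpy as np
#   from sklearn.lda import LDA
#   X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
#   y = np.array([1, 1, 1, 2, 2, 2])
#   X_new = LDA(n_components=1).fit(X, y).transform(X)  # shape (6, 1)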
| mrshu/scikit-learn | sklearn/lda.py | Python | bsd-3-clause | 9,304 | 0.000215 |
"""
simpleSetup2: runs after (POST) the JS setup.
NOTE: for 1.2, USERS (and their signatures) are still set up here. The next jsSetup will take this over,
and the User setup part will be removed from here.
"""
import os
import sys
import logging
import time
sys.path = ['rasUtilities'] + sys.path
import OSEHRASetup
from OSEHRAHelper import ConnectToMUMPS, PROMPT
logging.basicConfig(level=logging.INFO,
#filename='debug.log',
#format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
"""
ConnectToMUMPS relies on environment:
- is GTM => defined os.getenv('gtm_dist') == /home/nodevista/lib/gtm
- is Linux => defined sys.platform == 'linux2'
"""
# print "Platform", sys.platform, "GT.M MUMPS VM", os.getenv('gtm_dist'), "GTM Prompt", os.getenv("gtm_prompt")
LOGFILE = '/home/nodevista/log/simpleSetup2.txt'
"""
Expect to be called from Shell - PRINT can be read with
result=`python simpleSetup.py`
if [ "$result" != "OK" ]; then ...
"""
def simpleSetup2():
try:
print "Connecting to MUMPS roll n scroll ..."
VistA=ConnectToMUMPS(LOGFILE)
except:
print "EXIT_PYS_CANT_CONNECT_TO_MUMPS"
return
# NB: simpleSetup and postImportSetupBasics should go too
try:
print "Now setting up Users (signatures only now) ..."
postImportSetupUsers(VistA)
except Exception as e:
print "EXIT_PYS_PROBLEM_SETTING_USERS_BUT_GOING_ON"
VistA=ConnectToMUMPS(LOGFILE)
try:
print "Now setting up Patients ..."
# have to reset VistA as Signature Setup halts from VISTA
time.sleep(10)
VistA=ConnectToMUMPS(LOGFILE) # reset up VISTA
postImportSetupPatients(VistA)
except:
print "EXIT_PYS_CANT_SETUP_PATIENTS"
return
print "Setup User, Patient ... Complete OK"
def postImportSetupUsers(VistA):
"""
Setup Users - paired down in v1.2. Now only resetting signatures.
"""
# Required to add Patient, User etc
OSEHRASetup.addSystemManager(VistA)
# Open FileMan and create the VistA Health Care institution
OSEHRASetup.addInstitution(VistA,"VISTA HEALTH CARE","999")
# Create the Medical Center Division of
# the VistA Health Care institution
OSEHRASetup.addDivision(VistA,'VISTA MEDICAL CENTER',"6101","999")
# The Sikuli test for CPRS orders a Streptozyme test for the patient
# This information ensures the test can be ordered at the VistA Health care
# Facility
OSEHRASetup.setupStrepTest(VistA)
OSEHRASetup.signonZU(VistA,"SM1234","SM1234!!")
"""
Note that these verify codes are temporary - VISTA forces a reset, which is done as part of
the electronic signature setups below. It is the reset code that will be used from
now on.
"""
OSEHRASetup.addDoctor(VistA,"ALEXANDER,ROBERT","RA","000000029","M","fakedoc1","2Doc!@#$")
#Enter the Nurse Mary Smith
OSEHRASetup.addNurse(VistA,'SMITH,MARY','MS','000000030','F','fakenurse1','2Nur!@#$')
# Add a clerk user with permissions for Problem List Data entry
OSEHRASetup.addClerk(VistA,"CLERK,JOE","JC","000000112","M","fakeclerk1","2Cle!@#$")
# Add a Pharmacist
OSEHRASetup.addPharmacist(VistA,"SHARMA,FRED","FS","000000031","M","fakepharma1","2Pha!@#$");
#Create a new Order Menu
OSEHRASetup.createOrderMenu(VistA)
#Give all users of the instance permission to mark allergies as "Entered in error')
OSEHRASetup.addAllergiesPermission(VistA)
#Give Mary Smith permission to create shared templates
OSEHRASetup.addTemplatePermission(VistA,"MS")
# Add clinic via the XUP menu to allow scheduling
OSEHRASetup.createClinic(VistA,'VISTA HEALTH CARE','VHC','M')
"""
The sleep and ConnectToMUMPS are needed because createClinic halts the connection and
the signature setup does the same. We could debug and stop the halts, but since this is
being replaced with JS, it is not worth it.
Same "logic" is in OSEHRA's PostImportSetupScript.py
"""
time.sleep(10)
VistA=ConnectToMUMPS(LOGFILE)
#Set up the Doctors electronic signature
OSEHRASetup.setupElectronicSignature(VistA,"fakedoc1",'2Doc!@#$','1Doc!@#$','ROBA123')
VistA=ConnectToMUMPS(LOGFILE)
# #Set up the Nurse electronic signature
OSEHRASetup.setupElectronicSignature(VistA,"fakenurse1","2Nur!@#$","1Nur!@#$","MARYS123")
VistA=ConnectToMUMPS(LOGFILE)
# #Set up the Clerk verification code
OSEHRASetup.setupElectronicSignature(VistA,"fakeclerk1","2Cle!@#$","1Cle!@#$","CLERKJ123")
def postImportSetupPatients(VistA):
    # Add patient to the instance using the registration menu
    # and the Register a Patient menu option.
    # Not using the Clerk user to avoid dropping the connection on the error when trying to connect to the MPI.
# The patient can be a veteran but not service connected
# Function arguments:
# VistA, Patient Name, Patient Sex,Patient DOB, Patient SSN, Patient Veteran?
OSEHRASetup.addPatient(VistA,'dataFiles/patdata0.csv')
def main():
simpleSetup2()
if __name__ == "__main__":
main()
| vistadataproject/nodeVISTA | setupDocker/pySetup/simpleSetup2.py | Python | agpl-3.0 | 5,139 | 0.018681 |