Dataset schema (29 columns; ⌀ marks nullable columns):

| Column | Dtype | Range / values |
|---|---|---|
| hexsha | stringlengths | 40 – 40 |
| size | int64 | 1 – 1.03M |
| ext | stringclasses | 10 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3 – 239 |
| max_stars_repo_name | stringlengths | 5 – 130 |
| max_stars_repo_head_hexsha | stringlengths | 40 – 78 |
| max_stars_repo_licenses | listlengths | 1 – 10 |
| max_stars_count ⌀ | int64 | 1 – 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | stringlengths | 24 – 24 |
| max_stars_repo_stars_event_max_datetime ⌀ | stringlengths | 24 – 24 |
| max_issues_repo_path | stringlengths | 3 – 239 |
| max_issues_repo_name | stringlengths | 5 – 130 |
| max_issues_repo_head_hexsha | stringlengths | 40 – 78 |
| max_issues_repo_licenses | listlengths | 1 – 10 |
| max_issues_count ⌀ | int64 | 1 – 67k |
| max_issues_repo_issues_event_min_datetime ⌀ | stringlengths | 24 – 24 |
| max_issues_repo_issues_event_max_datetime ⌀ | stringlengths | 24 – 24 |
| max_forks_repo_path | stringlengths | 3 – 239 |
| max_forks_repo_name | stringlengths | 5 – 130 |
| max_forks_repo_head_hexsha | stringlengths | 40 – 78 |
| max_forks_repo_licenses | listlengths | 1 – 10 |
| max_forks_count ⌀ | int64 | 1 – 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | stringlengths | 24 – 24 |
| max_forks_repo_forks_event_max_datetime ⌀ | stringlengths | 24 – 24 |
| content | stringlengths | 1 – 1.03M |
| avg_line_length | float64 | 1 – 958k |
| max_line_length | int64 | 1 – 1.03M |
| alphanum_fraction | float64 | 0 – 1 |

Each record below follows this column order: one line of `|`-separated metadata cells, then the raw file content, then the trailing avg_line_length, max_line_length and alphanum_fraction cells.
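As a quick orientation, here is a minimal sketch of how a dataset with the schema above could be inspected with the Hugging Face `datasets` library; the dataset path is a placeholder and only columns listed in the schema are accessed.

```python
from datasets import load_dataset

# Placeholder repository id: substitute the real dataset this dump comes from.
ds = load_dataset("path/to/this-code-dataset", split="train", streaming=True)

for record in ds.take(3):
    # 'content' holds the raw source file; the other cells are repository metadata.
    print(record["max_stars_repo_name"],
          record["max_stars_repo_path"],
          record["size"],
          record["alphanum_fraction"])
```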
794888b89fe9a82c25c6f0b309d5fbacd559fb99 | 44,347 | py | Python | resources/lib/frameMyMusic.py | sitronet/script.kodijivelette | 7c75e9f5234b0f89cd76f8be826b33aee7ed5e5a | ["Apache-2.0"] | null | null | null | resources/lib/frameMyMusic.py | sitronet/script.kodijivelette | 7c75e9f5234b0f89cd76f8be826b33aee7ed5e5a | ["Apache-2.0"] | null | null | null | resources/lib/frameMyMusic.py | sitronet/script.kodijivelette | 7c75e9f5234b0f89cd76f8be826b33aee7ed5e5a | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
global savepath
global Kodi
Kodi = True
#sys.path.append(os.path.join(os.path.dirname(__file__), "resources", "lib"))
import threading
import time
import urllib
import os
from resources.lib import connexionClient, ecoute, outils
from resources.lib.ecoute import Souscription
from resources.lib import pyxbmctExtended
from resources.lib.outils import debug
import json
if Kodi:
import xbmc
import xbmcgui
import xbmcaddon
import pyxbmct
SIZE_WIDTH_pyxbmct = 1280
SIZE_HEIGHT_pyxbmct = 720
SEIZE = 16 * 4 # grid columns for a 16:9 screen: 16, 32 or 64 are possible; here the window is a 64x36 grid
NEUF = 9 * 4 # grid rows: 9, 18 or 36; here 36
ADDON = xbmcaddon.Addon()
ARTWORK = xbmc.translatePath(os.path.join(ADDON.getAddonInfo('path'), 'resources', 'skins', 'Default', 'media'))
savepath = xbmc.translatePath('special://temp')
# Kodi key action codes.
# More codes available in xbmcgui module
ACTION_PREVIOUS_MENU = 10
"""ESC action"""
ACTION_NAV_BACK = 92
"""Backspace action"""
ACTION_MOVE_LEFT = 1
"""Left arrow key"""
ACTION_MOVE_RIGHT = 2
"""Right arrow key"""
ACTION_MOVE_UP = 3
"""Up arrow key"""
ACTION_MOVE_DOWN = 4
"""Down arrow key"""
ACTION_MOUSE_WHEEL_UP = 104
"""Mouse wheel up"""
ACTION_MOUSE_WHEEL_DOWN = 105
"""Mouse wheel down"""
ACTION_MOUSE_DRAG = 106
"""Mouse drag"""
ACTION_MOUSE_MOVE = 107
"""Mouse move"""
ACTION_MOUSE_LEFT_CLICK = 100
"""Mouse click"""
TIME_OF_LOOP_SUBSCRIBE = ecoute.TIME_OF_LOOP_SUBSCRIBE
from resources.lib.outils import debug
DEBUG_LEVEL = xbmc.LOGDEBUG
TAGS = 'aCdejJKlstuwxy'
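# Note on TAGS (inferred from the example responses quoted later in this file): each letter asks the
# LMS CLI to include an extra field in 'tracks'/'songinfo' replies, e.g. a=artist, C=compilation,
# d=duration, e=album_id, j=coverart, J=artwork_track_id, l=album, s=artist_id, t=tracknum, u=url,
# x=remote, y=year (K and w do not show up in the local-file examples).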
class MyMusic(pyxbmctExtended.BackgroundDialogWindow):
def __init__(self, *args , **kwargs ):
#title = args[0]
super(MyMusic, self).__init__()
self.recevoirEnAttente = threading.Event()
self.recevoirEnAttente.clear()
self.Abonnement = threading.Event()
self.threadRunning = True
self.WindowPlaying = xbmcgui.getCurrentWindowId()
debug('fenetre de class MyMusic n° : ' + str(self.WindowPlaying), DEBUG_LEVEL)
debug('Create Instance frame MyMusic ' , DEBUG_LEVEL)
self.playerid = ''
self.geometrie()
debug('geometrie set', DEBUG_LEVEL)
self.controlMenus()
debug('control set', DEBUG_LEVEL)
#self.set_navigation()
debug('navigation set', DEBUG_LEVEL)
self.connexionEvent()
self.connect(self.listMenu_principal, lambda : self.f_detailAlbums())
self.connect(self.listMenu_detailAlbums, lambda : self.f_listeTracks(menudeprovenance='listMenu_detailAlbums'))
self.connect(self.listMenu_playlist, self.f_detailItemPlaylist)
self.connect(pyxbmct.ACTION_NAV_BACK, self.quit_listing) # rather self.close
self.connect(pyxbmct.ACTION_PREVIOUS_MENU, self.quit_listing) # rather self.close
self.setFocus(self.listMenu_principal)
def geometrie(self):
SIZESCREEN_HEIGHT = xbmcgui.getScreenHeight() # exemple # 1080
SIZESCREEN_WIDTH = xbmcgui.getScreenWidth() # 1920
# replaced by pyxbmct but need for the size cover until the fix
self.GRIDSCREEN_Y, Reste = divmod(SIZESCREEN_HEIGHT, 10) # 108
self.GRIDSCREEN_X, Reste = divmod(SIZESCREEN_WIDTH, 10) # 192
self.screenx = SIZESCREEN_WIDTH
self.screeny = SIZESCREEN_HEIGHT
debug('Real Size of Screen : ' + str(self.screenx) + ' x ' + str(self.screeny), DEBUG_LEVEL)
if self.screenx > SIZE_WIDTH_pyxbmct:
self.screenx = SIZE_WIDTH_pyxbmct
self.screeny = SIZE_HEIGHT_pyxbmct
self.image_dir = ARTWORK # path to pictures used in the program (future development, todo)
self.cover_jpg = self.image_dir + '/music.png' # for startup, then updated
self.image_background = self.image_dir + '/fond-noir.jpg' # in a next release this could be changed by users
self.image_button_pause = self.image_dir + '/pause.png' # get from Xsqueeze
self.image_button_stop = self.image_dir + '/stop.png' # get from Xsqueeze
self.image_button_play = self.image_dir + '/play.png' # get from Xsqueeze
self.textureback_slider_duration = self.image_dir + '/slider_back.png' # get from plugin audio spotify
self.texture_slider_duration = self.image_dir + '/slider_button_new.png'
self.image_list_focus = self.image_dir + '/MenuItemFO.png' # myself
#pyxbmct :
self.setGeometry(self.screenx , self.screeny , NEUF, SEIZE)
debug('Size of Screen pyxbmct fix to : ' + str(self.screenx) + ' x ' + str(self.screeny), DEBUG_LEVEL)
# cover when playing
#SIZECOVER_X = int(self.screenx / SEIZE * 28 )
SIZECOVER_X = (SEIZE // 2) - 6
self.sizecover_x = SIZECOVER_X
#SIZECOVER_Y = self.GRIDSCREEN_Y * 3 # and reserve a sized frame to covers,attention SIZECOVER_X != SIZECOVER_Y
debug('Taille pochette : ' + str(SIZECOVER_X) + ' x ' + str(SIZECOVER_X) , DEBUG_LEVEL)
ligneButton = NEUF - 3
SLIDER_INIT_VALUE = 0
# reserved area to display cover.jpg
self.cover_jpg = self.image_dir + '/vinyl.png' # for startup, then updated
# need some adjustment
# reserved area to display cover.jpg
self.pochette = pyxbmct.Image(self.cover_jpg)
self.placeControl(control=self.pochette,
row=3,
column=(SEIZE // 2) ,
rowspan=28,
columnspan=29) # todo to fix
#self.pochette.setImage(self.cover_jpg)
# Duration slider
self.slider_duration = pyxbmct.Slider(textureback=self.textureback_slider_duration)
self.placeControl(control=self.slider_duration,
row=ligneButton - 2,
column=(SEIZE // 2),
rowspan=1,
columnspan=29 - 2,
pad_x=1)
self.slider_duration.setPercent(SLIDER_INIT_VALUE)
# duration labels
self.labelduree_jouee = pyxbmct.Label('',textColor ='0xFF808080')
self.placeControl(control=self.labelduree_jouee,
row=ligneButton - 2,
column=(SEIZE // 2) ,
rowspan=2,
columnspan=5,
pad_x=5,
pad_y=5)
self.labelduree_fin = pyxbmct.Label('',textColor ='0xFF888888')
self.placeControl(control=self.labelduree_fin,
row=ligneButton - 2,
column=(SEIZE // 2) + (29 - 5),
rowspan=2,
columnspan=4,
pad_x=5,
pad_y=5)
def controlMenus(self):
''' Set the size of the item lists in the menu lists '''
self.title_label = pyxbmct.Label('', textColor='0xFF808080')
self.placeControl(self.title_label, 0 , 2 , 1 , 10)
row_depart = 2
col_depart = 0
espace_row = 35
espace_col = SEIZE / 4
hauteur_menu = 25
self.listMenu_principal = pyxbmct.List(buttonFocusTexture=self.image_list_focus, _imageWidth= 40 , _imageHeight = 40 , _itemHeight=42)
self.listMenu_detailAlbums = pyxbmct.List(buttonFocusTexture=self.image_list_focus, _imageWidth= 30 , _imageHeight = 30 , _itemHeight=30)
self.listMenu_playlist = pyxbmct.List(buttonFocusTexture=self.image_list_focus, _imageWidth= 40 , _imageHeight = 40 , _itemHeight=42)
self.placeControl(self.listMenu_principal , row_depart , col_depart , espace_row, espace_col )
self.placeControl(self.listMenu_detailAlbums , row_depart , col_depart + espace_col, espace_row, espace_col )
self.placeControl(self.listMenu_playlist , row_depart , col_depart , espace_row, (SEIZE / 2) - 2 )
# VERY IMPORTANT TO GET THE FOCUS
# Add items to the list: the focus must be requested before the list is filled from Plugin.Plugin
self.listMenu_principal.addItem('.') # will then be filled by the Plugin
self.listMenu_playlist.addItem('.')
def connexionEvent(self):
# Connect key and mouse events for list navigation feedback.
self.connectEventList(
[pyxbmct.ACTION_MOVE_DOWN,
pyxbmct.ACTION_MOVE_UP,
pyxbmct.ACTION_MOUSE_WHEEL_DOWN,
pyxbmct.ACTION_MOUSE_WHEEL_UP,
pyxbmct.ACTION_MOUSE_MOVE,
pyxbmct.ACTION_MOVE_LEFT,
pyxbmct.ACTION_MOVE_RIGHT],
self.list_Menu_Navigation)
def onAction(self, action):
"""
Catch button actions.
``action`` is an instance of :class:`xbmcgui.Action` class.
"""
if action == ACTION_PREVIOUS_MENU:
debug('Previous_menu' , DEBUG_LEVEL)
self.quit_listing()
elif action == ACTION_NAV_BACK:
debug('nav_back' , DEBUG_LEVEL)
self.quit_listing()
else:
debug('else condition onAction in FrameMyMusic Class MyMusic' , DEBUG_LEVEL)
self._executeConnected(action, self.actions_connected)
def quit_listing(self): # todo: to be tested
self.WindowPlayinghere = xbmcgui.getCurrentWindowId()
debug('fenetre listing is exiting: ' + str(self.WindowPlayinghere), DEBUG_LEVEL)
#debug('fenetre enregistrée dans methode now_is_playing n° : ' + str(self.Window_is_playing), DEBUG_LEVEL) # attribute error here
#self.Abonnement.clear() # -> AttributeError: 'SlimIsPlaying' object has no attribute 'Abonnement'
# todo : tester appel fonction du prg principal
# frameMenu.FenetreMenu.desabonner() -> TypeError: unbound method desabonner() must be called with FenetreMenu
# instance as first argument (got nothing instead)
# self.subscribe.resiliersouscription() # -> AttributeError: 'SlimIsPlaying' object has no attribute subscribe
self.connectInterface()
self.get_playerid()
self.subscribe = ecoute.Souscription(self.InterfaceCLI, self.playerid)
self.subscribe.resiliersouscription()
# should we retrieve the response?
self.Abonnement.clear()
self.threadRunning = False
self.close()
def set_navigation(self):
# Set navigation between controls (Button, list or slider)
# A control has to be added to a window first, otherwise a RuntimeError is raised
self.listMenu_principal.controlRight(self.listMenu_detailAlbums)
self.listMenu_detailAlbums.controlRight(self.listMenu_principal)
self.listMenu_principal.controlLeft(self.listMenu_detailAlbums)
self.listMenu_detailAlbums.controlLeft(self.listMenu_principal)
# Set initial focus , don't forget to fill an item before setfocus
self.setFocus(self.listMenu_principal)
def list_Menu_Navigation(self):
# todo: decide what to do when an item is selected in the list menu
if self.getFocus() == self.listMenu_principal:
self.itemSelection = self.listMenu_principal.getListItem(
self.listMenu_principal.getSelectedPosition()).getLabel()
self.title_label.setLabel(self.itemSelection)
elif self.getFocus() == self.listMenu_detailAlbums:
self.itemSelectiondetail = self.listMenu_detailAlbums.getListItem(
self.listMenu_detailAlbums.getSelectedPosition()).getLabel()
self.title_label.setLabel(self.itemSelectiondetail)
def f_detailAlbums(self):
self.get_playerid()
self.get_ident_server()
self.connectInterface()
self.listMenu_detailAlbums.reset()
labelajouer = self.listMenu_principal.getListItem(self.listMenu_principal.getSelectedPosition()).getLabel()
artist = self.listMenu_principal.getListItem(self.listMenu_principal.getSelectedPosition()).getProperty('artist')
self.title_label.setLabel(labelajouer)
# retrieve the filename cover.jpg from previous menulist and print it on coverbox
file_image = self.listMenu_principal.getListItem(self.listMenu_principal.getSelectedPosition()).getProperty('image')
if file_image:
self.pochette.setImage(file_image)
album_id = self.listMenu_principal.getListItem(self.listMenu_principal.getSelectedPosition()).getProperty('id')
artist_id = self.listMenu_principal.getListItem(self.listMenu_principal.getSelectedPosition()).getProperty('artist_id')
requete = 'tracks 0 100 artist_id:' + artist_id + ' album_id:' + album_id + ' sort:tracknum ' + 'tags:' + TAGS
self.InterfaceCLI.viderLeBuffer()
self.InterfaceCLI.sendtoCLISomething(requete)
reponse = self.InterfaceCLI.receptionReponseEtDecodage()
'''example response:
tracks|0|100|artist_id:4216|album_id:1683|tags:aCdejJKlstuwxy|
id:20179|title:Au pays des Merveilles de Juliet|artist:Yves Simon|compilation:0|duration:144.144|
album_id:1683|coverart:1|artwork_track_id:afe480cb|album:Au Pays Des Merveilles De Juliet|artist_id:4216|tracknum:3|
url:file:///i-data/e0c90389/music/Musique/Yves%20Simon/03%20Au%20pays%20des%20Merveilles%20de%20Juliet.flac|
remote:0|year:2007|
[...]
id:20178|title:Rue de la Huchette|artist:Yves Simon|compilation:0|duration:139.808|album_id:1683|coverart:1|
artwork_track_id:afe480cb|album:Au Pays Des Merveilles De Juliet|artist_id:4216|tracknum:2|
url:file:///i-data/e0c90389/music/Musique/Yves%20Simon/02%20Rue%20de%20la%20Huchette.flac|
remote:0|year:2007|
count:10
'''
# strip the header and the tail
texte_en_liste_a_traiter = reponse.split('|count:')
debug('texte_a_traiter : ' + str(texte_en_liste_a_traiter) , DEBUG_LEVEL )
if texte_en_liste_a_traiter == ['']:
# error in the response
outils.functionNotYetImplemented()
return
try:
nombreDItemsTracks = texte_en_liste_a_traiter.pop()
except IndexError:
outils.functionNotYetImplemented()
return
try:
texte_a_traiter_titre = texte_en_liste_a_traiter.pop()
texte_en_liste_a_traiter_entete = texte_a_traiter_titre.split('tags:' + TAGS + '|' )
debug('texte_a_traiter titre: ' + str(texte_en_liste_a_traiter_entete) , DEBUG_LEVEL )
except IndexError:
item = xbmcgui.ListItem()
item.setLabel('Get an Error from Server! ')
self.listMenu_detailAlbums.addItem(item)
return
# example:
try:
lesItemsTracksNormalised = texte_en_liste_a_traiter_entete[1]
debug('lesItemsTracksNormalised : ' + lesItemsTracksNormalised, DEBUG_LEVEL )
except IndexError:
return
try:
lachainedesItemsTracks = lesItemsTracksNormalised.split('|') #
debug('detail Albums : ' + str(lachainedesItemsTracks) , DEBUG_LEVEL)
except IndexError:
debug('functionNotYetImplemented detailAlbums 310', DEBUG_LEVEL)
outils.functionNotYetImplemented()
return
'''
example of album details:
['id:23528', 'title:Allende', 'artist:1984 - En public au Theatre des Champs Elysees', 'compilation:0',
'duration:270.367', 'album_id:1967', 'coverart:0', 'album:Cd3', 'artist_id:4425', 'tracknum:23',
'url:file:///i-data/e0c/music/TOUT_Leo_Ferre_ou_Presque...48_CD_et_Extras/nde.mp3', 'remote:0',
'year:0',
'id:23531', 'title:Avec le temps', 'artist:1984 - En public au Theatre des Champs Elysees', 'compilation:0',
'duration:169.795', 'album_id:1967', 'coverart:0', 'album:Cd3', 'artist_id:4425', 'tracknum:26',
'url:file:///i-data/e0c/music/TOUT_Leo_Ferre_ou_Presque...48_CD_et_Extra',
'remote:0', 'year:0',
[...]
, 'id:23529', 'title:Words Words Words', 'artist:1984 - En public au Theatre des Champs Elysees', 'compilation:0',
'duration:219.689', 'album_id:1967', 'coverart:0', 'album:Cd3', 'artist_id:4425', 'tracknum:24',
'url:file:///i-data/e0c/music/TOUT_Leo_Ferre_ou_Presque...48_CD_et_Extrasrds.mp3',
'remote:0', 'year:0']
'''
secondEtsuivant = False
index = 0
indice = '' # fallback if tracknum doesn't exist
titre=''
year=''
duree = ''
itemsTracks= [] # une liste
itemtampon = xbmcgui.ListItem()
for chaine in lachainedesItemsTracks:
debug('detail album 1 item : ' + str(chaine), DEBUG_LEVEL)
try:
clef, valeur = chaine.split(':', 1)
except ValueError:
# no ':' separator in this chunk (e.g. stray text around the title), so skip it
continue
if clef == 'id':
if secondEtsuivant:
itemtampon.setLabel(indice + ' - ' + titre + ' - ' + year + ' : ' + duree + ' .')
itemtampon.setProperty('album_id' , album_id)
itemtampon.setProperty('artist_id' , artist_id)
itemsTracks.append(itemtampon)
itemtampon = xbmcgui.ListItem()
index = index + 1
debug( 'Ajout de l item dans listItem tampon' + titre + ' ' + itemtampon.getProperty('track_id'), DEBUG_LEVEL)
itemtampon.setProperty('track_id', valeur)
secondEtsuivant = True
elif clef == 'title':
titre = valeur
#itemtampon.setLabel(valeur)
elif clef == 'duration':
duree = outils.getInHMS(valeur)
elif clef == 'artwork_track_id':
hashcode_artwork = valeur
completeNameofFile = self.get_artwork(hashcode_artwork=hashcode_artwork)
#itemtampon.setArt({'thumb': completeNameofFile})
itemtampon.setProperty('image' , completeNameofFile)
elif clef == 'tracknum':
itemtampon.setProperty(clef, valeur)
indice = valeur
elif clef == 'year':
itemtampon.setProperty(clef, valeur)
year = valeur
else:
# not sure that we have to keep other value
# for now we keep them but todo pass them
itemtampon.setProperty(clef, valeur)
# once the 'for' loop exits, fill the list with the last itemtampon:
itemtampon.setProperty('album_id' , album_id)
itemtampon.setProperty('artist_id' , artist_id)
itemtampon.setLabel(indice + ' - ' + titre + ' - ' + year + ' : ' + duree + ' .')
itemsTracks.append(itemtampon)
debug( 'Ajout de l item dans listItem tampon ' + titre + ' ' + itemtampon.getProperty('track_id'), DEBUG_LEVEL)
#sort the itemsTracks list by tracknum todo test this function or similar
#sorted(itemsTracks, key=lambda tracknum: tracknum[1]) # sort by n° track not always true
for item in itemsTracks:
debug('ajout de item tracks dans menu detailAlbum : ' + item.getLabel() , DEBUG_LEVEL)
self.listMenu_detailAlbums.addItem(item)
# End of function f_detailAlbums
def f_listeTracks(self, menudeprovenance):
if menudeprovenance == 'listMenu_detailAlbums' :
labelajouer = self.listMenu_detailAlbums.getListItem(self.listMenu_detailAlbums.getSelectedPosition()).getLabel()
#cmd = self.command
#cmd = self.listMenu_ItemRadios.getListItem( self.listMenu_ItemRadios.getSelectedPosition()).getProperty('cmd')
#cmd ='picks' , ....Radio, shoutcast ...
# cmd comes from listMenu_1 it is not an error
tracknum = self.listMenu_detailAlbums.getListItem(
self.listMenu_detailAlbums.getSelectedPosition()).getProperty('tracknum')
track_id = self.listMenu_detailAlbums.getListItem(
self.listMenu_detailAlbums.getSelectedPosition()).getProperty('track_id')
album_id = self.listMenu_detailAlbums.getListItem(
self.listMenu_detailAlbums.getSelectedPosition()).getProperty('album_id')
artist_id = self.listMenu_detailAlbums.getListItem(
self.listMenu_detailAlbums.getSelectedPosition()).getProperty('artist_id')
debug('launch to play : ' + labelajouer + ' playlistcontrol cmd:load track_id:' + track_id , DEBUG_LEVEL )
choix = xbmcgui.Dialog().select(heading= labelajouer, list= ['Play Song now', \
'Play Album now ' ,\
'Play Song after the current song' , \
'Add Song to Playlist' , \
'More Info'])
if choix == 0:
# example: playlistcontrol cmd:load album_id:1683
requete = self.playerid + ' playlistcontrol cmd:load track_id:' + str(track_id)
elif choix == 1:
requete = self.playerid + ' playlistcontrol cmd:load album_id:' + str(album_id)
elif choix == 2:
requete = self.playerid + ' playlistcontrol cmd:insert track_id:' + str(track_id)
elif choix == 3:
requete = self.playerid + ' playlistcontrol cmd:add track_id:' + str(track_id)
if 0 <= choix <= 3:
debug('requete : ' + requete , DEBUG_LEVEL )
self.InterfaceCLI.sendtoCLISomething(requete)
reponse = self.InterfaceCLI.receptionReponseEtDecodage()
# now it is going to play
requetePlay = self.playerid + ' playlist play'
self.InterfaceCLI.sendtoCLISomething(requetePlay)
reponse = self.InterfaceCLI.receptionReponseEtDecodage()
del reponse
# then stay on the screen and update duration and progress listmenu
self.update_current_track_playing()
elif choix == 4:
# need more stuff to dig songinfo
requete = self.playerid + ' songinfo 0 100 track_id:' + str(track_id)
self.InterfaceCLI.sendtoCLISomething(requete)
reponse = self.InterfaceCLI.receptionReponseEtDecodage()
try:
listesonginfo = reponse.split('|')
except ValueError:
outils.functionNotYetImplemented()
textInfo = ''
for field in listesonginfo:
textInfo = textInfo + field + '\r\n'
dialogSongInfo = xbmcgui.Dialog()
dialogSongInfo.textviewer('Song Info : ' + labelajouer , textInfo )
else:
# cancel asked
pass
# End of function f_listeTracks
def f_detailItemPlaylist(self):
# todo: rewrite this function to have a nice personal textbox in a frame
# with selected item
labelajouer = self.listMenu_playlist.getListItem(
self.listMenu_playlist.getSelectedPosition()).getLabel()
track_id = self.listMenu_playlist.getListItem(
self.listMenu_playlist.getSelectedPosition()).getProperty('tracked_id')
self.connectInterface()
self.get_playerid()
self.subscribe = ecoute.Souscription(self.InterfaceCLI, self.playerid)
self.subscribe.resiliersouscription()
self.InterfaceCLI.viderLeBuffer()
requete = self.playerid + ' songinfo 0 100 track_id:' + str(track_id)
self.InterfaceCLI.sendtoCLISomething(requete)
reponse = self.InterfaceCLI.receptionReponseEtDecodage()
try:
listesonginfo = reponse.split('|')
except ValueError:
outils.functionNotYetImplemented()
return
textInfo = ''
for field in listesonginfo:
textInfo = textInfo + field + '\r\n'
dialogSongInfo = xbmcgui.Dialog()
dialogSongInfo.textviewer('Song Info : ' + labelajouer , textInfo )
# copy/paste of the function from frameMenu.py
def update_current_track_playing(self):
self.subscribe = Souscription(self.InterfaceCLI, self.playerid )
self.subscribe.subscription()
# todo Q: how do we handle stopping the subscription loop?
# A: with the resiliersouscription() function
time.sleep(0.5)
timeoutdeRecherchedesPlayers = time.time() + 60 * 20 # todo: every 20 minutes we will search for the players again
timeEntreeDansLaBoucle = time.time()
compteur = 1
titreenlecture = ''
self.breakBoucle_A = False
self.Abonnement.set()
while self.Abonnement.is_set():
self.breakBoucle_A = False
if xbmc.Monitor().waitForAbort(0.5):
self.breakBoucle_A = True
self.Abonnement.clear()
break
timeoutdeTestdelaBoucle = time.time() + 60 * 10 # 10 minutes from now -for testing
while not self.breakBoucle_A: # main loop A of Subscribe
if time.time() > timeoutdeTestdelaBoucle:
debug('Timeout : break A ', DEBUG_LEVEL)
self.breakBoucle_A = True
if xbmc.Monitor().waitForAbort(0.5):
self.breakBoucle_A = True
self.Abonnement.clear()
# Todo: analyze this block
recupropre = self.InterfaceCLI.receptionReponseEtDecodage()
if 'subscribe:-' in recupropre: # end of subscription: resiliersouscription is sent by FramePlaying
# or another display when FramePlaying exits (its quit() function)
self.breakBoucle_A = True # must exit loop A (which doesn't exist here)
self.Abonnement.clear() # must exit the main loop
break
listeB = recupropre.split('subscribe:' + TIME_OF_LOOP_SUBSCRIBE + '|') # strip the beginning of the frame
# caution: this must match the same subscribe value used in ecoute.py
try:
textC = listeB[1] # keep the second frame, after subscribe...
except IndexError:
break
# update the position of the item in the list menu
indexdecurrentTitle = textC.find('cur_index:')
indexFincurrentTitle = textC.find('|', indexdecurrentTitle)
# debug('index debut : ' + str(indexdecurrentTitle) + ' fin : ' + str(indexFincurrentTitle), xbmc.LOGDEBUG)
playlist_current_index_title = textC[indexdecurrentTitle + 10: indexFincurrentTitle]
debug('current_index_title :' + playlist_current_index_title, DEBUG_LEVEL)
self.listMenu_detailAlbums.selectItem(int(playlist_current_index_title))
listeRetour = textC.split('|') # we get a list of items
dico = dict() # each list element has the form <val1>:<val2>
for x in listeRetour: # store them in a dictionary
c = x.split(':') # as key: value, where <val1> becomes the key
if len(c) < 2: # skip empty or malformed chunks
continue
if c[0] not in dico: # keep only the first occurrence of each key
clef = c[0]
dico[clef] = c[1] # the value can then be picked from the dict later
try:
pourcentagedureejouee = 100 * float(dico['time']) / float(dico['duration'])
debug('percent duree : ' + str(pourcentagedureejouee) + ' - time: ' + dico['time'],
xbmc.LOGDEBUG)
except KeyError:
pourcentagedureejouee = 0
try:
self.slider_duration.setPercent(pourcentagedureejouee)
except KeyError:
continue
try:
self.labelduree_jouee.setLabel(label=outils.getInHMS(dico['time']))
except KeyError:
continue
try:
self.labelduree_fin.setLabel(label=outils.getInHMS(dico['duration']))
except KeyError:
self.labelduree_fin.setLabel(label=outils.getInHMS(0.0))
if dico.get('title', '') != titreenlecture:
try:
self.title_label.setLabel(label='[B]' + dico['current_title'] + '[/B]')
except KeyError:
self.title_label.setLabel(label='')
pass
'''
self.InterfaceCLI.sendtoCLISomething('album ?')
reponse = self.InterfaceCLI.receptionReponseEtDecodage()
album = reponse.split('album|').pop()
self.jivelette.labelAlbum.reset()
if not '|album' in album:
self.jivelette.labelAlbum.addLabel(label='[B]' + album + '[/B]')
self.InterfaceCLI.sendtoCLISomething('artist ?')
reponse = self.InterfaceCLI.receptionReponseEtDecodage()
artist = reponse.split('artist|').pop()
self.jivelette.labelArtist.reset()
if not '|artist' in artist:
self.jivelette.labelArtist.addLabel(label='[B]' + artist + '[/B]')
'''
#self.update_coverbox(self.lmsip, self.lmswebport, self.playerid, compteur)
# log to monitor the loop iterations
compteur += 1
timedutour = time.time()
tempsparcouru = timedutour - timeEntreeDansLaBoucle
debug(str(compteur) + ' tour de boucle : ' + str(tempsparcouru), xbmc.LOGDEBUG)
# end of loop A: leaving subscribe
# end of the while loop
debug('End of Boucle in Update_curent_track in FrameMyMusic', DEBUG_LEVEL)
self.subscribe.resiliersouscription()
reponse = self.InterfaceCLI.receptionReponseEtDecodage()
debug('Send resiliersouscription in A update_current_track in FrameMyMusic', DEBUG_LEVEL)
self.InterfaceCLI.viderLeBuffer()
debug('End of fonction update_current_track_is_playing in FrameMyMusic, Bye', DEBUG_LEVEL)
# end of function update_current_track_is_playing
def connectInterface(self):
self.InterfaceCLI = connexionClient.InterfaceCLIduLMS()
def get_playerid(self):
self.Players = outils.WhatAreThePlayers()
self.playerid = self.Players.get_unplayeractif()
def get_ident_server(self):
self.Server = outils.WhereIsTheLMSServer()
self.nomserver = self.Server.LMSnom
self.lmsip = self.Server.LMSCLIip
self.lmswebport = self.Server.LMSwebport
def get_icon(self, index, urlicone):
'''
fetch the image or icon from the server or somewhere on the net
and store it in a temporary directory,
i.e. /tmp/ on unix;
special://tmp doesn't seem to work on LibreELEC
'''
filename = 'icon.image_' + str(index) + '.tmp'
completeNameofFile = os.path.join(savepath, filename)
debug('filename icon : ' + str(completeNameofFile), xbmc.LOGDEBUG)
if 'http' in urlicone:
urltoopen = urlicone
else:
if urlicone.startswith('/'):
debug('url icone avec /: ' + urlicone ,xbmc.LOGDEBUG )
#urltoopen = 'http://' + self.origine.rechercheduserveur.LMSCLIip + ':' + self.origine.rechercheduserveur.LMSwebport + '/' + urlicone
urltoopen = 'http://' + self.lmsip + ':' + self.lmswebport + urlicone
else:
debug('url icone sans /: ' + urlicone ,xbmc.LOGDEBUG )
urltoopen = 'http://' + self.lmsip + ':' + self.lmswebport + '/' + urlicone
try:
urllib.urlretrieve(urltoopen, completeNameofFile)
except IOError:
outils.functionNotYetImplemented()
debug('nom du fichier image : ' + completeNameofFile , DEBUG_LEVEL)
return completeNameofFile
# end of function get_icon, class Plugin_Generique
# test
def get_artwork(self, hashcode_artwork):
#http://<server>:<port>/music/<track_id>/cover.jpg
filename = 'icon.image_' + str(hashcode_artwork) + '.tmp'
completeNameofFile = os.path.join(savepath, filename)
urlimage = 'http://' + self.lmsip + ':' + self.lmswebport + '/music/' + str(hashcode_artwork) + '/cover.jpg'
try:
urllib.urlretrieve(urlimage, completeNameofFile)
except IOError:
pass
outils.functionNotYetImplemented()
debug('nom du fichier image : ' + completeNameofFile , DEBUG_LEVEL)
return completeNameofFile
def update_coverbox(self, lmsip, lmswebport, playerid, compteur):
'''
function that should update the display of the cover art;
in this version the cover is fetched from the server for the current player,
in another version the cover is fetched from the server using the tag provided
in the information about the song currently playing.
Same function in FramePlaying.py (redundancy)
:param lmsip:
:param lmswebport:
:param playerid:
:return:
'''
# build the url used to fetch the image
#http://<server>:<port>/music/current/cover.jpg?player=<playerid>
# example: http://192.168.1.101:9000/music/current/cover.jpg?player=00:04:20:17:1c:44
# or http://<server>:<port>/music/<track_id>/cover.jpg
# example:
urlcover = 'http://' + lmsip + ':' + lmswebport + \
'/music/current/cover.jpg?player=' + playerid # or self.playerID ?
debug(urlcover, DEBUG_LEVEL)
filename = 'pochette' + str(compteur) + '.tmp'
completeNameofFile = os.path.join(savepath , filename )
debug('filename tmp : ' + str(completeNameofFile), DEBUG_LEVEL)
urllib.urlretrieve(urlcover , completeNameofFile)
self.pochette.setImage(completeNameofFile) # xbmcgui function
#os.remove(completeNameofFile) # delete the file
# end of function update_cover
def futureFunction(self):
pass
def promptVolume(self):
volumeFrame = outils.VolumeFrameChild()
volumeFrame.doModal()
del volumeFrame
def promptContextMenu(self):
contextMenuFrame = outils.ContextMenuFrameChild()
contextMenuFrame.doModal()
del contextMenuFrame
class AllArtists(MyMusic):
def __init__(self, *args, **kwargs):
super(AllArtists, self).__init__()
self.connect(self.listMenu_principal, self.f_detailAlbums)
self.setFocus(self.listMenu_principal)
def onAction(self, action):
"""
Catch button actions.
``action`` is an instance of :class:`xbmcgui.Action` class.
"""
if action == ACTION_PREVIOUS_MENU:
debug('Previous_menu' , DEBUG_LEVEL)
self.quit_listing()
elif action == ACTION_NAV_BACK:
debug('nav_back' , DEBUG_LEVEL)
self.quit_listing()
else:
debug('else condition onAction in frameMyMusic class AllArtists' , DEBUG_LEVEL)
self._executeConnected(action, self.actions_connected)
def f_detailAlbums(self):
self.get_playerid()
self.get_ident_server()
self.connectInterface()
self.listMenu_detailAlbums.reset()
labelajouer = self.listMenu_principal.getListItem(self.listMenu_principal.getSelectedPosition()).getLabel()
artist = self.listMenu_principal.getListItem(self.listMenu_principal.getSelectedPosition()).getProperty('artist')
self.title_label.setLabel(labelajouer)
# retrieve the filename cover.jpg from previous menulist and print it on coverbox
file_image = self.listMenu_principal.getListItem(self.listMenu_principal.getSelectedPosition()).getProperty(
'image')
if file_image:
self.pochette.setImage(file_image)
album_id = self.listMenu_principal.getListItem(self.listMenu_principal.getSelectedPosition()).getProperty('id')
artist_id = self.listMenu_principal.getListItem(self.listMenu_principal.getSelectedPosition()).getProperty('artist_id')
requete = 'tracks 0 100 artist_id:' + artist_id + ' album_id:' + album_id + ' sort:tracknum ' + 'tags:' + TAGS
self.InterfaceCLI.viderLeBuffer()
self.InterfaceCLI.sendtoCLISomething(requete)
reponse = self.InterfaceCLI.receptionReponseEtDecodage()
'''example response:
tracks|0|100|artist_id:4216|album_id:1683|tags:aCdejJKlstuwxy|
id:20179|title:Au pays des Merveilles de Juliet|artist:Yves Simon|compilation:0|duration:144.144|
album_id:1683|coverart:1|artwork_track_id:afe480cb|album:Au Pays Des Merveilles De Juliet|artist_id:4216|tracknum:3|
url:file:///i-data/e0c90389/music/Musique/Yves%20Simon/03%20Au%20pays%20des%20Merveilles%20de%20Juliet.flac|
remote:0|year:2007|
[...]
id:20178|title:Rue de la Huchette|artist:Yves Simon|compilation:0|duration:139.808|album_id:1683|coverart:1|
artwork_track_id:afe480cb|album:Au Pays Des Merveilles De Juliet|artist_id:4216|tracknum:2|
url:file:///i-data/e0c90389/music/Musique/Yves%20Simon/02%20Rue%20de%20la%20Huchette.flac|
remote:0|year:2007|
count:10
'''
# strip the header and the tail
texte_en_liste_a_traiter = reponse.split('|count:')
debug('texte_a_traiter : ' + str(texte_en_liste_a_traiter), DEBUG_LEVEL)
if texte_en_liste_a_traiter == ['']:
# error in the response
outils.functionNotYetImplemented()
try:
nombreDItemsTracks = texte_en_liste_a_traiter.pop()
except IndexError:
outils.functionNotYetImplemented()
try:
texte_a_traiter_titre = texte_en_liste_a_traiter.pop()
texte_en_liste_a_traiter_entete = texte_a_traiter_titre.split('tags:' + TAGS + '|')
debug('texte_a_traiter titre: ' + str(texte_en_liste_a_traiter_entete), DEBUG_LEVEL)
except IndexError:
item = xbmcgui.ListItem()
item.setLabel('Get an Error from Server! ')
self.listMenu_detailAlbums.addItem(item)
# example:
try:
lesItemsTracksNormalised = texte_en_liste_a_traiter_entete[1]
debug('lesItemsTracksNormalised : ' + lesItemsTracksNormalised, DEBUG_LEVEL)
except IndexError:
return
try:
lachainedesItemsTracks = lesItemsTracksNormalised.split('|') #
debug('detail Albums : ' + str(lachainedesItemsTracks), DEBUG_LEVEL)
except IndexError:
debug('functionNotYetImplemented detailAlbums 310', DEBUG_LEVEL)
outils.functionNotYetImplemented()
'''
example of album details:
['id:23528', 'title:Allende', 'artist:1984 - En public au Theatre des Champs Elysees', 'compilation:0',
'duration:270.367', 'album_id:1967', 'coverart:0', 'album:Cd3', 'artist_id:4425', 'tracknum:23',
'url:file:///i-data/e0c/music/TOUT_Leo_Ferre_ou_Presque...48_CD_et_Extras/nde.mp3', 'remote:0',
'year:0',
'id:23531', 'title:Avec le temps', 'artist:1984 - En public au Theatre des Champs Elysees', 'compilation:0',
'duration:169.795', 'album_id:1967', 'coverart:0', 'album:Cd3', 'artist_id:4425', 'tracknum:26',
'url:file:///i-data/e0c/music/TOUT_Leo_Ferre_ou_Presque...48_CD_et_Extra',
'remote:0', 'year:0',
[...]
, 'id:23529', 'title:Words Words Words', 'artist:1984 - En public au Theatre des Champs Elysees', 'compilation:0',
'duration:219.689', 'album_id:1967', 'coverart:0', 'album:Cd3', 'artist_id:4425', 'tracknum:24',
'url:file:///i-data/e0c/music/TOUT_Leo_Ferre_ou_Presque...48_CD_et_Extrasrds.mp3',
'remote:0', 'year:0']
'''
secondEtsuivant = False
index = 0
indice = '' # fallback if tracknum doesn't exist
titre = ''
year = ''
duree = ''
itemsTracks = [] # une liste
itemtampon = xbmcgui.ListItem()
for chaine in lachainedesItemsTracks:
debug('detail album 1 item : ' + str(chaine), DEBUG_LEVEL)
try:
clef, valeur = chaine.split(':', 1)
except ValueError:
# no ':' separator in this chunk (e.g. stray text around the title), so skip it
continue
if clef == 'id':
if secondEtsuivant:
itemtampon.setLabel(indice + ' - ' + titre + ' - ' + year + ' : ' + duree + ' .')
itemtampon.setProperty('album_id', album_id)
itemtampon.setProperty('artist_id', artist_id)
itemsTracks.append(itemtampon)
itemtampon = xbmcgui.ListItem()
index = index + 1
debug('Ajout de l item dans listItem tampon' + titre + ' ' + itemtampon.getProperty('track_id'),
DEBUG_LEVEL)
itemtampon.setProperty('track_id', valeur)
secondEtsuivant = True
elif clef == 'title':
titre = valeur
# itemtampon.setLabel(valeur)
elif clef == 'duration':
duree = outils.getInHMS(valeur)
elif clef == 'artwork_track_id':
hashcode_artwork = valeur
completeNameofFile = self.get_artwork(hashcode_artwork=hashcode_artwork)
# itemtampon.setArt({'thumb': completeNameofFile})
itemtampon.setProperty('image', completeNameofFile)
elif clef == 'tracknum':
itemtampon.setProperty(clef, valeur)
indice = valeur
elif clef == 'year':
itemtampon.setProperty(clef, valeur)
year = valeur
else:
# not sure that we have to keep other value
# for now we keep them but todo pass them
itemtampon.setProperty(clef, valeur)
# once the 'for' loop exits, fill the list with the last itemtampon:
itemtampon.setProperty('album_id', album_id)
itemtampon.setProperty('artist_id', artist_id)
itemtampon.setLabel(indice + ' - ' + titre + ' - ' + year + ' : ' + duree + ' .')
itemsTracks.append(itemtampon)
debug('Ajout de l item dans listItem tampon ' + titre + ' ' + itemtampon.getProperty('track_id'),
DEBUG_LEVEL)
# sort the itemsTracks list by tracknum todo test this function or similar
# sorted(itemsTracks, key=lambda tracknum: tracknum[1]) # sort by n° track not always true
for item in itemsTracks:
debug('ajout de item tracks dans menu detailAlbum : ' + item.getLabel(), DEBUG_LEVEL)
self.listMenu_detailAlbums.addItem(item)
# End of function f_detailAlbums
# end of class AllArtists(MyMusic)
| 44.840243 | 145 | 0.608452 |
794889d899d70d3af987857c8e797aeee883c213 | 3,746 | py | Python | flexget/plugins/sites/rmz.py | fotile96/Flexget | 0f7f805a43e25e27fce195b91228f911bf4c6b1e | ["MIT"] | 1 | 2021-03-16T18:41:47.000Z | 2021-03-16T18:41:47.000Z | flexget/plugins/sites/rmz.py | fotile96/Flexget | 0f7f805a43e25e27fce195b91228f911bf4c6b1e | ["MIT"] | null | null | null | flexget/plugins/sites/rmz.py | fotile96/Flexget | 0f7f805a43e25e27fce195b91228f911bf4c6b1e | ["MIT"] | null | null | null |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
import re
from flexget import plugin
from flexget.event import event
from flexget.plugins.internal.urlrewriting import UrlRewritingError
from flexget.utils.soup import get_soup
from flexget.utils.search import normalize_unicode
from requests.exceptions import RequestException
log = logging.getLogger('rmz')
class UrlRewriteRmz(object):
"""
rmz.cr (rapidmoviez.com) urlrewriter
Version 0.1
Configuration
rmz:
filehosters_re:
- domain\.com
- domain2\.org
Only add links that match any of the regular expressions listed under filehosters_re.
If more than one valid link is found, the url of the entry is rewritten to
the first link found. The complete list of valid links is placed in the
'urls' field of the entry.
Therefore, it is recommended, that you configure your output to use the
'urls' field instead of the 'url' field.
For example, to use jdownloader 2 as output, you would use the exec plugin:
exec:
- echo "text={{urls}}" >> "/path/to/jd2/folderwatch/{{title}}.crawljob"
"""
schema = {
'type': 'object',
'properties': {
'filehosters_re': {'type': 'array', 'items': {'format': 'regexp'}}
},
'additionalProperties': False
}
# Since the urlrewriter relies on a config, we need to create a default one
config = {
'filehosters_re': []
}
# grab config
def on_task_start(self, task, config):
self.config = config
# urlrewriter API
def url_rewritable(self, task, entry):
url = entry['url']
rewritable_regex = r'^https?://(www\.)?(rmz\.cr|rapidmoviez\.(com|eu))/.*'
return re.match(rewritable_regex, url) is not None
@plugin.internet(log)
# urlrewriter API
def url_rewrite(self, task, entry):
try:
page = task.requests.get(entry['url'])
except RequestException as e:
raise UrlRewritingError(str(e))
try:
soup = get_soup(page.text)
except Exception as e:
raise UrlRewritingError(str(e))
link_elements = soup.find_all('pre', class_='links')
if 'urls' in entry:
urls = list(entry['urls'])
else:
urls = []
for element in link_elements:
urls.extend(element.text.splitlines())
regexps = self.config.get('filehosters_re', [])
filtered_urls = []
for i, url in enumerate(urls):
urls[i] = normalize_unicode(url)
for regexp in regexps:
if re.search(regexp, urls[i]):
filtered_urls.append(urls[i])
log.debug('Url: "%s" matched filehoster filter: %s', urls[i], regexp)
break
else:
if regexps:
log.debug('Url: "%s" does not match any of the given filehoster filters: %s', urls[i], str(regexps))
if regexps:
log.debug('Using filehosters_re filters: %s', str(regexps))
urls = filtered_urls
else:
log.debug('No filehoster filters configured, using all found links.')
num_links = len(urls)
log.verbose('Found %d links at %s.', num_links, entry['url'])
if num_links:
entry['urls'] = urls
entry['url'] = urls[0]
else:
raise UrlRewritingError('No useable links found at %s' % entry['url'])
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteRmz, 'rmz', interfaces=['urlrewriter', 'task'], api_ver=2)
| 32.859649 | 120 | 0.611319 |
794889f1709482a6ec8cba95a27f06b698941e13 | 6,485 | py | Python | main.py | ewen-lbh/pyfiglet-tester | eb3cd4bb540a00ebb5a59d14b87ef2b19b75f6eb | ["Unlicense"] | 1 | 2020-10-30T10:46:04.000Z | 2020-10-30T10:46:04.000Z | main.py | ewen-lbh/pyfiglet-tester | eb3cd4bb540a00ebb5a59d14b87ef2b19b75f6eb | ["Unlicense"] | null | null | null | main.py | ewen-lbh/pyfiglet-tester | eb3cd4bb540a00ebb5a59d14b87ef2b19b75f6eb | ["Unlicense"] | null | null | null |
def main():
import random
import re
import shutil
import sys
import webbrowser
import pyfiglet
FONTS_LIST = [
'3-d',
'3x5',
'5lineoblique',
'acrobatic',
'alligator2',
'alligator',
'alphabet',
'avatar',
'banner3-D',
'banner3',
'banner4',
'banner',
'barbwire',
'basic',
'bell',
'bigchief',
'big',
'binary',
'block',
'broadway',
'bubble',
'bulbhead',
'calgphy2',
'caligraphy',
'catwalk',
'chunky',
'coinstak',
'colossal',
'computer',
'contessa',
'contrast',
'cosmic',
'cosmike',
'crawford',
'cricket',
'cyberlarge',
'cybermedium',
'cybersmall',
'decimal',
'diamond',
'digital',
'doh',
'doom',
'dotmatrix',
'double',
'drpepper',
'eftichess',
'eftifont',
'eftipiti',
'eftirobot',
'eftitalic',
'eftiwall',
'eftiwater',
'epic',
'fender',
'fourtops',
'fuzzy',
'goofy',
'gothic',
'graffiti',
'hex',
'hollywood',
'invita',
'isometric1',
'isometric2',
'isometric3',
'isometric4',
'italic',
'ivrit',
'jazmine',
'katakana',
'kban',
'larry3d',
'lcd',
'lean',
'letters',
'linux',
'lockergnome',
'madrid',
'marquee',
'maxfour',
'mike',
'mini',
'mirror',
'mnemonic',
'nancyj-fancy',
'nancyj',
'nancyj-underlined',
'nipples',
'o8',
'octal',
'ogre',
'os2',
'pawp',
'peaks',
'pebbles',
'pepper',
'poison',
'puffy',
'pyramid',
'rectangles',
'relief2',
'relief',
'rev',
'roman',
'rot13',
'rounded',
'rowancap',
'rozzo',
'sblood',
'script',
'serifcap',
'shadow',
'short',
'slant',
'slide',
'slscript',
'small',
'smisome1',
'smkeyboard',
'smscript',
'smshadow',
'smslant',
'speed',
'stacey',
'stampatello',
'standard',
'starwars',
'stellar',
'stop',
'straight',
'tanja',
'term',
'thick',
'thin',
'threepoint',
'ticks',
'ticksslant',
'tinker-toy',
'tombstone',
'trek',
'twopoint',
'univers',
'usaflag',
'weird',
'whimsy',
]
LOGOS_FONT_LIST = [
'avatar',
'banner',
'bell',
'big',
'chunky',
'cybermedium',
'digital',
'doom',
'double',
'graffiti',
'madrid',
'ogre',
'pepper',
'puffy',
'rectangles',
'rounded',
'script',
'shadow',
'short',
'slant',
'small',
'smkeyboard',
'standard',
'stop',
'straight',
'threepoint',
'tombstone',
'twopoint',
'weird'
]
# Function by critiqjo:
# https://gist.github.com/critiqjo/2ca84db26daaeb1715e1
def col_print(lines, term_width=None, indent=0, pad=2):
if not term_width:
size = shutil.get_terminal_size((80, 20))
term_width = size.columns
n_lines = len(lines)
if n_lines == 0:
return
col_width = max(len(line) for line in lines)
n_cols = int((term_width + pad - indent) / (col_width + pad))
n_cols = min(n_lines, max(1, n_cols))
col_len = int(n_lines / n_cols) + (0 if n_lines % n_cols == 0 else 1)
if (n_cols - 1) * col_len >= n_lines:
n_cols -= 1
cols = [lines[i * col_len: i * col_len + col_len] for i in range(n_cols)]
rows = list(zip(*cols))
rows_missed = zip(*[col[len(rows):] for col in cols[:-1]])
rows.extend(rows_missed)
for row in rows:
print(" " * indent + (" " * pad).join(line.ljust(col_width) for line in row))
website_url = 'mx3creations.com'
repo_url = 'github.com/ewen-lbh/pyfiglet-tester'
font = 'big'
logo_font = random.choice(LOGOS_FONT_LIST)
text = base_text = 'The quick brown fox JUMPS over the lazy dog !'
startup_logo = pyfiglet.figlet_format('Pyfiglet\nTester', font=logo_font) + '\n\n\n'
tutorial = f"""
===COMMANDS
set <text|-r> Set the text displayed
-r or -reset to reset it to "{base_text}"
? Shows this
/repo Go to the github repo of this script
ls List some fonts (not sure if its all of them)
random Select a font randomly from the ones shown with ls
<anything else> Displays the text using the font specified
===INFO
Script by Mx3 - {website_url}
Column print function by critiqjo (github)
Repo - {repo_url}
"""
print(startup_logo + tutorial)
while font not in ('exit', 'close'):
font = str(input('Choose a font...\n>>'))
if font[:3] == 'set':
req_text = font[4:]
if req_text in ('-reset', '-r'):
text = base_text
else:
text = str(req_text)
print('Displayed text set to "' + text + '"')
elif font[:2] in ('/h', '/?', '?'):
print(tutorial)
elif font[:2] == '/r':
webbrowser.open(repo_url)
elif font[:2] == '/w':
webbrowser.open(website_url)
elif font[:2] in ('/l', 'ls'):
print('Fonts list:\n')
col_print(FONTS_LIST)
print('\n')
elif font.strip() == 'exit':
sys.exit('Script closed.')
else:
if font.strip() == 'random':
font = random.choice(FONTS_LIST)
print(f'\n\nFont:{font}\n\n')
try:
pyfiglet.print_figlet(text, font=font)
except pyfiglet.FontNotFound:
print(f'No font named "{font}" !')
if __name__ == '__main__':
main()
| 23.496377 | 89 | 0.448574 |
79488bb855abb5880c65a77a41622820a1230166 | 605 | py | Python | lejian/runserver.py | PuZheng/LEJAIN-backend | 1647b63cb409842566f3d2cd9771f8b8856c1a03 | ["MIT"] | null | null | null | lejian/runserver.py | PuZheng/LEJAIN-backend | 1647b63cb409842566f3d2cd9771f8b8856c1a03 | ["MIT"] | 13 | 2015-10-23T04:43:51.000Z | 2015-12-19T14:30:33.000Z | lejian/runserver.py | PuZheng/lejian-backend | 1647b63cb409842566f3d2cd9771f8b8856c1a03 | ["MIT"] | null | null | null |
#!/usr/bin/env python
"""
SYNOPSIS
python runserver.py [options]
OPTIONS
-h
show this help
-p <port>
the port of server runs on
-s <host>
the ip of the server runs on
"""
from getopt import getopt
import sys
opts, _ = getopt(sys.argv[1:], "s:p:h")
host = '0.0.0.0'
port = None
for o, v in opts:
if o == "-s":
host = v
elif o == "-p":
port = int(v)
elif o == "-h":
print(__doc__)
else:
print("unkown option: " + o)
print(__doc__)
from lejian.basemain import app
app.run(host=host, port=port, debug=True)
| 17.794118 | 41 | 0.550413 |
79488c3b7308486b233e97bf7a0faa547f66dcdc | 8,529 | py | Python | merge_data.py | MattTheVV/CoronaWatchNL | 3e548c589193fe2736c66bf5829a482e5416f58a | ["CC0-1.0"] | 1 | 2021-01-19T15:08:01.000Z | 2021-01-19T15:08:01.000Z | merge_data.py | MattTheVV/CoronaWatchNL | 3e548c589193fe2736c66bf5829a482e5416f58a | ["CC0-1.0"] | null | null | null | merge_data.py | MattTheVV/CoronaWatchNL | 3e548c589193fe2736c66bf5829a482e5416f58a | ["CC0-1.0"] | null | null | null |
"""Merge RIVM daily stats"""
from pathlib import Path
import re
import datetime
from io import StringIO
import pandas
DATE_MAP = {
"maart": 3,
"april": 4,
"mei": 5,
}
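# Maps Dutch month names to month numbers; it is not referenced by the parsers below
# (presumably kept for older filename formats).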
def list_files():
return [p for p in Path('raw_data').iterdir() if p.is_file()]
def parse_format_v3(file, n_missing=None):
date_parts = re.findall(r'(\d{1,2})-(.*?)-(\d{4})', str(file))[0]
# find date
date = datetime.date(
int(date_parts[2]),
int(date_parts[1]),
int(date_parts[0])
)
df = pandas.read_csv(file, sep=";")
# find the number of missing locations
missing = re.findall(
r'Bij (\d+) personen is de woonplaats niet van bekend',
df.iat[0, 1]
)
try:
n_missing = int(missing[0])
except Exception:
pass
try:
df['id'] = df['Gemnr'].astype(int)
del df['Gemnr']
except Exception:
pass
# remove junk rows
df = df[df['id'] >= 0].copy()
# append row with missing numbers
df = df.append({
"Gemeente": None,
"id": -1,
"Aantal": n_missing},
ignore_index=True)
# print(df.tail())
# add column with date
df['Datum'] = date
return df
def parse_format_v4(file, column_label, n_missing=None):
date_parts = re.findall(r'(\d{1,2})-(.*?)-(\d{4})', str(file))[0]
# find date
date = datetime.date(
int(date_parts[2]),
int(date_parts[1]),
int(date_parts[0])
)
df = pandas.read_csv(file, sep=";")
# find the number of missing locations
missing = re.findall(
r'Bij (\d+) personen is de woonplaats niet van bekend',
df.iat[0, 1]
)
try:
n_missing = int(missing[0])
except Exception:
pass
try:
df['id'] = df['Gemnr'].astype(int)
del df['Gemnr']
except Exception:
pass
# remove junk rows
df = df[df['id'] >= 0].copy()
df["Aantal"] = df[column_label]
# append row with missing numbers
df = df.append({
"Gemeente": None,
"id": -1,
"Aantal": n_missing},
ignore_index=True)
# print(df.tail())
# add column with date
df['Datum'] = date
df = df[["Datum", "Gemeente", "id", "Aantal"]]
return df
def merge_df_days(df_dict):
result = pandas.concat(
df_dict.values(),
axis=0,
sort=False
).dropna(axis=0, subset=["Aantal"])
result['Aantal'] = result['Aantal'].astype(int)
return result
def merge_hosp():
df_frames = {
"raw_data/peildatum-31-03-2020-14-00.csv": None,
"raw_data/peildatum-04-04-2020-12-45.csv": parse_format_v3("raw_data/peildatum-04-04-2020-12-45.csv"),
"raw_data/peildatum-01-04-2020-13-58.csv": parse_format_v3("raw_data/peildatum-01-04-2020-13-58.csv"),
"raw_data/peildatum-02-04-2020-14-00.csv": parse_format_v3("raw_data/peildatum-02-04-2020-14-00.csv"),
"raw_data/peildatum-31-03-2020-19-20.csv": parse_format_v3("raw_data/peildatum-31-03-2020-19-20.csv"),
"raw_data/peildatum-03-04-2020-14-00.csv": parse_format_v3("raw_data/peildatum-03-04-2020-14-00.csv"),
"raw_data/peildatum-07-04-2020-13-55.csv": parse_format_v3("raw_data/peildatum-07-04-2020-13-55.csv"),
"raw_data/peildatum-05-04-2020-14-15.csv": parse_format_v3("raw_data/peildatum-05-04-2020-14-15.csv"),
"raw_data/peildatum-06-04-2020-13-50.csv": parse_format_v3("raw_data/peildatum-06-04-2020-13-50.csv")
}
# files not in the list above
for file in Path('raw_data').glob('peildatum*.csv'):
if str(file) not in df_frames.keys():
print(f"Parse file {file}")
df_frames[str(file)] = parse_format_v4(file, "Zkh opname")
result = merge_df_days(df_frames)
# add municipality to data
df_mun = pandas.read_csv(
Path("ext", "Gemeenten_alfabetisch_2019.csv"), sep=";"
)[["Gemeentecode", "Gemeentenaam", "Provincienaam"]]
result = result.\
merge(df_mun, left_on="id", right_on="Gemeentecode", how="left").\
drop(["id"], axis=1)
result = result[
["Datum", "Gemeentenaam", "Gemeentecode", "Provincienaam", "Aantal"]
].sort_values(["Datum", "Gemeentecode"]). \
fillna({"Gemeentecode": -1})
result["Gemeentecode"] = result["Gemeentecode"].astype(int)
result = result[result["Aantal"] != 0]
print(result.tail())
result.to_csv(Path("data", "rivm_NL_covid19_hosp_municipality.csv"), index=False)
def merge_postest():
df_frames = {
"raw_data/peildatum-31-03-2020-14-00.csv": None,
"raw_data/peildatum-04-04-2020-12-45.csv": None,
"raw_data/peildatum-01-04-2020-13-58.csv": None,
"raw_data/peildatum-02-04-2020-14-00.csv": None,
"raw_data/peildatum-31-03-2020-19-20.csv": None,
"raw_data/peildatum-03-04-2020-14-00.csv": None,
"raw_data/peildatum-07-04-2020-13-55.csv": None,
"raw_data/peildatum-05-04-2020-14-15.csv": None,
"raw_data/peildatum-06-04-2020-13-50.csv": None,
}
# files not in the list above
for file in Path('raw_data').glob('peildatum*.csv'):
if str(file) not in df_frames.keys():
print(f"Parse file {file}")
df_frames[str(file)] = parse_format_v4(file, "Meldingen")
result = merge_df_days(df_frames)
# add municipality to data
df_mun = pandas.read_csv(
Path("ext", "Gemeenten_alfabetisch_2019.csv"), sep=";"
)[["Gemeentecode", "Gemeentenaam", "Provincienaam"]]
result = result.\
merge(df_mun, left_on="id", right_on="Gemeentecode", how="left").\
drop(["id"], axis=1)
result = result[
["Datum", "Gemeentenaam", "Gemeentecode", "Provincienaam", "Aantal"]
]
# add old data
df_total_old = pandas.read_csv(Path("data", "rivm_corona_in_nl.csv"))
result = result.append(df_total_old)
# sort values and adjust dtypes
result = result.sort_values(["Datum", "Gemeentecode"]). \
fillna({"Gemeentecode": -1})
result["Gemeentecode"] = result["Gemeentecode"].astype(int)
result = result[result["Aantal"] != 0]
print(result.tail())
result.to_csv(Path("data", "rivm_NL_covid19_total_municipality.csv"), index=False)
def merge_dead():
df_frames = {
"raw_data/peildatum-31-03-2020-14-00.csv": None,
"raw_data/peildatum-31-03-2020-19-20.csv": None,
"raw_data/peildatum-01-04-2020-13-58.csv": None,
"raw_data/peildatum-02-04-2020-14-00.csv": None,
"raw_data/peildatum-03-04-2020-14-00.csv": None,
"raw_data/peildatum-04-04-2020-12-45.csv": None,
"raw_data/peildatum-05-04-2020-14-15.csv": None,
"raw_data/peildatum-06-04-2020-13-50.csv": None,
"raw_data/peildatum-07-04-2020-13-55.csv": None,
"raw_data/peildatum-08-04-2020-13-55.csv": None,
"raw_data/peildatum-09-04-2020-13-50.csv": None,
"raw_data/peildatum-10-04-2020-14-20.csv": None,
"raw_data/peildatum-11-04-2020-14-00.csv": None,
"raw_data/peildatum-12-04-2020-14-00.csv": None,
"raw_data/peildatum-13-04-2020-14-00.csv": None,
"raw_data/peildatum-14-04-2020-14-00.csv": None,
"raw_data/peildatum-15-04-2020-14-00.csv": None,
"raw_data/peildatum-16-04-2020-14-00.csv": None,
"raw_data/peildatum-17-04-2020-14-00.csv": None,
"raw_data/peildatum-17-04-2020-16-00.csv": None,
}
# files not in the list above
for file in Path('raw_data').glob('peildatum*.csv'):
if str(file) not in df_frames.keys():
print(f"Parse file {file}")
df_frames[str(file)] = parse_format_v4(file, "Overleden")
result = merge_df_days(df_frames)
# add municipality to data
df_mun = pandas.read_csv(
Path("ext", "Gemeenten_alfabetisch_2019.csv"), sep=";"
)[["Gemeentecode", "Gemeentenaam", "Provincienaam"]]
result = result.\
merge(df_mun, left_on="id", right_on="Gemeentecode", how="left").\
drop(["id"], axis=1)
result = result[
["Datum", "Gemeentenaam", "Gemeentecode", "Provincienaam", "Aantal"]
].sort_values(["Datum", "Gemeentecode"]). \
fillna({"Gemeentecode": -1})
result["Gemeentecode"] = result["Gemeentecode"].astype(int)
result = result[result["Aantal"] != 0]
print(result.tail())
result.to_csv(Path("data", "rivm_NL_covid19_fatalities_municipality.csv"), index=False)
if __name__ == '__main__':
merge_hosp()
merge_postest()
merge_dead()
| 29.512111 | 110 | 0.610623 |
79488c9d3351f105daf599408345c01b60d37577 | 9,661 | py | Python | tests/api/admin/controller/test_sitewide_registration.py | aseefahmed/circulation | 17cbc9186ab3cde9606912559f92b393ac18ecaa | ["Apache-2.0"] | null | null | null | tests/api/admin/controller/test_sitewide_registration.py | aseefahmed/circulation | 17cbc9186ab3cde9606912559f92b393ac18ecaa | ["Apache-2.0"] | 44 | 2022-01-20T01:31:32.000Z | 2022-03-31T01:50:41.000Z | tests/api/admin/controller/test_sitewide_registration.py | jonathangreen/circulation | 118866f8257e2a97431a28ea5ba8e34e5bd393eb | ["Apache-2.0"] | null | null | null |
import base64
import binascii
import json
import os
import flask
import jwt
from werkzeug.datastructures import MultiDict
from api.admin.problem_details import *
from api.config import Configuration
from core.model import ExternalIntegration
from core.testing import MockRequestsResponse
from core.util.problem_detail import ProblemDetail
from .test_controller import SettingsControllerTest
class TestSitewideRegistration(SettingsControllerTest):
def test_sitewide_registration_post_errors(self):
def assert_remote_integration_error(response, message=None):
assert REMOTE_INTEGRATION_FAILED.uri == response.uri
assert REMOTE_INTEGRATION_FAILED.title == response.title
if message:
assert message in response.detail
metadata_wrangler_service = self._external_integration(
ExternalIntegration.METADATA_WRANGLER,
goal=ExternalIntegration.METADATA_GOAL,
url=self._url,
)
default_form = None
controller = self.manager.admin_metadata_services_controller
# If no ExternalIntegration is given, a ProblemDetail is returned.
with self.request_context_with_admin("/"):
response = controller.process_sitewide_registration(
None, do_get=self.do_request
)
assert MISSING_SERVICE == response
# If an error is raised during registration, a ProblemDetail is returned.
def error_get(*args, **kwargs):
raise RuntimeError("Mock error during request")
with self.request_context_with_admin("/"):
response = controller.process_sitewide_registration(
metadata_wrangler_service, do_get=error_get
)
assert_remote_integration_error(response)
# If the response has the wrong media type, a ProblemDetail is returned.
self.responses.append(
MockRequestsResponse(200, headers={"Content-Type": "text/plain"})
)
with self.request_context_with_admin("/"):
response = controller.process_sitewide_registration(
metadata_wrangler_service, do_get=self.do_request
)
assert_remote_integration_error(
response, "The service did not provide a valid catalog."
)
# If the response returns a ProblemDetail, its contents are wrapped
# in another ProblemDetail.
status_code, content, headers = MULTIPLE_BASIC_AUTH_SERVICES.response
self.responses.append(MockRequestsResponse(content, headers, status_code))
with self.request_context_with_admin("/"):
response = controller.process_sitewide_registration(
metadata_wrangler_service, do_get=self.do_request
)
assert isinstance(response, ProblemDetail)
assert response.detail.startswith(
"Remote service returned a problem detail document:"
)
assert str(MULTIPLE_BASIC_AUTH_SERVICES.detail) in response.detail
# If no registration link is available, a ProblemDetail is returned
catalog = dict(id=self._url, links=[])
headers = {"Content-Type": "application/opds+json"}
self.responses.append(
MockRequestsResponse(200, content=json.dumps(catalog), headers=headers)
)
with self.request_context_with_admin("/"):
response = controller.process_sitewide_registration(
metadata_wrangler_service, do_get=self.do_request
)
assert_remote_integration_error(
response, "The service did not provide a register link."
)
# If no registration details are given, a ProblemDetail is returned
link_type = self.manager.admin_settings_controller.METADATA_SERVICE_URI_TYPE
catalog["links"] = [dict(rel="register", href=self._url, type=link_type)]
registration = dict(id=self._url, metadata={})
self.responses.extend(
[
MockRequestsResponse(
200, content=json.dumps(registration), headers=headers
),
MockRequestsResponse(200, content=json.dumps(catalog), headers=headers),
]
)
with self.request_context_with_admin("/", method="POST"):
response = controller.process_sitewide_registration(
metadata_wrangler_service,
do_get=self.do_request,
do_post=self.do_request,
)
assert_remote_integration_error(
response, "The service did not provide registration information."
)
# If we get all the way to the registration POST, but that
# request results in a ProblemDetail, that ProblemDetail is
# passed along.
self.responses.extend(
[
MockRequestsResponse(
200, content=json.dumps(registration), headers=headers
),
MockRequestsResponse(200, content=json.dumps(catalog), headers=headers),
]
)
def bad_do_post(self, *args, **kwargs):
return MULTIPLE_BASIC_AUTH_SERVICES
with self.request_context_with_admin("/", method="POST"):
flask.request.form = MultiDict(
[
("integration_id", metadata_wrangler_service.id),
]
)
response = controller.process_sitewide_registration(
metadata_wrangler_service, do_get=self.do_request, do_post=bad_do_post
)
assert MULTIPLE_BASIC_AUTH_SERVICES == response
def test_sitewide_registration_post_success(self):
# A service to register with
metadata_wrangler_service = self._external_integration(
ExternalIntegration.METADATA_WRANGLER,
goal=ExternalIntegration.METADATA_GOAL,
url=self._url,
)
# The service knows this site's public key, and is going
# to use it to encrypt a shared secret.
public_key, private_key = self.manager.sitewide_key_pair
encryptor = Configuration.cipher(public_key)
# A catalog with registration url
register_link_type = (
self.manager.admin_settings_controller.METADATA_SERVICE_URI_TYPE
)
registration_url = self._url
catalog = dict(
id=metadata_wrangler_service.url,
links=[
dict(rel="collection-add", href=self._url, type="collection"),
dict(rel="register", href=registration_url, type=register_link_type),
dict(rel="collection-remove", href=self._url, type="collection"),
],
)
headers = {"Content-Type": "application/opds+json"}
self.responses.append(
MockRequestsResponse(200, content=json.dumps(catalog), headers=headers)
)
# A registration document with an encrypted secret
shared_secret = binascii.hexlify(os.urandom(24))
encrypted_secret = base64.b64encode(encryptor.encrypt(shared_secret))
registration = dict(
id=metadata_wrangler_service.url,
metadata=dict(shared_secret=encrypted_secret.decode("utf-8")),
)
self.responses.insert(
0, MockRequestsResponse(200, content=json.dumps(registration))
)
with self.request_context_with_admin("/", method="POST"):
flask.request.form = MultiDict(
[
("integration_id", metadata_wrangler_service.id),
]
)
response = self.manager.admin_metadata_services_controller.process_sitewide_registration(
metadata_wrangler_service,
do_get=self.do_request,
do_post=self.do_request,
)
assert None == response
# We made two requests: a GET to get the service document from
# the metadata wrangler, and a POST to the registration
# service, with the entity-body containing a callback URL and
# a JWT.
metadata_wrangler_service_request, registration_request = self.requests
url, i1, i2 = metadata_wrangler_service_request
assert metadata_wrangler_service.url == url
url, [document], ignore = registration_request
assert url == registration_url
for k in "url", "jwt":
assert k in document
# The end result is that our ExternalIntegration for the metadata
# wrangler has been updated with a (decrypted) shared secret.
assert shared_secret.decode("utf-8") == metadata_wrangler_service.password
def test_sitewide_registration_document(self):
"""Test the document sent along to sitewide registration."""
controller = self.manager.admin_metadata_services_controller
with self.request_context_with_admin("/"):
doc = controller.sitewide_registration_document()
# The registrar knows where to go to get our public key.
assert doc["url"] == controller.url_for("public_key_document")
# The JWT proves that we control the public/private key pair.
public_key, private_key = self.manager.sitewide_key_pair
parsed = jwt.decode(doc["jwt"], public_key, algorithm="RS256")
# The JWT must be valid or jwt.decode() would have raised
# an exception. This simply verifies that the JWT includes
# an expiration date and doesn't last forever.
assert "exp" in parsed
| 41.642241 | 101 | 0.642791 |
79488dbab228338d8a6abff6b83420ae0c637b13
| 4,895 |
py
|
Python
|
tests/testSupport.py
|
musca1997/drawbot
|
d5b990c74289ba437e81933423a09b0e4839494c
|
[
"BSD-2-Clause"
] | 1 |
2015-07-25T07:39:28.000Z
|
2015-07-25T07:39:28.000Z
|
tests/testSupport.py
|
musca1997/drawbot
|
d5b990c74289ba437e81933423a09b0e4839494c
|
[
"BSD-2-Clause"
] | null | null | null |
tests/testSupport.py
|
musca1997/drawbot
|
d5b990c74289ba437e81933423a09b0e4839494c
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import PY3
import sys
import os
import tempfile
import shutil
import random
import io
from PIL import Image, ImageChops
from drawBot.misc import warnings
from fontTools.misc.py23 import PY2
testRootDir = os.path.dirname(os.path.abspath(__file__))
testDataDir = os.path.join(testRootDir, "data")
tempTestDataDir = os.path.join(testRootDir, "tempTestData")
if not os.path.exists(tempTestDataDir):
os.mkdir(tempTestDataDir)
warnings.shouldShowWarnings = True
class StdOutCollector(object):
def __init__(self, **kwargs):
# force captureStdErr to be a keyword argument
if kwargs:
captureStdErr = kwargs["captureStdErr"]
assert len(kwargs) == 1
else:
captureStdErr = False
self.captureStdErr = captureStdErr
self._stream = io.StringIO()
def __enter__(self):
self.out = sys.stdout
self.err = sys.stderr
sys.stdout = self
if self.captureStdErr:
sys.stderr = self
return self
def __exit__(self, type, value, traceback):
sys.stdout = self.out
sys.stderr = self.err
def write(self, txt):
if PY2 and not isinstance(txt, unicode):
txt = txt.decode("utf-8")
self._stream.write(txt)
def flush(self):
pass
def lines(self):
return self._stream.getvalue().splitlines()
class TempFile(object):
"""This context manager will deliver a pathname for a temporary file, and will
remove it upon exit, if it indeed exists at that time. Note: it does _not_
_create_ the temporary file.
>>> with TempFile() as tmp:
... assert not os.path.exists(tmp.path)
... f = open(tmp.path, "wb")
... b = f.write(b"hello.")
... f.close()
... assert os.path.exists(tmp.path)
...
>>> assert not os.path.exists(tmp.path)
>>> with TempFile(suffix=".png") as tmp:
... assert tmp.path.endswith(".png")
...
"""
_create = staticmethod(tempfile.mktemp)
_destroy = staticmethod(os.remove)
def __init__(self, suffix="", prefix="tmp", dir=None):
self.suffix = suffix
self.prefix = prefix
self.dir = dir
self.path = None
def __enter__(self):
self.path = self._create(suffix=self.suffix, prefix=self.prefix, dir=self.dir)
return self
def __exit__(self, type, value, traceback):
if os.path.exists(self.path):
self._destroy(self.path)
class TempFolder(TempFile):
"""This context manager will create a temporary folder, and will remove it upon exit.
>>> with TempFolder() as tmp:
... assert os.path.exists(tmp.path)
... assert os.listdir(tmp.path) == []
...
>>> assert not os.path.exists(tmp.path)
>>> with TempFolder(suffix=".mystuff") as tmp:
... assert tmp.path.endswith(".mystuff")
...
"""
_create = staticmethod(tempfile.mkdtemp)
_destroy = staticmethod(shutil.rmtree)
def randomSeed(a):
if PY3:
return random.seed(a, version=1) # compatible with Python 2
else:
return random.seed(a)
def readData(path):
"""Return the raw data from a path."""
with open(path, "rb") as f:
return f.read()
def compareImages(path1, path2):
"""Compare two image files and return a number representing how similar they are.
A value of 0 means that the images are identical, a value of 1 means they are maximally
different or not comparable (for example, when their dimensions differ).
"""
im1 = Image.open(path1)
im2 = Image.open(path2)
if im1.size != im2.size:
# Dimensions differ, can't compare further
return 1
if im1 == im2:
# Image data is identical (I checked PIL's Image.__eq__ method: it's solid)
return 0
# Get the difference between the images
diff = ImageChops.difference(im1, im2)
# We'll calculate the average difference based on the histogram provided by PIL
hist = diff.histogram()
assert len(hist) == 4 * 256 # Assuming 4x8-bit RGBA for now. TODO: make this work for L and RGB modes
# Sum the histograms of each channel
summedHist = [sum(hist[pixelValue + ch * 256] for ch in range(4)) for pixelValue in range(256)]
assert len(summedHist) == 256
assert sum(hist) == sum(summedHist)
# Calculate the average of the difference
# First add all pixel values together
totalSum = sum(summedHist[pixelValue] * pixelValue for pixelValue in range(256))
# Then divide by the total number of channel values
average = totalSum / sum(summedHist)
# Scale pixel value range from 0-255 to 0-1
average = average / 255
assert 0.0 <= average <= 1.0
return average
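# --- Hedged usage sketch (not part of the original test support module) ---
# compareImages() above returns 0.0 for identical images and values up to 1.0
# for increasingly different (or incomparable) images. The guarded snippet
# below renders two small PIL images into a TempFolder and checks the score;
# the 16x16 size, the colors and the 0.5 threshold are illustrative
# assumptions, not project defaults.
if __name__ == "__main__":
    with TempFolder() as tmp:
        path1 = os.path.join(tmp.path, "a.png")
        path2 = os.path.join(tmp.path, "b.png")
        # Two RGBA images that differ only slightly in the green channel
        Image.new("RGBA", (16, 16), (255, 0, 0, 255)).save(path1)
        Image.new("RGBA", (16, 16), (255, 16, 0, 255)).save(path2)
        assert compareImages(path1, path1) == 0
        assert 0.0 < compareImages(path1, path2) < 0.5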
| 29.666667 | 106 | 0.636568 |
79488f1c5e756d75ae90977f185676bdf7e095a2
| 6,184 |
py
|
Python
|
chainer/iterators/serial_iterator.py
|
Evanc123/chainer
|
929af7189b1271683200aa9b0ba6da2dd3dee110
|
[
"MIT"
] | null | null | null |
chainer/iterators/serial_iterator.py
|
Evanc123/chainer
|
929af7189b1271683200aa9b0ba6da2dd3dee110
|
[
"MIT"
] | 11 |
2020-01-28T22:49:05.000Z
|
2022-03-11T23:50:27.000Z
|
chainer/iterators/serial_iterator.py
|
Evanc123/chainer
|
929af7189b1271683200aa9b0ba6da2dd3dee110
|
[
"MIT"
] | null | null | null |
from __future__ import division
import numpy
from chainer.dataset import iterator
from chainer.iterators.order_samplers import ShuffleOrderSampler
class SerialIterator(iterator.Iterator):
"""Dataset iterator that serially reads the examples.
This is a simple implementation of :class:`~chainer.dataset.Iterator`
that just visits each example in either the order of indexes or a shuffled
order.
To avoid unintentional performance degradation, the ``shuffle`` option is
set to ``True`` by default. For validation, it is better to set it to
``False`` when the underlying dataset supports fast slicing. If the
order of examples has an important meaning and the updater depends on the
original order, this option should be set to ``False``.
This iterator saves ``-1`` instead of ``None`` in snapshots since some
serializers do not support ``None``.
Args:
dataset: Dataset to iterate.
batch_size (int): Number of examples within each batch.
repeat (bool): If ``True``, it infinitely loops over the dataset.
Otherwise, it stops iteration at the end of the first epoch.
shuffle (bool): If ``True``, the order of examples is shuffled at the
beginning of each epoch. Otherwise, examples are extracted in the
order of indexes. If ``None`` and no ``order_sampler`` is given,
the behavior is the same as the case with ``shuffle=True``.
order_sampler (callable): A callable that generates the order
            of the indices to sample in the next epoch when an epoch finishes.
            This function should take two arguments: the current order
and the current position of the iterator.
This should return the next order. The size of the order
should remain constant.
This option cannot be used when ``shuffle`` is not ``None``.
"""
def __init__(self, dataset, batch_size,
repeat=True, shuffle=None, order_sampler=None):
self.dataset = dataset
self.batch_size = batch_size
self._repeat = repeat
self._shuffle = shuffle
if self._shuffle is not None:
if order_sampler is not None:
raise ValueError('`shuffle` is not `None` and a custom '
'`order_sampler` is set. Please set '
'`shuffle` to `None` to use the custom '
'order sampler.')
else:
if self._shuffle:
order_sampler = ShuffleOrderSampler()
else:
if order_sampler is None:
order_sampler = ShuffleOrderSampler()
self.order_sampler = order_sampler
self.reset()
def __next__(self):
if not self._repeat and self.epoch > 0:
raise StopIteration
self._previous_epoch_detail = self.epoch_detail
i = self.current_position
i_end = i + self.batch_size
N = self._epoch_size
if self._order is None:
batch = self.dataset[i:i_end]
else:
batch = [self.dataset[index] for index in self._order[i:i_end]]
if i_end >= N:
if self._repeat:
rest = i_end - N
if self._order is not None:
new_order = self.order_sampler(self._order, i)
if len(self._order) != len(new_order):
raise ValueError('The size of order does not match '
'the size of the previous order.')
self._order = new_order
if rest > 0:
if self._order is None:
batch.extend(self.dataset[:rest])
else:
batch.extend([self.dataset[index]
for index in self._order[:rest]])
self.current_position = rest
else:
self.current_position = 0
self.epoch += 1
self.is_new_epoch = True
else:
self.is_new_epoch = False
self.current_position = i_end
return batch
next = __next__
@property
def epoch_detail(self):
return self.epoch + self.current_position / self._epoch_size
@property
def previous_epoch_detail(self):
if self._previous_epoch_detail < 0:
return None
return self._previous_epoch_detail
def serialize(self, serializer):
self.current_position = serializer('current_position',
self.current_position)
self.epoch = serializer('epoch', self.epoch)
self.is_new_epoch = serializer('is_new_epoch', self.is_new_epoch)
if self._order is not None:
try:
serializer('order', self._order)
except KeyError:
serializer('_order', self._order)
try:
self._previous_epoch_detail = serializer(
'previous_epoch_detail', self._previous_epoch_detail)
except KeyError:
# guess previous_epoch_detail for older version
self._previous_epoch_detail = self.epoch + \
(self.current_position - self.batch_size) / self._epoch_size
if self.epoch_detail > 0:
self._previous_epoch_detail = max(
self._previous_epoch_detail, 0.)
else:
self._previous_epoch_detail = -1.
def reset(self):
self.current_position = 0
self.epoch = 0
self.is_new_epoch = False
# use -1 instead of None internally.
self._previous_epoch_detail = -1.
if self.order_sampler:
self._order = self.order_sampler(
numpy.arange(len(self.dataset)), 0)
else:
self._order = None
@property
def _epoch_size(self):
if self._order is None:
return len(self.dataset)
else:
return len(self._order)
@property
def repeat(self):
return self._repeat
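# --- Hedged usage sketch (not part of the original Chainer module) ---
# The class docstring above describes the batching and epoch behaviour of
# SerialIterator. The guarded snippet below walks a small list dataset for a
# single epoch with repeat=False and shuffle=False; the toy dataset and the
# batch size of 4 are illustrative assumptions, not Chainer defaults.
if __name__ == '__main__':
    toy_dataset = list(range(10))
    it = SerialIterator(toy_dataset, batch_size=4, repeat=False, shuffle=False)
    # With repeat=False the iterator raises StopIteration after one epoch.
    batches = [batch for batch in it]
    assert batches == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
    assert it.epoch == 1 and it.is_new_epoch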
| 36.809524 | 78 | 0.582309 |
79489066a377e95b81e8fa02478b8ec08c393d6d
| 1,932 |
py
|
Python
|
ROS/samana_ws/src/samana/src/mapviz_pub_floats.py
|
Combinacijus/Samana-Autonomous-Robot
|
092288df7d7733d8014b98169dd79858d48f5c7f
|
[
"MIT"
] | 4 |
2021-01-23T18:35:43.000Z
|
2021-12-26T09:03:53.000Z
|
ROS/samana_ws/src/samana/src/mapviz_pub_floats.py
|
Combinacijus/Samana-Autonomous-Robot
|
092288df7d7733d8014b98169dd79858d48f5c7f
|
[
"MIT"
] | null | null | null |
ROS/samana_ws/src/samana/src/mapviz_pub_floats.py
|
Combinacijus/Samana-Autonomous-Robot
|
092288df7d7733d8014b98169dd79858d48f5c7f
|
[
"MIT"
] | 1 |
2021-04-08T06:13:10.000Z
|
2021-04-08T06:13:10.000Z
|
#!/usr/bin/env python
# Subscribes to many topics and publishes some of its data on debug Float32 topics for mapviz to display
import rospy
from math import degrees
from sensor_msgs.msg import Imu
from nav_msgs.msg import Odometry
from samana_msgs.msg import ImuSmall
from std_msgs.msg import Float32
from tf.transformations import euler_from_quaternion
from random import random
imu_msg = Imu()
class FloatDebugger():
def __init__(self):
rospy.init_node("mapviz_pub_floats")
rospy.Subscriber("imu_data", ImuSmall, self.imu_callback)
rospy.Subscriber("odometry/global", Odometry, self.odom_global_callback)
rospy.spin()
def pub_float(self, topic, number):
pub = rospy.Publisher(topic, Float32, queue_size=1)
msg = Float32()
msg.data = number
pub.publish(msg)
def imu_callback(self, data):
if random() < 0.6: # To limit publish rate
return
# Publishes imu yaw corrected for mounting X to East = 0deg
rpy = euler_from_quaternion([data.quaternion_x, data.quaternion_y, data.quaternion_z, data.quaternion_w])
self.pub_float("debug/yaw_imu", degrees(rpy[2]) - 88) # NOTE: hardcoded transform
self.pub_float("debug/pitch_imu", degrees(rpy[0])) # NOTE: roll axis due to mounting
def odom_global_callback(self, data):
if random() < 0.75: # To limit publish rate
return
        # Publishes global odometry yaw and position
orient = data.pose.pose.orientation
pos = data.pose.pose.position
rpy = euler_from_quaternion([orient.x, orient.y, orient.z, orient.w])
self.pub_float("debug/yaw_odom", degrees(rpy[2]))
self.pub_float("debug/x", pos.x)
self.pub_float("debug/y", pos.y)
if __name__ == '__main__':
try:
float_debugger = FloatDebugger()
except rospy.ROSInterruptException:
pass
| 34.5 | 113 | 0.675983 |
7948907c11f0a5c29179b603873e7e6c56da8558
| 9,876 |
py
|
Python
|
app/views/api/web/Tap.py
|
RedFalsh/flask-example
|
561b04d31356cf1a4a0324a89ebc4f3dcd94e16c
|
[
"MIT"
] | 1 |
2019-02-26T07:40:07.000Z
|
2019-02-26T07:40:07.000Z
|
app/views/api/web/Tap.py
|
RedFalsh/flask-example
|
561b04d31356cf1a4a0324a89ebc4f3dcd94e16c
|
[
"MIT"
] | null | null | null |
app/views/api/web/Tap.py
|
RedFalsh/flask-example
|
561b04d31356cf1a4a0324a89ebc4f3dcd94e16c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
from app.views.api import route_api
from flask import request,jsonify,g
import requests,json
from app import db
from app.model import User
from app.model import Member
from app.model import Device
from app.model import DeviceTap
from app.model import DeviceOperateLog
from app.model import DeviceOnlineLog
from app.model import DevicePowerLog
from app.model import DeviceTime
from app.common.libs.Logging import logger
from app.common.libs.UserService import UserService
from app.common.libs.DeviceService import DeviceService
from app.common.libs.Helper import getCurrentDate, getFormatDate
from sqlalchemy import desc
# Database modification method (data initialization)
@route_api.route("/tap/sqldatainit",methods = [ "GET","POST" ])
def tapSqlDataInit():
resp = { 'code':20000, 'message':'初始化成功', 'data':{}}
device_list = Device.query.all()
for device in device_list:
tap1 = DeviceTap()
tap1.device_id = device.id
tap1.alias = device.alias1
tap1.status = device.status1
db.session.add(tap1)
tap2 = DeviceTap()
tap2.device_id = device.id
tap2.alias = device.alias2
tap2.status = device.status2
db.session.add(tap2)
db.session.commit()
return jsonify( resp )
# User interface query
@route_api.route("/tap/list",methods = [ "GET","POST" ])
def tapList():
resp = { 'code':20000, 'message':'查询成功', 'data':{}}
user_info = UserService.getUserInfo(request)
req = request.values
if user_info.roles == 'editor':
resp = DeviceService.filterDeviceByEditor(req)
return jsonify( resp )
else:
resp = DeviceService.filterDeviceByUser(req, user_info)
return jsonify( resp )
return jsonify( resp )
@route_api.route("/tap/edit",methods = [ "GET","POST" ])
def tapEdit():
resp = { 'code':20000, 'message':'修改成功', 'data':{}}
req = request.values
sn = req['sn'] if 'sn' in req else ''
if not sn or len( sn ) < 1:
resp['code'] = -1
resp['message'] = "need sn"
return jsonify(resp)
device_info = Device.query.filter_by( sn = sn ).first()
if not device_info:
resp['code'] = -1
resp['message'] = '失败,设备不存在~'
return jsonify(resp)
position = req['position'] if 'position' in req else ''
alias1 = req['alias1'] if 'alias1' in req else ''
alias2 = req['alias2'] if 'alias2' in req else ''
device_info.alias1 = alias1
device_info.alias2 = alias2
device_info.position = position
db.session.commit()
return jsonify(resp)
@route_api.route("/tap/info",methods = [ "GET","POST" ])
def tapInfo():
resp = { 'code':20000, 'message':'查询成功', 'data':{}}
req = request.values
sn = req['sn'] if 'sn' in req else ''
if not sn or len( sn ) < 1:
resp['code'] = -1
resp['message'] = "need sn"
return jsonify(resp)
resp = DeviceService.tapInfo(sn)
return jsonify( resp )
@route_api.route("/tap/position/list",methods = [ "GET","POST" ])
def tapPositionList():
resp = { 'code':20000, 'message':'修改成功', 'data':{}}
req = request.values
user_info = UserService.getUserInfo(request)
positions = db.session.query(Device.position)\
.filter(Member.id == Device.member_id )\
.filter(User.mobile == Member.mobile )\
.all()
items = [p[0] for p in positions]
items = list(set(items))
resp['data']['items'] = items
return jsonify(resp)
@route_api.route("/tap/operate/log/list",methods = [ "GET","POST" ])
def tapOperateLogList():
resp = { 'code':20000, 'message':'查询成功', 'data':{}}
req = request.values
sn = req['sn'] if 'sn' in req else ''
page = int( req['page'] ) if 'page' in req else 0
limit = int( req['limit'] ) if 'limit' in req else 0
offset = ( page - 1 ) * limit
datetimeStart = req['datetimeStart'] if 'datetimeStart' in req else ''
datetimeEnd = req['datetimeEnd'] if 'datetimeEnd' in req else ''
user_info = UserService.getUserInfo(request)
query = db.session.query(DeviceOperateLog, DeviceTap).filter( DeviceOperateLog.device_id == Device.id ).filter( Device.sn == sn )\
.filter( DeviceOperateLog.device_tap_id == DeviceTap.id)\
.filter( DeviceOperateLog.time.between(datetimeStart, datetimeEnd) )
total = query.count()
tap_log_list = query.order_by(desc(DeviceOperateLog.time)).offset( offset ).limit( limit ).all()
items = []
for log,tap in tap_log_list:
items.append({
'alias': tap.alias,
'msg': log.msg,
'time': getFormatDate(log.time)
})
resp['data']['items'] = items
resp['data']['total'] = total
return jsonify( resp )
# Device online/offline records
@route_api.route("/tap/online/log/list",methods = [ "GET","POST" ])
def tapOnlineLogList():
resp = { 'code':20000, 'message':'查询成功', 'data':{}}
req = request.values
sn = req['sn'] if 'sn' in req else ''
page = int( req['page'] ) if 'page' in req else 0
limit = int( req['limit'] ) if 'limit' in req else 0
offset = ( page - 1 ) * limit
query = DeviceOnlineLog.query.filter( DeviceOnlineLog.device_id == Device.id ).filter( Device.sn == sn )
total = query.count()
log_list = query.order_by(desc(DeviceOnlineLog.time)).offset( offset ).limit( limit ).all()
items = []
for log in log_list:
items.append({
'online': log.online,
'time': getFormatDate(log.time)
})
resp['data']['items'] = items
resp['data']['total'] = total
return jsonify( resp )
# Device power level records
@route_api.route("/tap/power/log/list",methods = [ "GET","POST" ])
def tapPowerLogList():
resp = { 'code':20000, 'message':'查询成功', 'data':{}}
req = request.values
sn = req['sn'] if 'sn' in req else ''
page = int( req['page'] ) if 'page' in req else 0
limit = int( req['limit'] ) if 'limit' in req else 0
offset = ( page - 1 ) * limit
query = DevicePowerLog.query.filter( DevicePowerLog.device_id == Device.id ).filter( Device.sn == sn )
total = query.count()
log_list = query.order_by(desc(DevicePowerLog.time)).offset( offset ).limit( limit ).all()
items = []
for log in log_list:
items.append({
'power': str(log.power),
'time': getFormatDate(log.time)
})
resp['data']['items'] = items
resp['data']['total'] = total
return jsonify( resp )
# Device scheduled task related APIs
@route_api.route("/tap/clock/list",methods = [ "GET","POST" ])
def tapClockList():
resp = {'code': 20000, 'message': 'ok~', 'data': {}}
req = request.values
sn = req['sn'] if 'sn' in req else ''
if not sn or len( sn ) < 1:
resp['code'] = -1
resp['message'] = "need sn"
return jsonify(resp)
number = req['number'] if 'number' in req else 0
if number == 0:
resp['code'] = -1
resp['message'] = "need number"
return jsonify(resp)
time_list = db.session.query(DeviceTime).\
filter(Device.sn==sn).\
filter(DeviceTime.device_id==Device.id).\
filter(DeviceTime.switch_num==number).\
all()
items = []
for d in time_list:
items.append({
'id':d.id,
'type':d.type,
'alive': d.alive,
'period':d.period,
'open_time':d.open_time,
'number':d.switch_num,
'close_time':d.close_time
})
resp['data']['items'] = items
return jsonify(resp)
@route_api.route("/tap/clock/add",methods = [ "GET","POST" ])
def tapClockAdd():
resp = {'code': 20000, 'message': '添加成功~', 'data': {}}
req = request.values
sn = req['sn'] if 'sn' in req else ''
if not sn or len( sn ) < 1:
resp['code'] = -1
resp['message'] = "需要sn~"
return jsonify(resp)
device_info = Device.query.filter_by( sn = sn ).first()
if not device_info:
resp['code'] = -1
resp['message'] = "当前设备不存在~~"
return jsonify(resp)
time_model = DeviceTime()
_type = int(req['type']) if 'type' in req else 0
open_time = req['open_time'] if 'open_time' in req else ''
close_time = req['close_time'] if 'close_time' in req else ''
number = int(req['number']) if 'number' in req else 0
time_model.type = _type
time_model.alive = 1
time_model.device_id = device_info.id
time_model.switch_num = number
time_model.open_time = open_time
time_model.close_time = close_time
time_model.created_time = getCurrentDate()
db.session.add(time_model)
db.session.commit()
return jsonify(resp)
@route_api.route("/tap/clock/edit",methods = [ "GET","POST" ])
def tapClockEdit():
resp = {'code': 20000, 'message': '修改成功~', 'data': {}}
req = request.values
id = int(req['id']) if 'id' in req else 0
if id < 1:
resp['code'] = -1
resp['message'] = "need id"
return jsonify(resp)
time_info = DeviceTime.query.filter_by(id=id).first()
_type = int(req['type']) if 'type' in req else 0
open_time = req['open_time'] if 'open_time' in req else ''
close_time = req['close_time'] if 'close_time' in req else ''
time_info.type = _type
time_info.open_time = open_time
time_info.close_time = close_time
time_info.updated_time = getCurrentDate()
db.session.commit()
return jsonify(resp)
@route_api.route("/tap/clock/delete",methods = [ "GET","POST" ])
def tapClockDelete():
resp = {'code': 20000, 'message': '删除成功~', 'data': {}}
req = request.values
id = int(req['id']) if 'id' in req else 0
if id < 1:
resp['code'] = -1
resp['message'] = "需要id"
return jsonify(resp)
DeviceTime.query.filter_by(id = id).delete()
db.session.commit()
return jsonify(resp)
| 30.670807 | 134 | 0.600344 |
7948916da46e16967ffdfe022c4f4df863b0b3f3
| 138 |
py
|
Python
|
lambda_proxy_cache/__init__.py
|
vincentsarago/lambda-proxy-cache
|
e038ddcca3b683477a7f45231a29744ee896c0c1
|
[
"BSD-3-Clause"
] | 4 |
2019-06-29T06:56:57.000Z
|
2021-06-11T14:51:18.000Z
|
lambda_proxy_cache/__init__.py
|
vincentsarago/lambda-proxy-cache
|
e038ddcca3b683477a7f45231a29744ee896c0c1
|
[
"BSD-3-Clause"
] | 4 |
2019-06-29T19:45:14.000Z
|
2020-04-16T01:41:07.000Z
|
lambda_proxy_cache/__init__.py
|
vincentsarago/lambda-proxy-cache
|
e038ddcca3b683477a7f45231a29744ee896c0c1
|
[
"BSD-3-Clause"
] | 1 |
2019-06-29T17:01:39.000Z
|
2019-06-29T17:01:39.000Z
|
"""lambda-proxy-cache: lambda-proxy cache plugin."""
import pkg_resources
version = pkg_resources.get_distribution(__package__).version
| 23 | 61 | 0.804348 |
794891a08aa7314e3a2b585a7f01b9a11a3ff7ff
| 983 |
py
|
Python
|
tests/Python/OWASP_a4/cwe_22/unsafe/cwe_22__I_readline__F_no_filtering__S_file_exists__1-3.5_File1.py
|
usnistgov/VTSG
|
f4477a78ec19f7e9757da0321cb5a69428e358cf
|
[
"MIT"
] | null | null | null |
tests/Python/OWASP_a4/cwe_22/unsafe/cwe_22__I_readline__F_no_filtering__S_file_exists__1-3.5_File1.py
|
usnistgov/VTSG
|
f4477a78ec19f7e9757da0321cb5a69428e358cf
|
[
"MIT"
] | 1 |
2022-01-31T22:22:55.000Z
|
2022-01-31T22:22:55.000Z
|
tests/Python/OWASP_a4/cwe_22/unsafe/cwe_22__I_readline__F_no_filtering__S_file_exists__1-3.5_File1.py
|
usnistgov/VTSG
|
f4477a78ec19f7e9757da0321cb5a69428e358cf
|
[
"MIT"
] | null | null | null |
'''
input: direct user input in string
no filtering
sink: check if a file exists
'''
'''
Created by Paul E. Black and William Mentzer 2020
This software was developed at the National Institute of Standards and Technology
by employees of the Federal Government in the course of their official duties.
Pursuant to title 17 Section 105 of the United States Code the software is not
subject to copyright protection and are in the public domain.
We would appreciate acknowledgment if the software is used.
Paul E. Black paul.black@nist.gov
William Mentzer willmentzer20@gmail.com
'''
import math
import os
import sys
def main():
tainted_2 = None
tainted_3 = None
tainted_2 = input()
tainted_3 = tainted_2
if((math.pow(4, 2)<=42)):
{}
else:
# No filtering (sanitization)
tainted_3 = tainted_2
#flaw
os.path.exists(tainted_3)
if __name__ == '__main__':
main()
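# --- Hedged illustration (not part of the generated NIST test case) ---
# The docstring above describes the flaw exercised here: the user-controlled
# string reaches os.path.exists() with no filtering, which is the CWE-22
# (path traversal) pattern. A common mitigation, sketched below under the
# assumption that only files beneath a fixed base directory should be
# reachable, resolves the candidate path and checks its prefix first.
def _is_within_base(user_path, base_dir="/var/app/data"):
    # base_dir is an illustrative assumption, not taken from the test case.
    base = os.path.realpath(base_dir)
    resolved = os.path.realpath(os.path.join(base, user_path))
    return resolved == base or resolved.startswith(base + os.sep)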
| 21.369565 | 81 | 0.670397 |
794891d3477b16bcfccbb3d35e747f6b5a0c04c2
| 4,281 |
py
|
Python
|
tests/test_inspector.py
|
gustavofonseca/Logger
|
d6b48f98393ebee8e39b04eb44c80a0fccd98ffb
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_inspector.py
|
gustavofonseca/Logger
|
d6b48f98393ebee8e39b04eb44c80a0fccd98ffb
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_inspector.py
|
gustavofonseca/Logger
|
d6b48f98393ebee8e39b04eb44c80a0fccd98ffb
|
[
"BSD-2-Clause"
] | null | null | null |
import unittest
from logger.inspector import Inspector
from mocker import ANY, MockerTestCase
COLLECTIONS = {
'br': {
"status": "certified",
"original_name": "Brasil",
"document_count": 392414,
"acron": "scl",
"domain": "www.scielo.br",
"has_analytics": True,
"is_active": True,
"journal_count": {
"deceased": 42,
"suspended": 36,
"current": 295
},
"type": "journals",
"acron2": "br",
"name": {
"en": "Brazil",
"es": "Brasil",
"pt": "Brasil"
},
"code": "scl"
}
}
class TestInspectorTests(MockerTestCase):
def test_is_valid_filename_node1(self):
_insp = self.mocker.patch(Inspector)
_insp.get_collections()
self.mocker.result(COLLECTIONS)
self.mocker.replay()
insp = Inspector('/var/www/scielo.br/2015-12-30_scielo.br.1.log.gz')
self.assertTrue(insp._is_valid_filename())
expected = {
'date': '2015-12-30',
'collection': 'br'
}
self.assertEqual(expected, insp._parsed_fn.groupdict())
def test_is_valid_filename(self):
_insp = self.mocker.patch(Inspector)
_insp.get_collections()
self.mocker.result(COLLECTIONS)
self.mocker.replay()
insp = Inspector('/var/www/scielo.br/2015-12-30_scielo.br.log.gz')
self.assertTrue(insp._is_valid_filename())
expected = {
'date': '2015-12-30',
'collection': 'br'
}
self.assertEqual(expected, insp._parsed_fn.groupdict())
def test_is_valid_filename_false(self):
_insp = self.mocker.patch(Inspector)
_insp.get_collections()
self.mocker.result(COLLECTIONS)
self.mocker.replay()
insp = Inspector('/var/www/scielo.br/2015-12-30_scilo.br.log.gz')
self.assertFalse(insp._is_valid_filename())
def test_is_valid_date_in_filename(self):
_insp = self.mocker.patch(Inspector)
_insp.get_collections()
self.mocker.result(COLLECTIONS)
self.mocker.replay()
insp = Inspector('/var/www/scielo.br/2015-12-30_scielo.br.log.gz')
self.assertTrue(insp._is_valid_date())
def test_is_valid_date_in_filename_false(self):
_insp = self.mocker.patch(Inspector)
_insp.get_collections()
self.mocker.result(COLLECTIONS)
self.mocker.replay()
insp = Inspector('/var/www/scielo.br/2015-31-12_scielo.br.log.gz')
self.assertFalse(insp._is_valid_date())
def test_is_valid_collection_in_filename(self):
_insp = self.mocker.patch(Inspector)
_insp.get_collections()
self.mocker.result(COLLECTIONS)
self.mocker.replay()
insp = Inspector('/var/www/scielo.br/2015-12-30_scielo.br.log.gz')
self.assertTrue(insp._is_valid_collection())
def test_is_invalid_collection_in_filename(self):
_insp = self.mocker.patch(Inspector)
_insp.get_collections()
self.mocker.result(COLLECTIONS)
self.mocker.replay()
insp = Inspector('/var/www/scielo.br/2015-12-30_scielo.xxx.log.gz')
self.assertFalse(insp._is_valid_collection())
def test_is_valid_source_directory(self):
_insp = self.mocker.patch(Inspector)
_insp.get_collections()
self.mocker.result(COLLECTIONS)
self.mocker.replay()
insp = Inspector('/var/www/scielo.br/2015-12-30_scielo.br.log.gz')
self.assertTrue(insp._is_valid_source_directory())
def test_is_valid_source_directory_false_1(self):
_insp = self.mocker.patch(Inspector)
_insp.get_collections()
self.mocker.result(COLLECTIONS)
self.mocker.replay()
insp = Inspector('/var/www/scielo.br/2015-12-30_sciel.br.log.gz')
self.assertFalse(insp._is_valid_source_directory())
def test_is_valid_source_directory_false_2(self):
_insp = self.mocker.patch(Inspector)
_insp.get_collections()
self.mocker.result(COLLECTIONS)
self.mocker.replay()
insp = Inspector('/var/www/scielo.pepsic/2015-12-30_scielo.br.log.gz')
self.assertFalse(insp._is_valid_source_directory())
| 30.798561 | 78 | 0.627891 |
7948932f89ca88ec8169a3773f7e712e9c515288
| 3,048 |
py
|
Python
|
src/dnsrobocert/core/utils.py
|
foxwoods369/docker-letsencrypt-dns
|
301939c271c8e299d52971be9402b9f8d120abf4
|
[
"MIT"
] | null | null | null |
src/dnsrobocert/core/utils.py
|
foxwoods369/docker-letsencrypt-dns
|
301939c271c8e299d52971be9402b9f8d120abf4
|
[
"MIT"
] | null | null | null |
src/dnsrobocert/core/utils.py
|
foxwoods369/docker-letsencrypt-dns
|
301939c271c8e299d52971be9402b9f8d120abf4
|
[
"MIT"
] | null | null | null |
import hashlib
import logging
import os
import re
import subprocess
import sys
from typing import Any, Dict, List
import coloredlogs
try:
POSIX_MODE = True
import pwd
import grp
except ImportError:
POSIX_MODE = False
LOGGER = logging.getLogger(__name__)
coloredlogs.install(logger=LOGGER)
def execute(args: List[str], check: bool = True, env: Dict[str, str] = None):
if not env:
env = os.environ.copy()
env = env.copy()
env["PYTHONUNBUFFERED "] = "1"
call = subprocess.check_call if check else subprocess.call
LOGGER.info("Launching command: {0}".format(subprocess.list2cmdline(args)))
sys.stdout.write("----------\n")
sys.stdout.flush()
error = None
try:
call(args, env=env)
except subprocess.CalledProcessError as e:
error = e
sys.stdout.write("----------\n")
sys.stdout.flush()
if error:
raise error
def fix_permissions(certificate_permissions: Dict[str, Any], target_path: str):
files_mode = certificate_permissions.get("files_mode", 0o640)
dirs_mode = certificate_permissions.get("dirs_mode", 0o750)
os.chmod(target_path, dirs_mode)
uid = -1
gid = -1
user = certificate_permissions.get("user")
group = certificate_permissions.get("group")
if (user or group) and not POSIX_MODE:
LOGGER.warning(
"Setting user and group for certificates/keys is not supported on Windows."
)
elif POSIX_MODE:
if isinstance(user, int):
uid = user
elif isinstance(user, str):
uid = pwd.getpwnam(user)[2]
if isinstance(group, int):
gid = group
elif isinstance(group, str):
gid = grp.getgrnam(group)[2]
os.chown(target_path, uid, gid) # type: ignore
for root, dirs, files in os.walk(target_path):
for path in dirs:
os.chmod(os.path.join(root, path), dirs_mode)
for path in files:
os.chmod(os.path.join(root, path), files_mode)
if POSIX_MODE:
for path in files + dirs:
os.chown(os.path.join(root, path), uid, gid) # type: ignore
def configure_certbot_workspace(
dnsrobocert_config: Dict[str, Any], directory_path: str
):
    live_path = os.path.join(directory_path, "live")
    archive_path = os.path.join(directory_path, "archive")
if not os.path.exists(live_path):
os.makedirs(live_path)
if not os.path.exists(archive_path):
os.makedirs(archive_path)
certificate_permissions = dnsrobocert_config.get("acme", {}).get(
"certs_permissions", {}
)
fix_permissions(certificate_permissions, live_path)
fix_permissions(certificate_permissions, archive_path)
def digest(path: str):
if not os.path.exists(path):
return None
with open(path, "rb") as file_h:
config_data = file_h.read()
md5 = hashlib.md5()
md5.update(config_data)
return md5.digest()
def normalize_lineage(domain: str):
return re.sub(r"^\*\.", "", domain)
| 25.830508 | 87 | 0.641404 |
794895aae22e2d30d6c20fdd32dda30dd2c8bf2e
| 4,550 |
py
|
Python
|
tests/integrate_test/test_integrate_global_variable_score.py
|
bayeshack2016/icon-service
|
36cab484d2e41548d7f2f74526f127ee3a4423fc
|
[
"Apache-2.0"
] | 52 |
2018-08-24T02:28:43.000Z
|
2021-07-06T04:44:22.000Z
|
tests/integrate_test/test_integrate_global_variable_score.py
|
bayeshack2016/icon-service
|
36cab484d2e41548d7f2f74526f127ee3a4423fc
|
[
"Apache-2.0"
] | 62 |
2018-09-17T06:59:16.000Z
|
2021-12-15T06:02:51.000Z
|
tests/integrate_test/test_integrate_global_variable_score.py
|
bayeshack2016/icon-service
|
36cab484d2e41548d7f2f74526f127ee3a4423fc
|
[
"Apache-2.0"
] | 35 |
2018-09-14T02:42:10.000Z
|
2022-02-05T10:34:46.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IconScoreEngine testcase
"""
from typing import TYPE_CHECKING, List
from iconservice.icon_inner_service import MakeResponse
from tests.integrate_test.test_integrate_base import TestIntegrateBase
if TYPE_CHECKING:
from iconservice.base.address import Address
from iconservice.iconscore.icon_score_result import TransactionResult
def _create_query_request(from_: 'Address', to_: 'Address', method: str):
return {
"version": 3,
"from": from_,
"to": to_,
"dataType": "call",
"data": {"method": method}
}
class TestScoreGlobalVariable(TestIntegrateBase):
def setUp(self):
super().setUp()
sender: 'Address' = self._accounts[0].address
tx_results: List['TransactionResult'] = self.deploy_score(
score_root="sample_scores",
score_name="sample_global_variable_score",
from_=sender,
expected_status=True)
score_address: 'Address' = tx_results[0].score_address
request = _create_query_request(sender, score_address, "hello")
response = self._query(request)
self.assertEqual(response, "hello")
self.sender = sender
self.score_address = score_address
def _create_query_request(self, method: str):
return _create_query_request(self.sender, self.score_address, method)
def test_global_dict(self):
expected_response = {"a": 1, "b": [2, 3], "c": {"d": 4}}
expected_converted_response = {"a": "0x1", "b": ["0x2", "0x3"], "c": {"d": "0x4"}}
request: dict = self._create_query_request("getGlobalDict")
# First score call for query
response_0 = self._query(request)
assert isinstance(response_0, dict)
assert response_0 == expected_response
# make_response() does in-place value type conversion in response_0
converted_response = MakeResponse.make_response(response_0)
assert converted_response == expected_converted_response
assert response_0 != expected_response
assert id(converted_response) == id(response_0)
# Check if the response is deeply copied on every query call
response_1: dict = self._query(request)
assert isinstance(response_1, dict)
assert id(response_1) != id(response_0)
assert response_1 == expected_response
def test_global_list(self):
expected_response = [1, {"a": 1}, ["c", 2]]
expected_converted_response = ["0x1", {"a": "0x1"}, ["c", "0x2"]]
request: dict = self._create_query_request("getGlobalList")
# First score call for query
response_0: list = self._query(request)
assert isinstance(response_0, list)
assert response_0 == expected_response
# Check if the response is deeply copied on every query call
converted_response = MakeResponse.make_response(response_0)
assert converted_response == expected_converted_response
assert id(converted_response) == id(response_0)
response_1 = self._query(request)
assert isinstance(response_1, list)
assert id(response_1) != id(response_0)
assert response_1 == expected_response
def test_global_tuple(self):
expected_response = ({"a": 1}, 2, ["c", 2])
request: dict = self._create_query_request("getGlobalTuple")
# First score call for query
response_0: tuple = self._query(request)
assert isinstance(response_0, tuple)
assert response_0 == expected_response
converted_response = MakeResponse.make_response(response_0)
assert converted_response == expected_response
assert response_0 == expected_response
assert id(converted_response) == id(response_0)
response_1 = self._query(request)
assert isinstance(response_1, tuple)
assert id(response_1) != id(response_0)
assert response_1 == expected_response
| 36.693548 | 90 | 0.680659 |
794895d089241a36effb6aebadc198255b781561
| 2,364 |
py
|
Python
|
fastembed/predictor/lookup.py
|
Saleh-Ibtasham/VulScrape
|
738d17e9dd7e5edc2341d106361651fd28f99c61
|
[
"PostgreSQL",
"Unlicense",
"MIT"
] | 1 |
2021-04-12T12:59:33.000Z
|
2021-04-12T12:59:33.000Z
|
fastembed/predictor/lookup.py
|
Jokers-grin/VulScrape
|
738d17e9dd7e5edc2341d106361651fd28f99c61
|
[
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null |
fastembed/predictor/lookup.py
|
Jokers-grin/VulScrape
|
738d17e9dd7e5edc2341d106361651fd28f99c61
|
[
"PostgreSQL",
"Unlicense",
"MIT"
] | null | null | null |
import pandas as pd
from .classifier import get_result
from scipy import spatial
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
def getCveList(cve_list):
df_feature = pd.read_csv("./fastembed/ml_models/CVE_data_with_features.csv")
df_classifier = pd.read_csv("./fastembed/ml_models/CVE_classifier_data_with_features.csv")
cve_list = cve_list.split(",")
result_cve_list = []
for index, value in enumerate(cve_list):
result_cve_list.append("CVE-" + value.strip())
result_feature_df = df_feature.loc[df_feature['ID'].isin(result_cve_list)]
classifier_df = df_classifier.loc[df_classifier['ID'].isin(result_cve_list)]
print(result_cve_list)
if classifier_df.shape[0] != 0:
result_probability, result_truth = get_result(classifier_df.iloc[:,1:])
print(result_probability,result_truth)
result_feature_df["Exploited"] = result_truth
print(result_feature_df["Exploited"])
result_feature_df["Exploited_score"] = result_probability
print(result_feature_df["Exploited_score"])
result_feature_df["id"] = result_feature_df.index
return result_feature_df
def getCveSimilarity(df, data):
result_feature_df = []
data = np.reshape(data,(1,30))
result_similarities = cosine_similarity(data, df)
max_similarity = np.amax(result_similarities[0])
max_similarity_index = np.array(np.argmax(result_similarities[0], axis=0), ndmin=1)
# print(max_similarity, max_similarity_index[0])
result_feature_df.append(max_similarity)
result_feature_df.append(max_similarity_index[0])
return result_feature_df
def getSimilarCveList(cve_list):
df_feature = pd.read_csv("./fastembed/ml_models/CVE_data_with_features.csv")
result_cve_list = [cve[3] for cve in cve_list]
result_feature_df = df_feature.loc[df_feature['ID'].isin(result_cve_list)]
if result_feature_df.shape[0] == 0:
return
print(result_cve_list)
result_df = None
id = 0
for cve in cve_list:
temp_df = result_feature_df.loc[result_feature_df["ID"] == cve[3]]
temp_df["VULNERABILITY_KIND"] = cve[2]
temp_df["VULNERABILITY_RISK"] = cve[1]
temp_df["Source_code"] = cve[0]
temp_df["id"] = id
result_df = pd.concat([result_df,temp_df])
id += 1
return result_df
| 32.833333 | 94 | 0.715313 |
794895dbb411f5362117854a417dbd901cedf73c
| 1,128 |
py
|
Python
|
modoboa/dnstools/migrations/0001_initial.py
|
HarshCasper/modoboa
|
a00baa0593107992f545ee3e89cd4346b9615a96
|
[
"0BSD"
] | 1,602 |
2016-12-15T14:25:34.000Z
|
2022-03-31T16:49:25.000Z
|
modoboa/dnstools/migrations/0001_initial.py
|
sebageek/modoboa
|
57f5d57ea60a57e8dcac970085dfc07082481fc6
|
[
"0BSD"
] | 1,290 |
2016-12-14T15:39:05.000Z
|
2022-03-31T13:49:09.000Z
|
modoboa/dnstools/migrations/0001_initial.py
|
sebageek/modoboa
|
57f5d57ea60a57e8dcac970085dfc07082481fc6
|
[
"0BSD"
] | 272 |
2016-12-22T11:58:18.000Z
|
2022-03-17T15:57:24.000Z
|
# Generated by Django 1.11.16 on 2018-11-19 10:07
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('admin', '0014_auto_20181017_1628'),
]
operations = [
migrations.CreateModel(
name='DNSRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('spf', 'SPF'), ('dkim', 'DKIM'), ('dmarc', 'DMARC'), ('autoconfig', 'Autoconfig'), ('autodiscover', 'Autodiscover')], max_length=15)),
('value', models.TextField(blank=True)),
('is_valid', models.BooleanField(default=False)),
('error', models.CharField(blank=True, max_length=50, null=True)),
('updated', models.DateTimeField(default=django.utils.timezone.now)),
('domain', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='admin.Domain')),
],
),
]
| 38.896552 | 186 | 0.605496 |
79489633a191977e031af7ca493e62f7cb704d94
| 7,536 |
py
|
Python
|
workshops/tfx-caip-tf21/lab-03-tfx-cicd/pipeline/pipeline.py
|
DeshuaiWang/mlops-on-gcp
|
dfc12bb774476c029e3fb884e1b4d501d674f178
|
[
"Apache-2.0"
] | null | null | null |
workshops/tfx-caip-tf21/lab-03-tfx-cicd/pipeline/pipeline.py
|
DeshuaiWang/mlops-on-gcp
|
dfc12bb774476c029e3fb884e1b4d501d674f178
|
[
"Apache-2.0"
] | null | null | null |
workshops/tfx-caip-tf21/lab-03-tfx-cicd/pipeline/pipeline.py
|
DeshuaiWang/mlops-on-gcp
|
dfc12bb774476c029e3fb884e1b4d501d674f178
|
[
"Apache-2.0"
] | 1 |
2020-05-09T05:16:33.000Z
|
2020-05-09T05:16:33.000Z
|
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Covertype training pipeline DSL."""
import os
import kfp
import tensorflow_model_analysis as tfma
from typing import Optional, Dict, List, Text
from tfx.components.base import executor_spec
from tfx.components import Evaluator
from tfx.components import CsvExampleGen
from tfx.components import ExampleValidator
from tfx.components import ImporterNode
from tfx.components import Pusher
from tfx.components import ResolverNode
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.components import Trainer
from tfx.components import Transform
from tfx.components.trainer import executor as trainer_executor
from tfx.dsl.experimental import latest_blessed_model_resolver
from tfx.extensions.google_cloud_ai_platform.pusher import executor as ai_platform_pusher_executor
from tfx.extensions.google_cloud_ai_platform.trainer import executor as ai_platform_trainer_executor
from tfx.orchestration import data_types
from tfx.orchestration import pipeline
from tfx.orchestration.kubeflow import kubeflow_dag_runner
from tfx.orchestration.kubeflow.proto import kubeflow_pb2
from tfx.proto import example_gen_pb2
from tfx.proto import evaluator_pb2
from tfx.proto import pusher_pb2
from tfx.proto import trainer_pb2
from tfx.utils.dsl_utils import external_input
from tfx.types import Channel
from tfx.types.standard_artifacts import Model
from tfx.types.standard_artifacts import ModelBlessing
from tfx.types.standard_artifacts import Schema
import features
SCHEMA_FOLDER='schema'
TRANSFORM_MODULE_FILE='preprocessing.py'
TRAIN_MODULE_FILE='model.py'
def create_pipeline(pipeline_name: Text,
pipeline_root: Text,
data_root_uri: data_types.RuntimeParameter,
train_steps: data_types.RuntimeParameter,
eval_steps: data_types.RuntimeParameter,
ai_platform_training_args: Dict[Text, Text],
ai_platform_serving_args: Dict[Text, Text],
beam_pipeline_args: List[Text],
enable_cache: Optional[bool] = False) -> pipeline.Pipeline:
"""Trains and deploys the Covertype classifier."""
# Brings data into the pipeline and splits the data into training and eval splits
examples = external_input(data_root_uri)
output_config = example_gen_pb2.Output(
split_config=example_gen_pb2.SplitConfig(splits=[
example_gen_pb2.SplitConfig.Split(name='train', hash_buckets=4),
example_gen_pb2.SplitConfig.Split(name='eval', hash_buckets=1)
]))
generate_examples = CsvExampleGen(input=examples)
# Computes statistics over data for visualization and example validation.
generate_statistics = StatisticsGen(examples=generate_examples.outputs.examples)
# Import a user-provided schema
import_schema = ImporterNode(
instance_name='import_user_schema',
source_uri=SCHEMA_FOLDER,
artifact_type=Schema)
  # Generates a schema based on statistics files. Even though we use a user-provided
  # schema, we still want to generate the schema of the newest data for tracking and comparison.
infer_schema = SchemaGen(statistics=generate_statistics.outputs.statistics)
# Performs anomaly detection based on statistics and data schema.
validate_stats = ExampleValidator(
statistics=generate_statistics.outputs.statistics,
schema=import_schema.outputs.result)
# Performs transformations and feature engineering in training and serving.
transform = Transform(
examples=generate_examples.outputs.examples,
schema=import_schema.outputs.result,
module_file=TRANSFORM_MODULE_FILE)
# Trains the model using a user provided trainer function.
train = Trainer(
custom_executor_spec=executor_spec.ExecutorClassSpec(
ai_platform_trainer_executor.GenericExecutor),
# custom_executor_spec=executor_spec.ExecutorClassSpec(trainer_executor.GenericExecutor),
module_file=TRAIN_MODULE_FILE,
transformed_examples=transform.outputs.transformed_examples,
schema=import_schema.outputs.result,
transform_graph=transform.outputs.transform_graph,
train_args={'num_steps': train_steps},
eval_args={'num_steps': eval_steps},
custom_config={'ai_platform_training_args': ai_platform_training_args})
# Get the latest blessed model for model validation.
resolve = ResolverNode(
instance_name='latest_blessed_model_resolver',
resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,
model=Channel(type=Model),
model_blessing=Channel(type=ModelBlessing))
  # Uses TFMA to compute evaluation statistics over features of a model.
accuracy_threshold = tfma.MetricThreshold(
value_threshold=tfma.GenericValueThreshold(
lower_bound={'value': 0.5},
upper_bound={'value': 0.99}),
change_threshold=tfma.GenericChangeThreshold(
absolute={'value': 0.0001},
direction=tfma.MetricDirection.HIGHER_IS_BETTER),
)
metrics_specs = tfma.MetricsSpec(
metrics = [
tfma.MetricConfig(class_name='SparseCategoricalAccuracy',
threshold=accuracy_threshold),
tfma.MetricConfig(class_name='ExampleCount')])
eval_config = tfma.EvalConfig(
model_specs=[
tfma.ModelSpec(label_key='Cover_Type')
],
metrics_specs=[metrics_specs],
slicing_specs=[
tfma.SlicingSpec(),
tfma.SlicingSpec(feature_keys=['Wilderness_Area'])
]
)
analyze = Evaluator(
examples=generate_examples.outputs.examples,
model=train.outputs.model,
baseline_model=resolve.outputs.model,
eval_config=eval_config
)
# Checks whether the model passed the validation steps and pushes the model
  # to a file destination if the check passed.
deploy = Pusher(
model=train.outputs['model'],
model_blessing=analyze.outputs['blessing'],
push_destination=pusher_pb2.PushDestination(
filesystem=pusher_pb2.PushDestination.Filesystem(
base_directory=os.path.join(
str(pipeline.ROOT_PARAMETER), 'model_serving'))))
#deploy = Pusher(
# custom_executor_spec=executor_spec.ExecutorClassSpec(
# ai_platform_pusher_executor.Executor),
# model=train.outputs.model,
# model_blessing=validate.outputs.blessing,
# custom_config={'ai_platform_serving_args': ai_platform_serving_args})
return pipeline.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=[
generate_examples, generate_statistics, import_schema, infer_schema, validate_stats, transform,
train, resolve, analyze , deploy
],
enable_cache=enable_cache,
beam_pipeline_args=beam_pipeline_args
)
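# --- Hedged usage sketch (not part of the original workshop pipeline) ---
# The comments above describe each component wired into create_pipeline().
# A separate runner module would typically build the pipeline with runtime
# parameters and compile it for Kubeflow Pipelines roughly as sketched below;
# every literal value (names, GCS paths, step counts, regions, projects) is
# an illustrative assumption, not a value mandated by this pipeline.
def _compile_example():
    train_steps = data_types.RuntimeParameter(
        name='train-steps', default=5000, ptype=int)
    eval_steps = data_types.RuntimeParameter(
        name='eval-steps', default=500, ptype=int)
    data_root_uri = data_types.RuntimeParameter(
        name='data-root-uri', default='gs://example-bucket/covertype', ptype=Text)
    pipeline_def = create_pipeline(
        pipeline_name='covertype-training',
        pipeline_root='gs://example-bucket/pipeline-root',
        data_root_uri=data_root_uri,
        train_steps=train_steps,
        eval_steps=eval_steps,
        ai_platform_training_args={'project': 'example-project',
                                   'region': 'us-central1'},
        ai_platform_serving_args={'project_id': 'example-project',
                                  'model_name': 'covertype_classifier'},
        beam_pipeline_args=['--direct_num_workers=1'])
    # The assumed KubeflowDagRunner call compiles the DSL into a KFP package.
    kubeflow_dag_runner.KubeflowDagRunner(
        config=kubeflow_dag_runner.KubeflowDagRunnerConfig()).run(pipeline_def)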
| 39.663158 | 105 | 0.738588 |
794896df86b21a286f6e447f5444425ff0945e28
| 1,039 |
py
|
Python
|
fmcapi/api_objects/device_services/__init__.py
|
aegiacometti/fmcapi
|
1cce480bed69ec199180f3fe3981570570fd107d
|
[
"BSD-3-Clause"
] | 2 |
2019-07-22T15:15:38.000Z
|
2021-02-22T14:44:41.000Z
|
fmcapi/api_objects/device_services/__init__.py
|
aegiacometti/fmcapi
|
1cce480bed69ec199180f3fe3981570570fd107d
|
[
"BSD-3-Clause"
] | 1 |
2020-06-14T17:50:41.000Z
|
2020-06-14T17:50:41.000Z
|
fmcapi/api_objects/device_services/__init__.py
|
aegiacometti/fmcapi
|
1cce480bed69ec199180f3fe3981570570fd107d
|
[
"BSD-3-Clause"
] | null | null | null |
"""Device Services Classes."""
import logging
from .bridgegroupinterfaces import BridgeGroupInterfaces
from .devicerecords import Device
from .devicerecords import DeviceRecords
from .etherchannelinterfaces import EtherchannelInterfaces
from .ipv4staticroutes import IPv4StaticRoutes
from .ipv4staticroutes import IPv4StaticRoute
from .ipv6staticroutes import IPv6StaticRoutes
from .ipv6staticroutes import IPv6StaticRoute
from .physicalinterfaces import PhysicalInterfaces
from .physicalinterfaces import PhysicalInterface
from .redundantinterfaces import RedundantInterfaces
from .staticroutes import StaticRoutes
from .subinterfaces import SubInterfaces
logging.debug("In the device_services __init__.py file.")
__all__ = [
"Device",
"DeviceRecords",
"StaticRoutes",
"IPv4StaticRoutes",
"IPv4StaticRoute",
"IPv6StaticRoutes",
"IPv6StaticRoute",
"PhysicalInterfaces",
"PhysicalInterface",
"BridgeGroupInterfaces",
"RedundantInterfaces",
"EtherchannelInterfaces",
"SubInterfaces",
]
| 29.685714 | 58 | 0.80462 |
794897add74eac731c2c12ddf745f580668ee9e8
| 2,653 |
py
|
Python
|
tests/env/packages/env_test_ld_impl_linux-64_linux.py
|
swaldhoer/foxbms-2
|
ee57761ba6c2817e5335b68dfda3513b436e3abd
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 47 |
2021-04-01T21:14:55.000Z
|
2022-03-30T12:19:18.000Z
|
tests/env/packages/env_test_ld_impl_linux-64_linux.py
|
jagatheesh31/foxbms-2
|
ee57761ba6c2817e5335b68dfda3513b436e3abd
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 6 |
2021-06-01T08:25:54.000Z
|
2021-11-17T07:55:05.000Z
|
tests/env/packages/env_test_ld_impl_linux-64_linux.py
|
jagatheesh31/foxbms-2
|
ee57761ba6c2817e5335b68dfda3513b436e3abd
|
[
"CC-BY-4.0",
"BSD-3-Clause"
] | 24 |
2021-04-01T21:15:04.000Z
|
2022-03-29T06:18:10.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2010 - 2021, Fraunhofer-Gesellschaft zur Foerderung der angewandten Forschung e.V.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# We kindly request you to use one or more of the following phrases to refer to
# foxBMS in your hardware, software, documentation or advertising materials:
#
# - "This product uses parts of foxBMS®"
# - "This product includes parts of foxBMS®"
# - "This product is derived from foxBMS®"
# pylint:disable=invalid-name
"""Testing 'ld_impl_linux-64' package"""
import logging
import argparse
def main():
"""Testing 'ld_impl_linux-64' package"""
parser = argparse.ArgumentParser()
parser.add_argument(
"-v",
"--verbosity",
dest="verbosity",
action="count",
default=0,
help="set verbosity level",
)
args = parser.parse_args()
if args.verbosity == 1:
logging.basicConfig(level=logging.INFO)
elif args.verbosity > 1:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.ERROR)
if __name__ == "__main__":
main()
| 36.847222 | 98 | 0.733509 |
7948984747f94d5d5b1ac8c60375e734b2b23dcf
| 36 |
py
|
Python
|
__init__.py
|
KennethBlaney/rivescript-python
|
87db472847ab526060afd9a5b8548e9689501a85
|
[
"MIT"
] | null | null | null |
__init__.py
|
KennethBlaney/rivescript-python
|
87db472847ab526060afd9a5b8548e9689501a85
|
[
"MIT"
] | null | null | null |
__init__.py
|
KennethBlaney/rivescript-python
|
87db472847ab526060afd9a5b8548e9689501a85
|
[
"MIT"
] | null | null | null |
from converscript import RiveScript
| 18 | 35 | 0.888889 |
794898ef6350e441d7a8ba0ec37671df43872f0f
| 381 |
py
|
Python
|
microproject/RX/temp3.py
|
jithinsankar/projectLI-FI
|
965ca33eddec6e026e26fd3451905ded1f5dc0aa
|
[
"MIT"
] | null | null | null |
microproject/RX/temp3.py
|
jithinsankar/projectLI-FI
|
965ca33eddec6e026e26fd3451905ded1f5dc0aa
|
[
"MIT"
] | null | null | null |
microproject/RX/temp3.py
|
jithinsankar/projectLI-FI
|
965ca33eddec6e026e26fd3451905ded1f5dc0aa
|
[
"MIT"
] | null | null | null |
import numpy as np
import base64
a=[]
a.append('a')
a.append('G')
a.append('U')
a.append('K')
image_64_decode=list(a)
image_64_decode=''.join(image_64_decode)
image_64_decode = base64.b64decode(image_64_decode)
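# Note: 'aGUK' is the base64 encoding of b'he\n', so that is the payload printed and written below.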
print(image_64_decode)
image_result = open('t.txt', 'wb') # create a writable image and write the decoding result
image_result.write(image_64_decode)
image_result.close()
| 25.4 | 90 | 0.774278 |
79489a1bd6868dcbe826a12f7647823d34faa774
| 35,700 |
py
|
Python
|
salt/cloud/clouds/profitbricks.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 1 |
2020-03-31T22:51:16.000Z
|
2020-03-31T22:51:16.000Z
|
salt/cloud/clouds/profitbricks.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
salt/cloud/clouds/profitbricks.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 1 |
2021-09-30T07:00:01.000Z
|
2021-09-30T07:00:01.000Z
|
# -*- coding: utf-8 -*-
'''
ProfitBricks Cloud Module
=========================
The ProfitBricks SaltStack cloud module allows a ProfitBricks server to
be automatically deployed and bootstrapped with Salt.
:depends: profitbrick >= 3.1.0
The module requires ProfitBricks credentials to be supplied along with
an existing virtual datacenter UUID where the server resources will
reside. The server should also be assigned a public LAN, a private LAN,
or both along with SSH key pairs.
...
Set up the cloud configuration at ``/etc/salt/cloud.providers`` or
``/etc/salt/cloud.providers.d/profitbricks.conf``:
.. code-block:: yaml
my-profitbricks-config:
driver: profitbricks
# The ProfitBricks login username
username: user@example.com
# The ProfitBricks login password
password: secretpassword
# The ProfitBricks virtual datacenter UUID
datacenter_id: <UUID>
# SSH private key filename
ssh_private_key: /path/to/private.key
# SSH public key filename
ssh_public_key: /path/to/public.key
.. code-block:: yaml
my-profitbricks-profile:
provider: my-profitbricks-config
# Name of a predefined server size.
size: Micro Instance
# Assign CPU family to server.
cpu_family: INTEL_XEON
# Number of CPU cores to allocate to node (overrides server size).
cores: 4
# Amount of RAM in multiples of 256 MB (overrides server size).
ram: 4096
# The server availability zone.
availability_zone: ZONE_1
# Name or UUID of the HDD image to use.
image: <UUID>
# Image alias could be provided instead of image.
# Example 'ubuntu:latest'
#image_alias: <IMAGE_ALIAS>
# Size of the node disk in GB (overrides server size).
disk_size: 40
# Type of disk (HDD or SSD).
disk_type: SSD
# Storage availability zone to use.
disk_availability_zone: ZONE_2
# Assign the server to the specified public LAN.
public_lan: <ID>
# Assign firewall rules to the network interface.
public_firewall_rules:
SSH:
protocol: TCP
port_range_start: 22
port_range_end: 22
# Assign the server to the specified private LAN.
private_lan: <ID>
# Enable NAT on the private NIC.
nat: true
# Assign additional volumes to the server.
volumes:
data-volume:
disk_size: 500
disk_availability_zone: ZONE_3
log-volume:
disk_size: 50
disk_type: SSD
To use a private IP for connecting and bootstrapping node:
.. code-block:: yaml
my-profitbricks-profile:
ssh_interface: private_lan
Set ``deploy`` to False if Salt should not be installed on the node.
.. code-block:: yaml
my-profitbricks-profile:
deploy: False
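A profile such as the one above can then be used from the command line to create
a server (a usage sketch; the profile and VM names are only placeholders):
.. code-block:: bash
    salt-cloud -p my-profitbricks-profile my-new-server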
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import pprint
import time
from salt.utils.versions import LooseVersion
# Import salt libs
import salt.utils.cloud
import salt.utils.files
import salt.utils.stringutils
import salt.config as config
from salt.exceptions import (
SaltCloudConfigError,
SaltCloudNotFound,
SaltCloudExecutionFailure,
SaltCloudExecutionTimeout,
SaltCloudSystemExit
)
# Import 3rd-party libs
from salt.ext import six
try:
# pylint: disable=no-name-in-module
import profitbricks
from profitbricks.client import (
ProfitBricksService, Server,
NIC, Volume, FirewallRule, IPBlock,
Datacenter, LoadBalancer, LAN,
PBNotFoundError, PBError
)
# pylint: enable=no-name-in-module
HAS_PROFITBRICKS = True
except ImportError:
HAS_PROFITBRICKS = False
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = 'profitbricks'
# Only load in this module if the ProfitBricks configurations are in place
def __virtual__():
'''
Check for ProfitBricks configurations.
'''
if get_configured_provider() is False:
return False
if get_dependencies() is False:
return False
return __virtualname__
def get_configured_provider():
'''
Return the first configured instance.
'''
return config.is_provider_configured(
__opts__,
__active_provider_name__ or __virtualname__,
('username', 'password', 'datacenter_id')
)
def version_compatible(version):
'''
Checks profitbricks version
'''
return LooseVersion(profitbricks.API_VERSION) >= LooseVersion(version)
def get_dependencies():
'''
Warn if dependencies are not met.
'''
return config.check_driver_dependencies(
__virtualname__,
{'profitbricks': HAS_PROFITBRICKS}
)
def get_conn():
'''
Return a conn object for the passed VM data
'''
return ProfitBricksService(
username=config.get_cloud_config_value(
'username',
get_configured_provider(),
__opts__,
search_global=False
),
password=config.get_cloud_config_value(
'password',
get_configured_provider(),
__opts__,
search_global=False
)
)
def avail_locations(call=None):
'''
Return a dict of all available VM locations on the cloud provider with
relevant data
'''
if call == 'action':
raise SaltCloudSystemExit(
            'The avail_locations function must be called with '
'-f or --function, or with the --list-locations option'
)
ret = {}
conn = get_conn()
for item in conn.list_locations()['items']:
reg, loc = item['id'].split('/')
location = {'id': item['id']}
if reg not in ret:
ret[reg] = {}
ret[reg][loc] = location
return ret
def avail_images(call=None):
'''
Return a list of the images that are on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_images function must be called with '
'-f or --function, or with the --list-images option'
)
ret = {}
conn = get_conn()
for item in conn.list_images()['items']:
image = {'id': item['id']}
image.update(item['properties'])
ret[image['name']] = image
return ret
def list_images(call=None, kwargs=None):
'''
List all the images with alias by location
CLI Example:
.. code-block:: bash
salt-cloud -f list_images my-profitbricks-config location=us/las
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_images function must be called with '
'-f or --function.'
)
if not version_compatible('4.0'):
raise SaltCloudNotFound(
"The 'image_alias' feature requires the profitbricks "
"SDK v4.0.0 or greater."
)
ret = {}
conn = get_conn()
if kwargs.get('location') is not None:
item = conn.get_location(kwargs.get('location'), 3)
ret[item['id']] = {'image_alias': item['properties']['imageAliases']}
return ret
for item in conn.list_locations(3)['items']:
ret[item['id']] = {'image_alias': item['properties']['imageAliases']}
return ret
def avail_sizes(call=None):
'''
Return a dict of all available VM sizes on the cloud provider with
relevant data. Latest version can be found at:
'''
if call == 'action':
raise SaltCloudSystemExit(
'The avail_sizes function must be called with '
'-f or --function, or with the --list-sizes option'
)
sizes = {
'Micro Instance': {
'id': '1',
'ram': 1024,
'disk': 50,
'cores': 1
},
'Small Instance': {
'id': '2',
'ram': 2048,
'disk': 50,
'cores': 1
},
'Medium Instance': {
'id': '3',
'ram': 4096,
'disk': 50,
'cores': 2
},
'Large Instance': {
'id': '4',
'ram': 7168,
'disk': 50,
'cores': 4
},
'Extra Large Instance': {
'id': '5',
'ram': 14336,
'disk': 50,
'cores': 8
},
'Memory Intensive Instance Medium': {
'id': '6',
'ram': 28672,
'disk': 50,
'cores': 4
},
'Memory Intensive Instance Large': {
'id': '7',
'ram': 57344,
'disk': 50,
'cores': 8
}
}
return sizes
def get_size(vm_):
'''
Return the VM's size object
'''
vm_size = config.get_cloud_config_value('size', vm_, __opts__)
sizes = avail_sizes()
if not vm_size:
return sizes['Small Instance']
for size in sizes:
combinations = (six.text_type(sizes[size]['id']), six.text_type(size))
if vm_size and six.text_type(vm_size) in combinations:
return sizes[size]
raise SaltCloudNotFound(
'The specified size, \'{0}\', could not be found.'.format(vm_size)
)
def get_datacenter_id():
'''
Return datacenter ID from provider configuration
'''
datacenter_id = config.get_cloud_config_value(
'datacenter_id',
get_configured_provider(),
__opts__,
search_global=False
)
conn = get_conn()
try:
conn.get_datacenter(datacenter_id=datacenter_id)
except PBNotFoundError:
log.error('Failed to get datacenter: %s', datacenter_id)
raise
return datacenter_id
def list_loadbalancers(call=None):
'''
Return a list of the loadbalancers that are on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
            'The list_loadbalancers function must be called with '
'-f or --function, or with the --list-loadbalancers option'
)
ret = {}
conn = get_conn()
datacenter = get_datacenter(conn)
for item in conn.list_loadbalancers(datacenter['id'])['items']:
lb = {'id': item['id']}
lb.update(item['properties'])
ret[lb['name']] = lb
return ret
def create_loadbalancer(call=None, kwargs=None):
'''
Creates a loadbalancer within the datacenter from the provider config.
CLI Example:
.. code-block:: bash
salt-cloud -f create_loadbalancer profitbricks name=mylb
'''
if call != 'function':
raise SaltCloudSystemExit(
            'The create_loadbalancer function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
conn = get_conn()
datacenter_id = get_datacenter_id()
loadbalancer = LoadBalancer(name=kwargs.get('name'),
ip=kwargs.get('ip'),
dhcp=kwargs.get('dhcp'))
response = conn.create_loadbalancer(datacenter_id, loadbalancer)
_wait_for_completion(conn, response, 60, 'loadbalancer')
return response
def get_datacenter(conn):
'''
Return the datacenter from the config provider datacenter ID
'''
datacenter_id = get_datacenter_id()
for item in conn.list_datacenters()['items']:
if item['id'] == datacenter_id:
return item
raise SaltCloudNotFound(
'The specified datacenter \'{0}\' could not be found.'.format(
datacenter_id
)
)
def create_datacenter(call=None, kwargs=None):
'''
Creates a virtual datacenter based on supplied parameters.
CLI Example:
.. code-block:: bash
salt-cloud -f create_datacenter profitbricks name=mydatacenter
location=us/las description="my description"
'''
if call != 'function':
raise SaltCloudSystemExit(
            'The create_datacenter function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
if kwargs.get('name') is None:
raise SaltCloudExecutionFailure('The "name" parameter is required')
if kwargs.get('location') is None:
raise SaltCloudExecutionFailure('The "location" parameter is required')
conn = get_conn()
datacenter = Datacenter(name=kwargs['name'],
location=kwargs['location'],
description=kwargs.get('description'))
response = conn.create_datacenter(datacenter)
_wait_for_completion(conn, response, 60, 'create_datacenter')
return response
def get_disk_type(vm_):
'''
Return the type of disk to use. Either 'HDD' (default) or 'SSD'.
'''
return config.get_cloud_config_value(
'disk_type', vm_, __opts__, default='HDD',
search_global=False
)
def get_wait_timeout(vm_):
'''
Return the wait_for_timeout for resource provisioning.
'''
return config.get_cloud_config_value(
'wait_for_timeout', vm_, __opts__, default=15 * 60,
search_global=False
)
def get_image(vm_):
'''
Return the image object to use
'''
vm_image = config.get_cloud_config_value('image', vm_, __opts__).encode(
'ascii', 'salt-cloud-force-ascii'
)
images = avail_images()
for key in six.iterkeys(images):
if vm_image and vm_image in (images[key]['id'], images[key]['name']):
return images[key]
raise SaltCloudNotFound(
'The specified image, \'{0}\', could not be found.'.format(vm_image)
)
def list_datacenters(conn=None, call=None):
'''
List all the data centers
CLI Example:
.. code-block:: bash
salt-cloud -f list_datacenters my-profitbricks-config
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_datacenters function must be called with '
'-f or --function.'
)
datacenters = []
if not conn:
conn = get_conn()
for item in conn.list_datacenters()['items']:
datacenter = {'id': item['id']}
datacenter.update(item['properties'])
datacenters.append({item['properties']['name']: datacenter})
return {'Datacenters': datacenters}
def list_nodes(conn=None, call=None):
'''
Return a list of VMs that are on the provider
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes function must be called with -f or --function.'
)
if not conn:
conn = get_conn()
ret = {}
datacenter_id = get_datacenter_id()
try:
nodes = conn.list_servers(datacenter_id=datacenter_id)
except PBNotFoundError:
log.error('Failed to get nodes list '
'from datacenter: %s', datacenter_id)
raise
for item in nodes['items']:
node = {'id': item['id']}
node.update(item['properties'])
node['state'] = node.pop('vmState')
ret[node['name']] = node
return ret
def list_nodes_full(conn=None, call=None):
'''
Return a list of the VMs that are on the provider, with all fields
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_full function must be called with -f or '
'--function.'
)
if not conn:
conn = get_conn() # pylint: disable=E0602
ret = {}
datacenter_id = get_datacenter_id()
nodes = conn.list_servers(datacenter_id=datacenter_id, depth=3)
for item in nodes['items']:
node = {'id': item['id']}
node.update(item['properties'])
node['state'] = node.pop('vmState')
node['public_ips'] = []
node['private_ips'] = []
        if item['entities']['nics']['items']:
            for nic in item['entities']['nics']['items']:
                if nic['properties']['ips']:
                    ip_address = nic['properties']['ips'][0]
                    if salt.utils.cloud.is_public_ip(ip_address):
                        node['public_ips'].append(ip_address)
                    else:
                        node['private_ips'].append(ip_address)
ret[node['name']] = node
__utils__['cloud.cache_node_list'](
ret,
__active_provider_name__.split(':')[0],
__opts__
)
return ret
def reserve_ipblock(call=None, kwargs=None):
'''
Reserve the IP Block
'''
if call == 'action':
raise SaltCloudSystemExit(
'The reserve_ipblock function must be called with -f or '
'--function.'
)
conn = get_conn()
if kwargs is None:
kwargs = {}
ret = {}
ret['ips'] = []
if kwargs.get('location') is None:
raise SaltCloudExecutionFailure('The "location" parameter is required')
location = kwargs.get('location')
size = 1
if kwargs.get('size') is not None:
size = kwargs.get('size')
block = conn.reserve_ipblock(IPBlock(size=size, location=location))
for item in block['properties']['ips']:
ret['ips'].append(item)
return ret
def show_instance(name, call=None):
'''
Show the details from the provider concerning an instance
'''
if call != 'action':
raise SaltCloudSystemExit(
'The show_instance action must be called with -a or --action.'
)
nodes = list_nodes_full()
__utils__['cloud.cache_node'](
nodes[name],
__active_provider_name__,
__opts__
)
return nodes[name]
def get_node(conn, name):
'''
Return a node for the named VM
'''
datacenter_id = get_datacenter_id()
for item in conn.list_servers(datacenter_id)['items']:
if item['properties']['name'] == name:
node = {'id': item['id']}
node.update(item['properties'])
return node
def ssh_interface(vm_):
'''
Return the ssh_interface type to connect to. Either 'public_ips' (default)
or 'private_ips'.
'''
return config.get_cloud_config_value(
'ssh_interface', vm_, __opts__, default='public_ips',
search_global=False
)
def _get_nics(vm_):
'''
Create network interfaces on appropriate LANs as defined in cloud profile.
'''
nics = []
if 'public_lan' in vm_:
firewall_rules = []
# Set LAN to public if it already exists, otherwise create a new
# public LAN.
if 'public_firewall_rules' in vm_:
firewall_rules = _get_firewall_rules(vm_['public_firewall_rules'])
nic = NIC(lan=set_public_lan(int(vm_['public_lan'])),
name='public',
firewall_rules=firewall_rules)
if 'public_ips' in vm_:
nic.ips = _get_ip_addresses(vm_['public_ips'])
nics.append(nic)
if 'private_lan' in vm_:
firewall_rules = []
if 'private_firewall_rules' in vm_:
firewall_rules = _get_firewall_rules(vm_['private_firewall_rules'])
nic = NIC(lan=int(vm_['private_lan']),
name='private',
firewall_rules=firewall_rules)
if 'private_ips' in vm_:
nic.ips = _get_ip_addresses(vm_['private_ips'])
if 'nat' in vm_ and 'private_ips' not in vm_:
nic.nat = vm_['nat']
nics.append(nic)
return nics
def set_public_lan(lan_id):
'''
Enables public Internet access for the specified public_lan. If no public
LAN is available, then a new public LAN is created.
'''
conn = get_conn()
datacenter_id = get_datacenter_id()
try:
lan = conn.get_lan(datacenter_id=datacenter_id, lan_id=lan_id)
if not lan['properties']['public']:
conn.update_lan(datacenter_id=datacenter_id,
lan_id=lan_id,
public=True)
return lan['id']
except Exception: # pylint: disable=broad-except
lan = conn.create_lan(datacenter_id,
LAN(public=True,
name='Public LAN'))
return lan['id']
def get_public_keys(vm_):
'''
Retrieve list of SSH public keys.
'''
key_filename = config.get_cloud_config_value(
'ssh_public_key', vm_, __opts__, search_global=False, default=None
)
if key_filename is not None:
key_filename = os.path.expanduser(key_filename)
if not os.path.isfile(key_filename):
raise SaltCloudConfigError(
'The defined ssh_public_key \'{0}\' does not exist'.format(
key_filename
)
)
ssh_keys = []
with salt.utils.files.fopen(key_filename) as rfh:
for key in rfh.readlines():
ssh_keys.append(salt.utils.stringutils.to_unicode(key))
return ssh_keys
def get_key_filename(vm_):
'''
Check SSH private key file and return absolute path if exists.
'''
key_filename = config.get_cloud_config_value(
'ssh_private_key', vm_, __opts__, search_global=False, default=None
)
if key_filename is not None:
key_filename = os.path.expanduser(key_filename)
if not os.path.isfile(key_filename):
raise SaltCloudConfigError(
'The defined ssh_private_key \'{0}\' does not exist'.format(
key_filename
)
)
return key_filename
def signal_event(vm_, event, description):
args = __utils__['cloud.filter_event'](
event,
vm_,
['name', 'profile', 'provider', 'driver']
)
__utils__['cloud.fire_event'](
'event',
description,
'salt/cloud/{0}/creating'.format(vm_['name']),
args=args,
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
def create(vm_):
'''
Create a single VM from a data dict
'''
try:
# Check for required profile parameters before sending any API calls.
if (vm_['profile'] and
config.is_profile_configured(__opts__,
(__active_provider_name__ or
'profitbricks'),
vm_['profile']) is False):
return False
except AttributeError:
pass
if 'image_alias' in vm_ and not version_compatible('4.0'):
raise SaltCloudNotFound(
"The 'image_alias' parameter requires the profitbricks "
"SDK v4.0.0 or greater."
)
if 'image' not in vm_ and 'image_alias' not in vm_:
log.error('The image or image_alias parameter is required.')
signal_event(vm_, 'creating', 'starting create')
data = None
datacenter_id = get_datacenter_id()
conn = get_conn()
# Assemble list of network interfaces from the cloud profile config.
nics = _get_nics(vm_)
# Assemble list of volumes from the cloud profile config.
volumes = [_get_system_volume(vm_)]
if 'volumes' in vm_:
volumes.extend(_get_data_volumes(vm_))
    # Assemble the composite server object.
server = _get_server(vm_, volumes, nics)
signal_event(vm_, 'requesting', 'requesting instance')
try:
data = conn.create_server(datacenter_id=datacenter_id, server=server)
log.info(
'Create server request ID: %s',
data['requestId'], exc_info_on_loglevel=logging.DEBUG
)
_wait_for_completion(conn, data, get_wait_timeout(vm_),
'create_server')
except PBError as exc:
log.error(
'Error creating %s on ProfitBricks\n\n'
'The following exception was thrown by the profitbricks library '
'when trying to run the initial deployment: \n%s',
vm_['name'], exc, exc_info_on_loglevel=logging.DEBUG
)
return False
except Exception as exc: # pylint: disable=W0703
log.error(
'Error creating %s \n\nError: \n%s',
vm_['name'], exc, exc_info_on_loglevel=logging.DEBUG
)
return False
vm_['server_id'] = data['id']
def __query_node_data(vm_, data):
'''
Query node data until node becomes available.
'''
running = False
try:
data = show_instance(vm_['name'], 'action')
if not data:
return False
log.debug(
'Loaded node data for %s:\nname: %s\nstate: %s',
vm_['name'], pprint.pformat(data['name']), data['state']
)
except Exception as err: # pylint: disable=broad-except
log.error(
'Failed to get nodes list: %s', err,
                # Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
# Trigger a failure in the wait for IP function
return False
running = data['state'] == 'RUNNING'
if not running:
# Still not running, trigger another iteration
return
if ssh_interface(vm_) == 'private_lan' and data['private_ips']:
vm_['ssh_host'] = data['private_ips'][0]
if ssh_interface(vm_) != 'private_lan' and data['public_ips']:
vm_['ssh_host'] = data['public_ips'][0]
return data
try:
data = salt.utils.cloud.wait_for_ip(
__query_node_data,
update_args=(vm_, data),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', vm_, __opts__, default=10),
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# It might be already up, let's destroy it!
destroy(vm_['name'])
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(six.text_type(exc.message))
log.debug('VM is now running')
log.info('Created Cloud VM %s', vm_)
log.debug('%s VM creation details:\n%s', vm_, pprint.pformat(data))
signal_event(vm_, 'created', 'created instance')
if 'ssh_host' in vm_:
vm_['key_filename'] = get_key_filename(vm_)
ret = __utils__['cloud.bootstrap'](vm_, __opts__)
ret.update(data)
return ret
else:
raise SaltCloudSystemExit('A valid IP address was not found.')
def destroy(name, call=None):
'''
destroy a machine by name
:param name: name given to the machine
:param call: call value in this case is 'action'
:return: array of booleans , true if successfully stopped and true if
successfully removed
CLI Example:
.. code-block:: bash
salt-cloud -d vm_name
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
datacenter_id = get_datacenter_id()
conn = get_conn()
node = get_node(conn, name)
attached_volumes = None
delete_volumes = config.get_cloud_config_value(
'delete_volumes',
get_configured_provider(),
__opts__,
search_global=False
)
# Get volumes before the server is deleted
attached_volumes = conn.get_attached_volumes(
datacenter_id=datacenter_id,
server_id=node['id']
)
conn.delete_server(datacenter_id=datacenter_id, server_id=node['id'])
# The server is deleted and now is safe to delete the volumes
if delete_volumes:
for vol in attached_volumes['items']:
log.debug('Deleting volume %s', vol['id'])
conn.delete_volume(
datacenter_id=datacenter_id,
volume_id=vol['id']
)
log.debug('Deleted volume %s', vol['id'])
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](
name,
__active_provider_name__.split(':')[0],
__opts__
)
return True
def reboot(name, call=None):
'''
reboot a machine by name
:param name: name given to the machine
:param call: call value in this case is 'action'
:return: true if successful
CLI Example:
.. code-block:: bash
salt-cloud -a reboot vm_name
'''
datacenter_id = get_datacenter_id()
conn = get_conn()
node = get_node(conn, name)
conn.reboot_server(datacenter_id=datacenter_id, server_id=node['id'])
return True
def stop(name, call=None):
'''
stop a machine by name
:param name: name given to the machine
:param call: call value in this case is 'action'
:return: true if successful
CLI Example:
.. code-block:: bash
salt-cloud -a stop vm_name
'''
datacenter_id = get_datacenter_id()
conn = get_conn()
node = get_node(conn, name)
conn.stop_server(datacenter_id=datacenter_id, server_id=node['id'])
return True
def start(name, call=None):
'''
start a machine by name
:param name: name given to the machine
:param call: call value in this case is 'action'
:return: true if successful
CLI Example:
.. code-block:: bash
salt-cloud -a start vm_name
'''
datacenter_id = get_datacenter_id()
conn = get_conn()
node = get_node(conn, name)
conn.start_server(datacenter_id=datacenter_id, server_id=node['id'])
return True
def _override_size(vm_):
'''
Apply any extra component overrides to VM from the cloud profile.
'''
vm_size = get_size(vm_)
if 'cores' in vm_:
vm_size['cores'] = vm_['cores']
if 'ram' in vm_:
vm_size['ram'] = vm_['ram']
return vm_size
def _get_server(vm_, volumes, nics):
'''
Construct server instance from cloud profile config
'''
# Apply component overrides to the size from the cloud profile config
vm_size = _override_size(vm_)
# Set the server availability zone from the cloud profile config
availability_zone = config.get_cloud_config_value(
'availability_zone', vm_, __opts__, default=None,
search_global=False
)
# Assign CPU family from the cloud profile config
cpu_family = config.get_cloud_config_value(
'cpu_family', vm_, __opts__, default=None,
search_global=False
)
    # Construct the server object
return Server(
name=vm_['name'],
ram=vm_size['ram'],
availability_zone=availability_zone,
cores=vm_size['cores'],
cpu_family=cpu_family,
create_volumes=volumes,
nics=nics
)
def _get_system_volume(vm_):
'''
Construct VM system volume list from cloud profile config
'''
# Override system volume size if 'disk_size' is defined in cloud profile
disk_size = get_size(vm_)['disk']
if 'disk_size' in vm_:
disk_size = vm_['disk_size']
# Construct the system volume
volume = Volume(
name='{0} Storage'.format(vm_['name']),
size=disk_size,
disk_type=get_disk_type(vm_)
)
if 'image_password' in vm_:
image_password = vm_['image_password']
volume.image_password = image_password
# Retrieve list of SSH public keys
ssh_keys = get_public_keys(vm_)
volume.ssh_keys = ssh_keys
if 'image_alias' in vm_.keys():
volume.image_alias = vm_['image_alias']
else:
volume.image = get_image(vm_)['id']
# Set volume availability zone if defined in the cloud profile
if 'disk_availability_zone' in vm_:
volume.availability_zone = vm_['disk_availability_zone']
return volume
def _get_data_volumes(vm_):
'''
Construct a list of optional data volumes from the cloud profile
'''
ret = []
volumes = vm_['volumes']
for key, value in six.iteritems(volumes):
# Verify the required 'disk_size' property is present in the cloud
# profile config
if 'disk_size' not in volumes[key].keys():
raise SaltCloudConfigError(
'The volume \'{0}\' is missing \'disk_size\''.format(key)
)
# Use 'HDD' if no 'disk_type' property is present in cloud profile
if 'disk_type' not in volumes[key].keys():
volumes[key]['disk_type'] = 'HDD'
# Construct volume object and assign to a list.
volume = Volume(
name=key,
size=volumes[key]['disk_size'],
disk_type=volumes[key]['disk_type'],
licence_type='OTHER'
)
# Set volume availability zone if defined in the cloud profile
if 'disk_availability_zone' in volumes[key].keys():
volume.availability_zone = volumes[key]['disk_availability_zone']
ret.append(volume)
return ret
def _get_ip_addresses(ip_addresses):
'''
Construct a list of ip address
'''
ret = []
for item in ip_addresses:
ret.append(item)
return ret
def _get_firewall_rules(firewall_rules):
'''
Construct a list of optional firewall rules from the cloud profile.
'''
ret = []
for key, value in six.iteritems(firewall_rules):
# Verify the required 'protocol' property is present in the cloud
# profile config
if 'protocol' not in firewall_rules[key].keys():
raise SaltCloudConfigError(
'The firewall rule \'{0}\' is missing \'protocol\''.format(key)
)
ret.append(FirewallRule(
name=key,
protocol=firewall_rules[key].get('protocol', None),
source_mac=firewall_rules[key].get('source_mac', None),
source_ip=firewall_rules[key].get('source_ip', None),
target_ip=firewall_rules[key].get('target_ip', None),
port_range_start=firewall_rules[key].get('port_range_start', None),
port_range_end=firewall_rules[key].get('port_range_end', None),
icmp_type=firewall_rules[key].get('icmp_type', None),
icmp_code=firewall_rules[key].get('icmp_code', None)
))
return ret
def _wait_for_completion(conn, promise, wait_timeout, msg):
'''
Poll request status until resource is provisioned.
'''
if not promise:
return
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time():
time.sleep(5)
operation_result = conn.get_request(
request_id=promise['requestId'],
status=True)
if operation_result['metadata']['status'] == "DONE":
return
elif operation_result['metadata']['status'] == "FAILED":
raise Exception(
"Request: {0}, requestId: {1} failed to complete:\n{2}".format(
msg, six.text_type(promise['requestId']),
operation_result['metadata']['message']
)
)
raise Exception(
'Timed out waiting for asynchronous operation {0} "{1}" to complete.'.format(
msg, six.text_type(promise['requestId'])
)
)
| 27.803738 | 85 | 0.599412 |
79489adb5cae22eddb65ddd02db53d56c314c1ea
| 14,190 |
py
|
Python
|
plugins/ledger/auth2fa.py
|
namuyan/electrum-fjc
|
1a5c4a582f0fcdbaeca2b721ee729f43cd7915a2
|
[
"MIT"
] | 1 |
2017-07-06T03:03:25.000Z
|
2017-07-06T03:03:25.000Z
|
plugins/ledger/auth2fa.py
|
namuyan/electrum-fjc
|
1a5c4a582f0fcdbaeca2b721ee729f43cd7915a2
|
[
"MIT"
] | null | null | null |
plugins/ledger/auth2fa.py
|
namuyan/electrum-fjc
|
1a5c4a582f0fcdbaeca2b721ee729f43cd7915a2
|
[
"MIT"
] | null | null | null |
import threading
from PyQt4.Qt import (QDialog, QInputDialog, QLineEdit, QTextEdit, QVBoxLayout, QLabel, SIGNAL)
import PyQt4.QtCore as QtCore
from electrum_ltc.i18n import _
from electrum_ltc.qt.util import *
from electrum_ltc.util import print_msg
import os, hashlib, websocket, threading, logging, json, copy
from electrum_ltc_gui.qt.qrcodewidget import QRCodeWidget, QRDialog
from btchip.btchip import *
DEBUG = False
helpTxt = [_("Your Ledger Wallet wants tell you a one-time PIN code.<br><br>" \
"For best security you should unplug your device, open a text editor on another computer, " \
"put your cursor into it, and plug your device into that computer. " \
"It will output a summary of the transaction being signed and a one-time PIN.<br><br>" \
"Verify the transaction summary and type the PIN code here.<br><br>" \
"Before pressing enter, plug the device back into this computer.<br>" ),
_("Verify the address below.<br>Type the character from your security card corresponding to the <u><b>BOLD</b></u> character."),
_("Waiting for authentication on your mobile phone"),
_("Transaction accepted by mobile phone. Waiting for confirmation."),
_("Click Pair button to begin pairing a mobile phone."),
_("Scan this QR code with your LedgerWallet phone app to pair it with this Ledger device.<br>"
"To complete pairing you will need your security card to answer a challenge." )
]
class LedgerAuthDialog(QDialog):
def __init__(self, handler, data):
'''Ask user for 2nd factor authentication. Support text, security card and paired mobile methods.
Use last method from settings, but support new pairing and downgrade.
'''
QDialog.__init__(self, handler.top_level_window())
self.handler = handler
self.txdata = data
self.idxs = self.txdata['keycardData'] if self.txdata['confirmationType'] > 1 else ''
self.setMinimumWidth(600)
self.setWindowTitle(_("Ledger Wallet Authentication"))
self.cfg = copy.deepcopy(self.handler.win.wallet.get_keystore().cfg)
self.dongle = self.handler.win.wallet.get_keystore().get_client().dongle
self.ws = None
self.pin = ''
self.devmode = self.getDevice2FAMode()
if self.devmode == 0x11 or self.txdata['confirmationType'] == 1:
self.cfg['mode'] = 0
vbox = QVBoxLayout()
self.setLayout(vbox)
def on_change_mode(idx):
if idx < 2 and self.ws:
self.ws.stop()
self.ws = None
self.cfg['mode'] = 0 if self.devmode == 0x11 else idx if idx > 0 else 1
if self.cfg['mode'] > 1 and self.cfg['pair'] and not self.ws:
self.req_validation()
if self.cfg['mode'] > 0:
self.handler.win.wallet.get_keystore().cfg = self.cfg
self.handler.win.wallet.save_keystore()
self.update_dlg()
def add_pairing():
self.do_pairing()
def return_pin():
self.pin = self.pintxt.text() if self.txdata['confirmationType'] == 1 else self.cardtxt.text()
if self.cfg['mode'] == 1:
self.pin = ''.join(chr(int(str(i),16)) for i in self.pin)
self.accept()
self.modebox = QWidget()
modelayout = QHBoxLayout()
self.modebox.setLayout(modelayout)
modelayout.addWidget(QLabel(_("Method:")))
self.modes = QComboBox()
modelayout.addWidget(self.modes, 2)
self.addPair = QPushButton(_("Pair"))
self.addPair.setMaximumWidth(60)
modelayout.addWidget(self.addPair)
modelayout.addStretch(1)
self.modebox.setMaximumHeight(50)
vbox.addWidget(self.modebox)
self.populate_modes()
self.modes.currentIndexChanged.connect(on_change_mode)
self.addPair.clicked.connect(add_pairing)
self.helpmsg = QTextEdit()
self.helpmsg.setStyleSheet("QTextEdit { background-color: lightgray; }")
self.helpmsg.setReadOnly(True)
vbox.addWidget(self.helpmsg)
self.pinbox = QWidget()
pinlayout = QHBoxLayout()
self.pinbox.setLayout(pinlayout)
self.pintxt = QLineEdit()
self.pintxt.setEchoMode(2)
self.pintxt.setMaxLength(4)
self.pintxt.returnPressed.connect(return_pin)
pinlayout.addWidget(QLabel(_("Enter PIN:")))
pinlayout.addWidget(self.pintxt)
pinlayout.addWidget(QLabel(_("NOT DEVICE PIN - see above")))
pinlayout.addStretch(1)
self.pinbox.setVisible(self.cfg['mode'] == 0)
vbox.addWidget(self.pinbox)
self.cardbox = QWidget()
card = QVBoxLayout()
self.cardbox.setLayout(card)
self.addrtext = QTextEdit()
self.addrtext.setStyleSheet("QTextEdit { color:blue; background-color:lightgray; padding:15px 10px; border:none; font-size:20pt; }")
self.addrtext.setReadOnly(True)
self.addrtext.setMaximumHeight(120)
card.addWidget(self.addrtext)
def pin_changed(s):
if len(s) < len(self.idxs):
i = self.idxs[len(s)]
addr = self.txdata['address']
addr = addr[:i] + '<u><b>' + addr[i:i+1] + '</u></b>' + addr[i+1:]
self.addrtext.setHtml(str(addr))
else:
self.addrtext.setHtml(_("Press Enter"))
pin_changed('')
cardpin = QHBoxLayout()
cardpin.addWidget(QLabel(_("Enter PIN:")))
self.cardtxt = QLineEdit()
self.cardtxt.setEchoMode(2)
self.cardtxt.setMaxLength(len(self.idxs))
self.cardtxt.textChanged.connect(pin_changed)
self.cardtxt.returnPressed.connect(return_pin)
cardpin.addWidget(self.cardtxt)
cardpin.addWidget(QLabel(_("NOT DEVICE PIN - see above")))
cardpin.addStretch(1)
card.addLayout(cardpin)
self.cardbox.setVisible(self.cfg['mode'] == 1)
vbox.addWidget(self.cardbox)
self.pairbox = QWidget()
pairlayout = QVBoxLayout()
self.pairbox.setLayout(pairlayout)
pairhelp = QTextEdit(helpTxt[5])
pairhelp.setStyleSheet("QTextEdit { background-color: lightgray; }")
pairhelp.setReadOnly(True)
pairlayout.addWidget(pairhelp, 1)
self.pairqr = QRCodeWidget()
pairlayout.addWidget(self.pairqr, 4)
self.pairbox.setVisible(False)
vbox.addWidget(self.pairbox)
self.update_dlg()
if self.cfg['mode'] > 1 and not self.ws:
self.req_validation()
def populate_modes(self):
self.modes.blockSignals(True)
self.modes.clear()
self.modes.addItem(_("Summary Text PIN (requires dongle replugging)") if self.txdata['confirmationType'] == 1 else _("Summary Text PIN is Disabled"))
if self.txdata['confirmationType'] > 1:
self.modes.addItem(_("Security Card Challenge"))
if not self.cfg['pair']:
self.modes.addItem(_("Mobile - Not paired"))
else:
self.modes.addItem(_("Mobile - %s") % self.cfg['pair'][1])
self.modes.blockSignals(False)
def update_dlg(self):
self.modes.setCurrentIndex(self.cfg['mode'])
self.modebox.setVisible(True)
self.addPair.setText(_("Pair") if not self.cfg['pair'] else _("Re-Pair"))
self.addPair.setVisible(self.txdata['confirmationType'] > 2)
self.helpmsg.setText(helpTxt[self.cfg['mode'] if self.cfg['mode'] < 2 else 2 if self.cfg['pair'] else 4])
self.helpmsg.setMinimumHeight(180 if self.txdata['confirmationType'] == 1 else 100)
self.pairbox.setVisible(False)
self.helpmsg.setVisible(True)
self.pinbox.setVisible(self.cfg['mode'] == 0)
self.cardbox.setVisible(self.cfg['mode'] == 1)
self.pintxt.setFocus(True) if self.cfg['mode'] == 0 else self.cardtxt.setFocus(True)
self.setMaximumHeight(200)
def do_pairing(self):
rng = os.urandom(16)
pairID = rng.encode('hex') + hashlib.sha256(rng).digest()[0].encode('hex')
self.pairqr.setData(pairID)
self.modebox.setVisible(False)
self.helpmsg.setVisible(False)
self.pinbox.setVisible(False)
self.cardbox.setVisible(False)
self.pairbox.setVisible(True)
self.pairqr.setMinimumSize(300,300)
if self.ws:
self.ws.stop()
self.ws = LedgerWebSocket(self, pairID)
self.ws.pairing_done.connect(self.pairing_done)
self.ws.start()
def pairing_done(self, data):
if data is not None:
self.cfg['pair'] = [ data['pairid'], data['name'], data['platform'] ]
self.cfg['mode'] = 2
self.handler.win.wallet.get_keystore().cfg = self.cfg
self.handler.win.wallet.save_keystore()
self.pin = 'paired'
self.accept()
def req_validation(self):
if self.cfg['pair'] and 'secureScreenData' in self.txdata:
if self.ws:
self.ws.stop()
self.ws = LedgerWebSocket(self, self.cfg['pair'][0], self.txdata)
self.ws.req_updated.connect(self.req_updated)
self.ws.start()
def req_updated(self, pin):
if pin == 'accepted':
self.helpmsg.setText(helpTxt[3])
else:
self.pin = str(pin)
self.accept()
def getDevice2FAMode(self):
apdu = [0xe0, 0x24, 0x01, 0x00, 0x00, 0x01] # get 2fa mode
try:
mode = self.dongle.exchange( bytearray(apdu) )
return mode
        except BTChipException as e:
debug_msg('Device getMode Failed')
return 0x11
def closeEvent(self, evnt):
debug_msg("CLOSE - Stop WS")
if self.ws:
self.ws.stop()
if self.pairbox.isVisible():
evnt.ignore()
self.update_dlg()
class LedgerWebSocket(QThread):
pairing_done = pyqtSignal(object)
req_updated = pyqtSignal(str)
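    # Message flow handled in on_message() below (a summary, not a formal spec):
    # pairing: 'identify' -> dongle computes a challenge -> 'challenge' -> 'pairing' result;
    # 2FA validation: 'connect'/'repeat' -> transaction request sent -> 'accept' -> 'response' with the PIN.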
def __init__(self, dlg, pairID, txdata=None):
QThread.__init__(self)
self.stopping = False
self.pairID = pairID
self.txreq = '{"type":"request","second_factor_data":"' + str(txdata['secureScreenData']).encode('hex') + '"}' if txdata else None
self.dlg = dlg
self.dongle = self.dlg.dongle
self.data = None
#websocket.enableTrace(True)
logging.basicConfig(level=logging.INFO)
self.ws = websocket.WebSocketApp('wss://ws.ledgerwallet.com/2fa/channels',
on_message = self.on_message, on_error = self.on_error,
on_close = self.on_close, on_open = self.on_open)
def run(self):
while not self.stopping:
self.ws.run_forever()
def stop(self):
debug_msg("WS: Stopping")
self.stopping = True
self.ws.close()
def on_message(self, ws, msg):
data = json.loads(msg)
if data['type'] == 'identify':
debug_msg('Identify')
apdu = [0xe0, 0x12, 0x01, 0x00, 0x41] # init pairing
apdu.extend(data['public_key'].decode('hex'))
try:
challenge = self.dongle.exchange( bytearray(apdu) )
ws.send( '{"type":"challenge","data":"%s" }' % str(challenge).encode('hex') )
self.data = data
            except BTChipException as e:
debug_msg('Identify Failed')
if data['type'] == 'challenge':
debug_msg('Challenge')
apdu = [0xe0, 0x12, 0x02, 0x00, 0x10] # confirm pairing
apdu.extend(data['data'].decode('hex'))
try:
self.dongle.exchange( bytearray(apdu) )
debug_msg('Pairing Successful')
ws.send( '{"type":"pairing","is_successful":"true"}' )
self.data['pairid'] = self.pairID
self.pairing_done.emit(self.data)
            except BTChipException as e:
debug_msg('Pairing Failed')
ws.send( '{"type":"pairing","is_successful":"false"}' )
self.pairing_done.emit(None)
ws.send( '{"type":"disconnect"}' )
self.stopping = True
ws.close()
if data['type'] == 'accept':
debug_msg('Accepted')
self.req_updated.emit('accepted')
if data['type'] == 'response':
debug_msg('Responded', data)
self.req_updated.emit(str(data['pin']) if data['is_accepted'] else '')
self.txreq = None
self.stopping = True
ws.close()
if data['type'] == 'repeat':
debug_msg('Repeat')
if self.txreq:
ws.send( self.txreq )
debug_msg("Req Sent", self.txreq)
if data['type'] == 'connect':
debug_msg('Connected')
if self.txreq:
ws.send( self.txreq )
debug_msg("Req Sent", self.txreq)
if data['type'] == 'disconnect':
debug_msg('Disconnected')
ws.close()
def on_error(self, ws, error):
message = getattr(error, 'strerror', '')
if not message:
message = getattr(error, 'message', '')
debug_msg("WS: %s" % message)
def on_close(self, ws):
debug_msg("WS: ### socket closed ###")
def on_open(self, ws):
debug_msg("WS: ### socket open ###")
debug_msg("Joining with pairing ID", self.pairID)
ws.send( '{"type":"join","room":"%s"}' % self.pairID )
ws.send( '{"type":"repeat"}' )
if self.txreq:
ws.send( self.txreq )
debug_msg("Req Sent", self.txreq)
def debug_msg(*args):
if DEBUG:
print_msg(*args)
| 40.775862 | 157 | 0.576885 |
79489b7008fc251521a1d53dc4fb2a3b76dc6fd1
| 3,183 |
py
|
Python
|
contrib/testgen/base58.py
|
vivuscoin/vivuscoin
|
ba0db89712234bf68b2d6b63ef2c420d65c7c25d
|
[
"MIT"
] | null | null | null |
contrib/testgen/base58.py
|
vivuscoin/vivuscoin
|
ba0db89712234bf68b2d6b63ef2c420d65c7c25d
|
[
"MIT"
] | null | null | null |
contrib/testgen/base58.py
|
vivuscoin/vivuscoin
|
ba0db89712234bf68b2d6b63ef2c420d65c7c25d
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2012-2018 The Bitcoin Core developers
# Copyright (c) 2021 The Vivuscoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Vivuscoin base58 encoding and decoding.
Based on https://vivuscointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
new = hashlib.sha256
if str != bytes:
# Python 3.x
def ord(c):
return c
def chr(n):
return bytes( (n,) )
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars
def b58encode(v):
""" encode v, which is a string of bytes, to base58.
"""
long_value = 0
for (i, c) in enumerate(v[::-1]):
if isinstance(c, str):
c = ord(c)
long_value += (256**i) * c
result = ''
while long_value >= __b58base:
div, mod = divmod(long_value, __b58base)
result = __b58chars[mod] + result
long_value = div
result = __b58chars[long_value] + result
# Vivuscoin does a little leading-zero-compression:
# leading 0-bytes in the input become leading-1s
nPad = 0
for c in v:
if c == 0:
nPad += 1
else:
break
return (__b58chars[0]*nPad) + result
def b58decode(v, length = None):
""" decode v into a string of len bytes
"""
long_value = 0
for i, c in enumerate(v[::-1]):
pos = __b58chars.find(c)
assert pos != -1
long_value += pos * (__b58base**i)
result = bytes()
while long_value >= 256:
div, mod = divmod(long_value, 256)
result = chr(mod) + result
long_value = div
result = chr(long_value) + result
nPad = 0
for c in v:
if c == __b58chars[0]:
nPad += 1
continue
break
result = bytes(nPad) + result
if length is not None and len(result) != length:
return None
return result
def checksum(v):
"""Return 32-bit checksum based on SHA256"""
return SHA256.new(SHA256.new(v).digest()).digest()[0:4]
def b58encode_chk(v):
"""b58encode a string, with 32-bit checksum"""
return b58encode(v + checksum(v))
def b58decode_chk(v):
"""decode a base58 string, check and remove checksum"""
result = b58decode(v)
if result is None:
return None
if result[-4:] == checksum(result[:-4]):
return result[:-4]
else:
return None
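# Example round trip (a sketch mirroring the self-test below): b58encode_chk(b'o hai')
# appends a 4-byte double-SHA256 checksum before encoding, and b58decode_chk() verifies
# that checksum and strips it again, returning b'o hai' (or None on a mismatch).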
def get_bcaddress_version(strAddress):
""" Returns None if strAddress is invalid. Otherwise returns integer version of address. """
addr = b58decode_chk(strAddress)
if addr is None or len(addr)!=21:
return None
version = addr[0]
return ord(version)
if __name__ == '__main__':
# Test case (from http://gitorious.org/vivuscoin/python-base58.git)
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
_ohai = 'o hai'.encode('ascii')
_tmp = b58encode(_ohai)
assert _tmp == 'DYB3oMS'
assert b58decode(_tmp, 5) == _ohai
print("Tests passed")
| 27.205128 | 97 | 0.625196 |
79489c3e0ecda38f25ac78eb8365ab9ca319621d
| 559 |
py
|
Python
|
warehouse/urls.py
|
NotEnterprising/DATABASE_supermarket
|
2d54e4ba66e24bf30f26a97d553eef23f601d821
|
[
"MIT"
] | null | null | null |
warehouse/urls.py
|
NotEnterprising/DATABASE_supermarket
|
2d54e4ba66e24bf30f26a97d553eef23f601d821
|
[
"MIT"
] | null | null | null |
warehouse/urls.py
|
NotEnterprising/DATABASE_supermarket
|
2d54e4ba66e24bf30f26a97d553eef23f601d821
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import WarehouseListView, WarehouseCreateView, WarehouseUpdateView, WarehouseDetailView, WarehouseDeleteView
urlpatterns = [
path('list/', WarehouseListView.as_view(), name='warehouse-list'),
path('<int:pk>/', WarehouseDetailView.as_view(), name='warehouse-detail'),
path('create/', WarehouseCreateView.as_view(), name='warehouse-create'),
path('<int:pk>/update/', WarehouseUpdateView.as_view(), name='warehouse-update'),
path('<int:pk>/delete/', WarehouseDeleteView.as_view(), name='warehouse-delete'),
]
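# With these patterns included under a project-level prefix (not shown here), e.g.
# `<prefix>/list/` resolves to WarehouseListView and `<prefix>/5/update/` to
# WarehouseUpdateView for the warehouse with primary key 5.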
| 46.583333 | 120 | 0.753131 |
79489ca0b3b4ada29645b07dce74663e06baaaf4
| 12,559 |
py
|
Python
|
model/VAE.py
|
shpandey/MRIproject
|
040c904ea2aea81776787021e904400d1c5a8974
|
[
"MIT"
] | null | null | null |
model/VAE.py
|
shpandey/MRIproject
|
040c904ea2aea81776787021e904400d1c5a8974
|
[
"MIT"
] | 10 |
2020-01-28T22:13:58.000Z
|
2022-03-11T23:53:26.000Z
|
model/VAE.py
|
JP-MRPhys/fastMRI-GAN
|
f69c9947edd95a7b85027abd2049c5aeb892569d
|
[
"MIT"
] | null | null | null |
#import tensorflow as tf
import tensorflow.compat.v1 as tf #NOTE: To train on tensorflow version 2.0
tf.disable_v2_behavior()
import h5py
import os
import numpy as np
import pathlib
#from utils.subsample import MaskFunc
#import utils.transforms as T
from matplotlib import pyplot as plt
from fastmri_data import get_training_pair_images_vae, get_random_accelerations
import math
import logging
import shutil
LOG_FILENAME="./logs/VAE_TRAINING.LOG"
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG)
class CVAE(tf.keras.Model):
def __init__(self):
super(CVAE, self).__init__()
#TODO: add config parser
#self.initizler = tf.keras.initializers.TruncatedNormal(mean=0.0, stddev=0.05, seed=None)
#self.training_datadir='/media/jehill/DATA/ML_data/fastmri/singlecoil/train/singlecoil_train/'
self.training_datadir = '/jmain01/home/JAD029/txl04/jxp48-txl04/data/fastmri_singlecoil/singlecoil_train/'
self.BATCH_SIZE = 16
self.num_epochs = 150
self.learning_rate = 1e-3
self.model_name="CVAE"
self.image_dim = 128
self.channels = 1
self.latent_dim = 64
self.kernel_size = 3
lrelu = lambda x: tf.keras.activations.relu(x, alpha=0.3)
self.activation = lrelu
self.input_image_1 = tf.placeholder(tf.float32, shape=[None, 256, 256, self.channels]) #for time being resize images
self.input_image = tf.image.resize_images(self.input_image_1, [np.int(self.image_dim), np.int(self.image_dim)])
self.image_shape = self.input_image.shape[1:]
self.learning_rate = tf.placeholder(tf.float32, [], name='learning_rate')
self.encoder = self.inference_net()
self.decoder = self.generative_net() # note these are keras model
mean, logvar = tf.split(self.encoder(self.input_image), num_or_size_splits=2, axis=1)
self.z = self.reparameterize(mean, logvar)
logits = self.decoder(self.z)
self.reconstructed = tf.sigmoid(logits)
# calculate the KL loss
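        # For a diagonal Gaussian posterior N(mean, var) and a standard normal prior,
        # the closed-form divergence is KL = 0.5 * sum(mean^2 + var - 1 - log(var)),
        # which is exactly what the two lines below compute.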
var = tf.exp(logvar)
kl_loss = 0.5 * tf.reduce_sum(tf.square(mean) + var - 1. - logvar)
# cal mse loss
sse_loss = 0.5 * tf.reduce_sum(tf.square(self.input_image - logits))
self.total_loss = tf.reduce_mean(kl_loss + sse_loss) / self.BATCH_SIZE
self.list_gradients = self.encoder.trainable_variables + self.decoder.trainable_variables
self.Optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.5).minimize(self.total_loss, var_list=self.list_gradients)
# summary and writer for tensorboard visulization
tf.summary.image("Reconstructed image", self.reconstructed)
tf.summary.image("Input image", self.input_image)
tf.summary.scalar("KL", kl_loss)
tf.summary.scalar("SSE",sse_loss)
tf.summary.scalar("Total loss", self.total_loss)
self.merged_summary = tf.summary.merge_all()
self.init = tf.global_variables_initializer()
self.saver = tf.train.Saver()
self.logdir = './trained_models/' + self.model_name # if not exist create logdir
self.image_dir = self.logdir + '/images/'
self.model_dir = self.logdir + '/final_model'
self.gpu_list=['/gpu:0', '/gpu:1' ,'/gpu:2', '/gpu:3']
#self.gpu_list = ['/gpu:0']
print("Completed creating the model")
logging.debug("Completed creating the model")
if (os.path.exists(self.image_dir)):
shutil.rmtree(self.image_dir, ignore_errors=True)
os.makedirs(self.image_dir)
else:
os.makedirs(self.image_dir)
def inference_net(self):
input_image = tf.keras.layers.Input(self.image_shape) # 224,224,1
net = tf.keras.layers.Conv2D(
filters=32, kernel_size=3, strides=(2, 2), activation='relu')(input_image) # 112,112,32
net = tf.keras.layers.BatchNormalization()(net)
net = tf.keras.layers.Conv2D(
filters=64, kernel_size=3, strides=(2, 2), activation='relu')(net) # 56,56,64
net = tf.keras.layers.BatchNormalization()(net)
net = tf.keras.layers.Conv2D(
filters=32, kernel_size=3, strides=(2, 2), activation='relu')(net) # 56,56,64
net = tf.keras.layers.BatchNormalization()(net)
net = tf.keras.layers.Flatten()(net)
# No activation
net = tf.keras.layers.Dense(self.latent_dim + self.latent_dim)(net)
net = tf.keras.Model(inputs=input_image, outputs=net)
return net
def generative_net(self):
latent_input = tf.keras.layers.Input((self.latent_dim,))
net = tf.keras.layers.Dense(units=8 * 8 * 128, activation=tf.nn.relu)(latent_input)
net = tf.keras.layers.Reshape(target_shape=(8, 8, 128))(net)
net = tf.keras.layers.BatchNormalization()(net)
net = tf.keras.layers.Conv2DTranspose(
filters=256,
kernel_size=5,
strides=(2, 2),
padding="SAME",
activation=self.activation)(net)
net = tf.keras.layers.BatchNormalization()(net)
net = tf.keras.layers.Conv2DTranspose(
filters=128,
kernel_size=5,
strides=(2, 2),
padding="SAME",
activation=self.activation)(net)
net = tf.keras.layers.BatchNormalization()(net)
net = tf.keras.layers.Conv2DTranspose(
filters=64,
kernel_size=5,
strides=(2, 2),
padding="SAME",
activation=self.activation)(net)
net = tf.keras.layers.BatchNormalization()(net)
net = tf.keras.layers.Conv2DTranspose(
filters=32,
kernel_size=5,
strides=(2, 2),
padding="SAME",
activation=self.activation)(net)
net = tf.keras.layers.BatchNormalization()(net)
# No activation
net = tf.keras.layers.Conv2DTranspose(
filters=self.channels, kernel_size=3, strides=(1, 1), padding="SAME", activation=None)(net)
upsampling_net = tf.keras.Model(inputs=latent_input, outputs=net)
return upsampling_net
def reparameterize(self, mean, logvar):
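        # Reparameterization trick: draw eps ~ N(0, I) and return
        # z = mean + sigma * eps with sigma = sqrt(exp(logvar)), so the sampling
        # step stays differentiable with respect to mean and logvar.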
eps = tf.random.normal(tf.shape(mean))
# return eps * tf.exp(logvar * .5) + mean
return eps * tf.sqrt(tf.exp(logvar)) + mean
def train(self):
for d in self.gpu_list:
with tf.device(d):
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as self.sess:
#learning_rate=1e-3
counter = 0
self.train_writer = tf.summary.FileWriter(self.logdir, tf.get_default_graph())
self.sess.run(self.init)
# so can see improvement fix z_samples
z_samples = np.random.uniform(-1, 1, size=(self.BATCH_SIZE, self.latent_dim)).astype(np.float32)
for epoch in range(0, self.num_epochs):
print("************************ epoch:" + str(epoch) + "*****************")
logging.debug("************************ epoch:" + str(epoch) + "*****************")
learning_rate=self.step_decay(epoch)
filenames = list(pathlib.Path(self.training_datadir).iterdir())
np.random.shuffle(filenames)
print("Number training data " + str(len(filenames)))
np.random.shuffle(filenames)
for file in filenames:
centre_fraction, acceleration = get_random_accelerations(high=5)
# training_images: fully sampled MRI images
# training labels: , obtained using various mask functions, here we obtain using center_fraction =[], acceleration=[]
training_images, training_labels = get_training_pair_images_vae(file, centre_fraction, acceleration)
[batch_length, x, y, z] = training_images.shape
for idx in range(0, batch_length, self.BATCH_SIZE):
batch_images = training_images[idx:idx + self.BATCH_SIZE, :, :]
batch_labels = training_labels[idx:idx + self.BATCH_SIZE, :, :]
feed_dict = {self.input_image_1: batch_images,
self.learning_rate: learning_rate}
summary, reconstructed_images, opt, loss = self.sess.run( [self.merged_summary, self.reconstructed, self.Optimizer, self.total_loss],
feed_dict=feed_dict)
elbo = -loss
if math.isnan(elbo):
logging.debug("Epoch: " + str(epoch) + "stopping as elbo is nan")
break
#sampled_image = self.sess.run(self.reconstructed, feed_dict={self.z: z_samples})
print("Epoch: " + str(epoch) + " learning rate:" + str(learning_rate) + "ELBO: " + str(elbo))
counter += 1
if (counter % 50 == 0):
logging.debug("Epoch: " + str(epoch) + " learning rate:" + str(learning_rate) + "ELBO: " + str(elbo))
sampled_image = self.sess.run(self.reconstructed, feed_dict={self.z: z_samples})
logging.debug("Epoch: " + str(epoch) + "completed")
print("epoch:" + str(epoch) + "Completed")
self.save_images(reconstructed_images,"recon"+str(epoch))
self.save_images(sampled_image,"sample"+str(epoch))
if (epoch % 10 == 0):
logging.debug("Epoch: " + str(epoch) + " learning rate:" + str(learning_rate) + "ELBO: " + str(elbo))
if math.isnan(elbo):
logging.debug("Epoch: " + str(epoch) + "stopping as elbo is nan")
break
self.save_model(self.model_name)
if (epoch % 20 == 0):
self.train_writer.add_summary(summary)
print("Training completed .... Saving model")
logging.debug(("Training completed .... Saving model"))
self.save_model(self.model_name + "_final")
print("All completed good bye")
def sample(self):
with tf.device('/gpu:0'):
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as self.sess:
self.train_writer = tf.summary.FileWriter(self.logdir, tf.get_default_graph())
self.sess.run(self.init)
# so can see improvement fix z_samples
                z_samples = np.random.uniform(-1, 1, size=(self.BATCH_SIZE, self.latent_dim)).astype(np.float32)
sampled_image = self.sess.run(self.reconstructed, feed_dict={self.z: z_samples})
return sampled_image
def save_model(self, model_name):
print ("Saving the model after training")
if (os.path.exists(self.model_dir)):
shutil.rmtree(self.model_dir, ignore_errors=True)
os.makedirs(self.model_dir)
self.saver.save(self.sess, os.path.join(self.model_dir, self.model_name))
print("Completed saving the model")
logging.debug("Completed saving the model")
def load_model(self, model_name):
print ("Checking for the model")
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as new_sess:
saver =tf.train.import_meta_graph((model_name + '.meta'))
#saver.restore(self.sess, self.model_dir)
saver.restore(new_sess,tf.train.latest_checkpoint("./"))
print ("Session restored")
return new_sess
def step_decay(self, epoch):
initial_lrate=0.001
drop = 0.5
epochs_drop=4
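        # Halves the rate every `epochs_drop` epochs; as a worked example of the
        # formula below: epochs 0-2 -> 1e-3, epochs 3-6 -> 5e-4, epochs 7-10 -> 2.5e-4.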
lrate= initial_lrate* math.pow(drop, math.floor((1+epoch)/epochs_drop))
return lrate
def save_images(self, numpy_array, tag):
fig = plt.figure(figsize=(4,4))
for i in range(numpy_array.shape[0]):
plt.subplot(4,4,i+1)
plt.imshow(numpy_array[i,:,:,0], cmap='gray')
plt.axis("off")
filename=self.image_dir + '_image_at_epoch_' + tag + '_.png';
plt.savefig(filename)
if __name__ == '__main__':
model=CVAE()
model.train()
| 39.124611 | 161 | 0.590493 |
79489d369e4393c81add7710ecda8068f08f14d9 | 9,457 | py | Python | extrap/gui/CubeFileReader.py | arima0714/extrap | ac535fa9a4d10ecb59a29f063ff0515b6a98517f | ["BSD-3-Clause"] | 13 | 2020-11-12T21:50:09.000Z | 2022-03-03T11:01:27.000Z | extrap/gui/CubeFileReader.py | arima0714/extrap | ac535fa9a4d10ecb59a29f063ff0515b6a98517f | ["BSD-3-Clause"] | 3 | 2020-12-04T12:49:37.000Z | 2021-05-06T11:41:31.000Z | extrap/gui/CubeFileReader.py | arima0714/extrap | ac535fa9a4d10ecb59a29f063ff0515b6a98517f | ["BSD-3-Clause"] | 6 | 2021-01-14T16:06:47.000Z | 2021-09-01T09:54:37.000Z |
# This file is part of the Extra-P software (http://www.scalasca.org/software/extra-p)
#
# Copyright (c) 2020, Technical University of Darmstadt, Germany
#
# This software may be modified and distributed under the terms of a BSD-style license.
# See the LICENSE file in the base directory for details.
from functools import partial
from threading import Event
from PySide2.QtCore import * # @UnusedWildImport
from PySide2.QtWidgets import * # @UnusedWildImport
from extrap.fileio.cube_file_reader2 import read_cube_file
from extrap.util.exceptions import CancelProcessError
from extrap.util.progress_bar import ProgressBar
class ParameterWidget(QWidget):
def __init__(self, parent):
super(ParameterWidget, self).__init__(parent)
self.name = "Parameter"
self.values = "1"
def init_UI(self):
layout = QFormLayout(self)
self.name_edit = QLineEdit(self)
self.name_edit.setText(self.name)
layout.addRow("Parameter name:", self.name_edit)
self.values_edit = QLineEdit(self)
self.values_edit.setText(self.values)
layout.addRow("Values:", self.values_edit)
self.setLayout(layout)
def onNewValues(self):
self.name_edit.setText(self.name)
self.values_edit.setText(self.values)
class CubeFileReader(QDialog):
def __init__(self, parent, dirName):
super(CubeFileReader, self).__init__(parent)
self.valid = False
self.dir_name = dirName
self.num_params = 1
self.max_params = 3
self.prefix = ""
self.postfix = ""
self.filename = "profile.cubex"
self.repetitions = 1
self.parameters = list()
self._cancel_event = Event()
# for _ in range(0, self.max_params):
# self.parameters.append(ParameterWidget(self))
self.init_UI()
def init_UI(self):
self.setWindowTitle("Import Settings")
self.setWindowModality(Qt.WindowModal)
self.setWindowFlag(Qt.WindowContextHelpButtonHint, False)
self.setWindowFlag(Qt.WindowCloseButtonHint, False)
main_layout = QFormLayout(self)
layout = QFormLayout()
self.controls_layout = layout
# self.num_params_choice = QSpinBox(self)
# self.num_params_choice.setMinimum(1)
# self.num_params_choice.setMaximum(self.max_params)
# self.num_params_choice.setValue(self.num_params)
# self.num_params_choice.valueChanged.connect(self.change_param_num)
#
# layout.addRow("Number of Parameters:", self.num_params_choice)
#
# self.prefix_edit = QLineEdit(self)
# self.prefix_edit.setText(self.prefix)
#
# layout.addRow("Prefix:", self.prefix_edit)
#
# self.postfix_edit = QLineEdit(self)
# self.postfix_edit.setText(self.postfix)
#
# layout.addRow("Postfix:", self.postfix_edit)
#
# self.filename_edit = QLineEdit(self)
# self.filename_edit.setText(self.filename)
#
# layout.addRow("File name:", self.filename_edit)
#
# self.parameter_tabs = QTabWidget(self)
# self.parameter_tabs.setMovable(False)
# self.parameter_tabs.setTabsClosable(False)
# for param in self.parameters:
# param.init_UI()
#
# layout.addRow(self.parameter_tabs)
#
# self.spin_box = QSpinBox(self)
# self.spin_box.setMinimum(1)
# spin_box_max_val = 1073741824
# self.spin_box.setMaximum(spin_box_max_val)
# self.spin_box.setValue(self.repetitions)
#
# layout.addRow("Repetitions:", self.spin_box)
#
self.scaling_choice = QComboBox(self)
self.scaling_choice.addItem("weak")
self.scaling_choice.addItem("strong")
layout.addRow("Scaling type:", self.scaling_choice)
self.progress_indicator = QProgressBar(self)
self.progress_indicator.hide()
layout.addRow(self.progress_indicator)
# If the user presses the enter key on any element it activates the
# first button somehow. Thus, create a fake button, that does nothing
# To avoid that any entry in the value list activates that OK button.
# fake_button = QPushButton(self)
# fake_button.setText("OK")
# fake_button.move(-1120, -1220)
# fake_button.hide()
# self.ok_button = QPushButton(self)
# self.ok_button.setText("OK")
# self.ok_button.clicked.connect(self.ok_button.hide)
# self.ok_button.pressed.connect(self.ok_pressed)
#
# layout.addRow(self.ok_button)
# cancel_button = QPushButton(self)
# cancel_button.setText("Cancel")
# cancel_button.pressed.connect(self.close)
main_layout.addRow(layout)
self.buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
main_layout.addRow(self.buttonBox)
# self.change_param_num()
self.setLayout(main_layout)
# def change_param_num(self):
# self.num_params = self.num_params_choice.value()
# self.parameter_tabs.clear()
# for param in self.parameters:
# param.hide()
# for index in range(0, self.num_params):
# self.parameter_tabs.addTab(self.parameters[index],
# "Parameter " + str(index + 1))
# self.parameters[index].show()
#
# self.autoFillOptions()
# self.prefix_edit.setText(self.prefix)
# self.postfix_edit.setText(self.postfix)
# self.filename_edit.setText(self.filename)
# self.spin_box.setValue(self.repetitions)
# for index in range(0, self.num_params):
# self.parameters[index].onNewValues()
#
# self.update()
#
# def autoFillOptions(self):
#
# auto_fill_rule = r"^([^.]+)"
# for i in range(0, self.num_params):
# auto_fill_rule = auto_fill_rule + r"([_\.][^\d]+)(\d+)"
# auto_fill_rule = auto_fill_rule + r"[_\.]r(\d+)$"
# auto_fill_regex = re.compile(auto_fill_rule)
#
# # meaningful defaults
# self.filename = "profile.cubex"
# self.postfix = ""
# self.prefix = ""
# self.repetitions = 1
#
# # get list of existing directories matching the pattern
# available_dirs = os.listdir(self.dir_name)
# dir_matches = list()
# for d in available_dirs:
# m = auto_fill_regex.match(d)
# if m is not None:
# dir_matches.append(m)
#
# # print("matched directoty list with given pattern: ", dir_matches)
#
# if len(dir_matches) == 0:
# return
#
# # get prefix from first match
# self.prefix = dir_matches[0].group(1)
# matching_prefix = [d for d in dir_matches if d.group(1) == self.prefix]
#
# for i in range(0, self.num_params):
# # get parameter name from first match
# self.parameters[i].name = dir_matches[0].group(2 + i * 2)
#
# # extract all values for parameter p
# available_p_values = sorted(
# set(int(m.group(3 + i * 2)) for m in matching_prefix))
# self.parameters[i].values = ','.join(
# str(v) for v in available_p_values)
#
# # get maximum repetition count
# max_repetitions = max(int(m.group(2 + self.num_params * 2))
# for m in matching_prefix)
# self.repetitions = max_repetitions
@Slot()
def reject(self):
self._cancel_event.set()
super().reject()
@Slot()
def accept(self):
# self.prefix = self.prefix_edit.text()
# self.postfix = self.postfix_edit.text()
# self.filename = self.filename_edit.text()
# self.repetitions = self.spin_box.value()
self.scaling_type = self.scaling_choice.currentText()
with ProgressBar(total=0, gui=True) as pbar:
self._show_progressbar()
pbar.display = partial(self._display_progress, pbar)
pbar.sp = None
# read the cube files
try:
self.experiment = read_cube_file(self.dir_name, self.scaling_type, pbar)
except Exception as err:
self.close()
raise err
if not self.experiment:
QMessageBox.critical(self,
"Error",
"Could not read Cube Files, may be corrupt!",
QMessageBox.Ok,
QMessageBox.Ok)
self.close()
return
self.valid = True
super().accept()
def _show_progressbar(self):
self.controls_layout.setEnabled(False)
self.buttonBox.button(QDialogButtonBox.Ok).setEnabled(False)
self.progress_indicator.show()
def _display_progress(self, pbar: ProgressBar, msg=None, pos=None):
if self._cancel_event.is_set():
raise CancelProcessError()
self.progress_indicator.setMaximum(pbar.total)
self.progress_indicator.setValue(pbar.n)
QApplication.processEvents()
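# A minimal sketch of the cancel/progress wiring used above (assumed usage; the directory
# and scaling values are placeholders): `reject` sets the Event, and the next call into
# the redirected display hook raises CancelProcessError, unwinding out of read_cube_file.
#
#     cancel = Event()
#
#     def display(pbar, msg=None, pos=None):
#         if cancel.is_set():
#             raise CancelProcessError()
#         # otherwise refresh a progress widget from pbar.total and pbar.n
#
#     with ProgressBar(total=0, gui=True) as pbar:
#         pbar.display = partial(display, pbar)
#         experiment = read_cube_file("/path/to/cube/dirs", "weak", pbar)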
| 35.82197 | 88 | 0.606323 |
79489d9929339de87b59af7586d3f9f1b0bdd150 | 391 | py | Python | HLTrigger/Configuration/python/HLT_75e33/paths/HLT_Ele26_WP70_Unseeded_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | ["Apache-2.0"] | 1 | 2021-11-30T16:24:46.000Z | 2021-11-30T16:24:46.000Z | HLTrigger/Configuration/python/HLT_75e33/paths/HLT_Ele26_WP70_Unseeded_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | ["Apache-2.0"] | 4 | 2021-11-29T13:57:56.000Z | 2022-03-29T06:28:36.000Z | HLTrigger/Configuration/python/HLT_75e33/paths/HLT_Ele26_WP70_Unseeded_cfi.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | ["Apache-2.0"] | 1 | 2021-11-30T16:16:05.000Z | 2021-11-30T16:16:05.000Z |
import FWCore.ParameterSet.Config as cms
from ..modules.hltPreEle26WP70Unseeded_cfi import *
from ..sequences.HLTBeginSequence_cfi import *
from ..sequences.HLTEle26WP70UnseededSequence_cfi import *
from ..sequences.HLTEndSequence_cfi import *
HLT_Ele26_WP70_Unseeded = cms.Path(
HLTBeginSequence +
hltPreEle26WP70Unseeded +
HLTEle26WP70UnseededSequence +
HLTEndSequence
)
| 27.928571 | 58 | 0.810742 |
79489e082dafefccd4c4ac9027eb2cc78b8057a8 | 1,115 | py | Python | database/apis/api_psql.py | JhonSaguay/Observatorioapi | 642a3c7fa6ac121225de156cd07b96d1ee3b34c0 | ["MIT"] | null | null | null | database/apis/api_psql.py | JhonSaguay/Observatorioapi | 642a3c7fa6ac121225de156cd07b96d1ee3b34c0 | ["MIT"] | null | null | null | database/apis/api_psql.py | JhonSaguay/Observatorioapi | 642a3c7fa6ac121225de156cd07b96d1ee3b34c0 | ["MIT"] | null | null | null |
import requests
import psycopg2
import json
import time
def savedatabase(conexion,my_dict):
cur = conexion.cursor()
for dato in my_dict:
# dato_json={"en":dato}
json_string=(json.dumps(dato))
sql1="insert into apidata(datosjson) values ('"+json_string+"')"
cur.execute(sql1)
# conexion.commit()
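# Sketch of a parameterized alternative for the insert above (same apidata(datosjson)
# table assumed); concatenated SQL breaks on JSON containing single quotes, while
# psycopg2 can bind the value directly:
#
#     cur.execute("insert into apidata(datosjson) values (%s)", (json.dumps(dato),))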
def consultarapicompras(apiurl,conexion):
my_dict={'data':['prueba']}
cont=1
while len(my_dict['data'])>0:
try:
r = requests.get(apiurl+str(cont))
my_dict = r.json()
if len(my_dict['data'])==0:
continue
savedatabase(conexion,my_dict['data'])
conexion.commit()
print('entro: '+str(cont))
cont+=1
except Exception as exc:
print("Ha ocurrido un error:", exc)
time.sleep(5)
apiurl = "https://datosabiertos.compraspublicas.gob.ec/PLATAFORMA/api/search_ocds?year=2021&search=&page="
conexion = psycopg2.connect(host="localhost", database="PuebasApi", user="postgres", password="1998414")
consultarapicompras(apiurl,conexion)
conexion.close()
| 30.972222 | 106 | 0.61435 |
79489e2465732d974655139b8c55fbf53f8fb74d | 6,628 | py | Python | misc/scripts/upgrade-wildfly/lib/wildfly/upgrade/dependencies/3rd_party/fabric8-analytics-version-comparator/f8a_version_comparator/comparable_version.py | evtr/keycloak | 35375146dedfa14ff1c117d22b840b1ab3cc8f73 | ["Apache-2.0"] | 12,252 | 2015-01-01T02:39:03.000Z | 2022-03-31T19:57:55.000Z | misc/scripts/upgrade-wildfly/lib/wildfly/upgrade/dependencies/3rd_party/fabric8-analytics-version-comparator/f8a_version_comparator/comparable_version.py | evtr/keycloak | 35375146dedfa14ff1c117d22b840b1ab3cc8f73 | ["Apache-2.0"] | 4,567 | 2015-01-06T09:27:52.000Z | 2022-03-31T21:38:13.000Z | misc/scripts/upgrade-wildfly/lib/wildfly/upgrade/dependencies/3rd_party/fabric8-analytics-version-comparator/f8a_version_comparator/comparable_version.py | skew202/keycloak | 2aaceeab7ef5e059fb3bc737e6271bad85cf1545 | ["Apache-2.0"] | 5,376 | 2015-01-04T13:39:48.000Z | 2022-03-31T19:57:56.000Z |
# Copyright © 2018 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Geetika Batra <gbatra@redhat.com>
#
"""Module to implement Comparable Version class."""
import typing
from .item_object import IntegerItem
from .item_object import StringItem
from .item_object import ListItem
class ComparableVersion:
"""Class for Comparable Version."""
def __init__(self, version: str):
"""Initialize comparable version class.
:version: Version supplied as a string
"""
if not isinstance(version, str):
raise TypeError(
"Invalid type {got!r} of argument `version`, expected {expected!r}".format(
got=type(version),
expected=str
))
self.version = version
self.items = self.parse_version()
def __repr__(self):
"""Return representation of ComparableVersion object."""
return "{cls!s}(version={version!r})".format(
cls=self.__class__.__name__,
version=self.version
)
def __str__(self):
"""Return version string held by ComparableVersion object."""
return "{version!s}".format(
version=self.version
)
def __eq__(self, other):
"""Compare ComparableVersion objects for equality.
This rich comparison implies whether self == other
"""
# don't call compare_to(None)
if other is None:
return False
return self.compare_to(other) == 0
def __ne__(self, other):
"""Compare ComparableVersion objects for equality.
This rich comparison implies whether self != other
"""
# don't call compare_to(None)
if other is None:
return True
return self.compare_to(other) != 0
def __lt__(self, other):
"""Compare ComparableVersion objects.
This rich comparison implies whether self < other
"""
# don't call compare_to(None)
if other is None:
return False
return self.compare_to(other) == -1
def __le__(self, other):
"""Compare ComparableVersion objects.
This rich comparison implies whether self <= other
"""
# don't call compare_to(None)
if other is None:
return False
return self.compare_to(other) <= 0
def __gt__(self, other):
"""Compare ComparableVersion objects.
This rich comparison implies whether self > other
"""
# don't call compare_to(None)
if other is None:
return True
return self.compare_to(other) == 1
def __ge__(self, other):
"""Compare ComparableVersion objects.
This rich comparison implies whether self >= other
"""
# don't call compare_to(None)
if other is None:
return True
return self.compare_to(other) >= 0
def parse_version(self):
"""Parse version."""
# TODO: reduce cyclomatic complexity
ref_list = ListItem()
items = ref_list
parse_stack = list()
version = self.version.lower()
parse_stack.append(ref_list)
_is_digit = False
_start_index = 0
for _ch in range(0, len(version)):
ver_char = version[_ch]
if ver_char == ".":
if _ch == _start_index:
ref_list.add_item(IntegerItem(0))
else:
ref_list.add_item(self.parse_item(_is_digit, version[_start_index: _ch]))
_start_index = _ch + 1
elif ver_char == "-":
if _ch == _start_index:
ref_list.add_item(IntegerItem(0))
else:
ref_list.add_item(self.parse_item(_is_digit, version[_start_index: _ch]))
_start_index = _ch + 1
temp = ListItem()
ref_list.add_item(temp)
ref_list = temp
parse_stack.append(ref_list)
elif ver_char.isdigit():
if not _is_digit and _ch > _start_index:
ref_list.add_item(StringItem(version[_start_index: _ch], True))
_start_index = _ch
temp = ListItem()
ref_list.add_item(temp)
ref_list = temp
parse_stack.append(ref_list)
_is_digit = True
else:
if _is_digit and _ch > _start_index:
ref_list.add_item(self.parse_item(True, version[_start_index:_ch]))
_start_index = _ch
temp = ListItem()
ref_list.add_item(temp)
ref_list = temp
parse_stack.append(ref_list)
_is_digit = False
if len(version) > _start_index:
ref_list.add_item(self.parse_item(_is_digit, version[_start_index:]))
while parse_stack:
ref_list = parse_stack.pop()
ref_list.normalize()
return items
@staticmethod
def parse_item(_is_digit, buf):
"""Wrap items in version in respective object class."""
if _is_digit:
return IntegerItem(buf)
return StringItem(buf, False)
def compare_to(self, obj: typing.Union["ComparableVersion", str]):
"""Compare two ComparableVersion objects."""
if isinstance(obj, ComparableVersion):
# compare two objects of the same type
cmp_result = self.items.compare_to(obj.items)
elif isinstance(obj, str):
# compare against string
cmp_result = self.items.compare_to(ComparableVersion(obj).items)
else:
raise TypeError(
"Invalid type {got!r} of argument `obj`, expected <{expected}>".format(
got=type(obj),
expected=typing.Union["ComparableVersion", str]
))
return cmp_result
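# Illustrative usage sketch (the version strings below are assumed examples):
#
#     v = ComparableVersion("1.0.0")
#     v.compare_to("1.0.0")                           # 0: strings are parsed on the fly
#     v < ComparableVersion("1.0.1-alpha")            # rich comparisons delegate to compare_to()
#     sorted(["1.10", "1.2"], key=ComparableVersion)  # Maven-style ordering, not lexicographic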
| 30.971963 | 93 | 0.576946 |
79489e68282246d3583180b4eb3beb3bd20cab62 | 2,743 | py | Python | camera.py | nolita26/Human-Act-Recogntion | df6c27c1b6b8564605332e510caa27777c25fa7b | ["MIT"] | 2 | 2020-05-17T12:53:06.000Z | 2021-04-12T02:13:43.000Z | camera.py | nolita26/Human-Act-Recogntion | df6c27c1b6b8564605332e510caa27777c25fa7b | ["MIT"] | null | null | null | camera.py | nolita26/Human-Act-Recogntion | df6c27c1b6b8564605332e510caa27777c25fa7b | ["MIT"] | null | null | null |
import cv2
from twilio.rest import Client
# Your Account Sid and Auth Token from twilio.com/console
# DANGER! This is insecure. See http://twil.io/secure
account_sid = 'AC9ff3f227c0a9de0606351f3656ee2274'
auth_token = '6eaec067e0c697309716afdc6e1c8a2a'
client = Client(account_sid, auth_token)
face_cascade=cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")
ds_factor=0.6
class VideoCamera(object):
def __init__(self):
self.video = cv2.VideoCapture(0)
def __del__(self):
self.video.release()
def get_frame(self):
fgbg = cv2.createBackgroundSubtractorMOG2()
j = 0
fall=0
while(1):
count=0
ret, frame = self.video.read()
if ret:
ret, jpeg = cv2.imencode('.jpg', frame)
return jpeg.tobytes()
#Conver each frame to gray scale and subtract the background
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
fgmask = fgbg.apply(gray)
#Find contours
#_ , contours, _ = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
contours, hierarchy = cv2.findContours(fgmask,
cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if contours:
areas = []
for contour in contours:
ar = cv2.contourArea(contour)
areas.append(ar)
max_area = max(areas or [0])
max_area_index = areas.index(max_area)
cnt = contours[max_area_index]
M = cv2.moments(cnt)
x, y, w, h = cv2.boundingRect(cnt)
cv2.drawContours(fgmask, [cnt], 0, (255,255,255), 3,
maxLevel = 0)
if h < w:
j += 1
if j > 10:
cv2.putText(fgmask, 'FALL', (x, y),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255,255,255), 2)
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),2)
print("Fall detected")
count=1
# print(message.sid)
if h > w:
j = 0
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)
fall=0
cv2.imshow('video', frame)
if cv2.waitKey(33) == 27:
break
if count==1 and fall==0:
message = client.messages.create(
body='ALERT!!! FALL DETECTED! Please call 102 immeditately',
from_='whatsapp:+14155238886',
to='whatsapp:+919892938847'
)
fall=1
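# Recap of the heuristic implemented above: the largest foreground contour is boxed each
# frame; a box that stays wider than tall (h < w) for more than 10 consecutive frames is
# flagged as a fall, and one WhatsApp alert is sent via Twilio until an upright box
# (h > w) resets the state.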
| 35.623377 | 93 | 0.504557 |
79489f3fc4bcd6f44c2a66368b43b772766963e5 | 9,106 | py | Python | lib/datasets/V1datasets.py | Wangyf46/deeplabv3plus-pytorch | dbbbe88d7774d6e02f7ea13e770ee4278dc528fd | ["MIT"] | 2 | 2019-08-03T12:02:12.000Z | 2019-08-03T12:35:46.000Z | lib/datasets/V1datasets.py | Wangyf46/deeplabv3plus-pytorch | dbbbe88d7774d6e02f7ea13e770ee4278dc528fd | ["MIT"] | null | null | null | lib/datasets/V1datasets.py | Wangyf46/deeplabv3plus-pytorch | dbbbe88d7774d6e02f7ea13e770ee4278dc528fd | ["MIT"] | null | null | null |
#-*-coding:utf-8-*-
# ----------------------------------------
# Written by Yude Wang
# ----------------------------------------
from __future__ import print_function, division
import os
import sys
import pandas as pd
import cv2
import tqdm
import multiprocessing
from PIL import Image
import numpy as np
from torch.utils.data import Dataset
from lib.datasets.transform import *
import ipdb
class V1datasets(Dataset):
def __init__(self, dataset_name, cfg, period, aug):
self.dataset_name = dataset_name
self.dataset_dir = os.path.join(cfg.ROOT_DIR, dataset_name)
self.rst_dir = os.path.join('../../results', cfg.EXP_NAME, cfg.DATE, cfg.MODEL_BACKBONE)
self.eval_dir = os.path.join('../../eval_result', cfg.EXP_NAME, cfg.DATE, cfg.MODEL_BACKBONE)
if not os.path.isdir(self.rst_dir):
os.makedirs(self.rst_dir)
if not os.path.isdir(self.eval_dir):
os.makedirs(self.eval_dir)
self.period = period
if period == 'train':
self.img_dir = os.path.join(self.dataset_dir, 'train_img')
self.seg_dir = os.path.join(self.dataset_dir, 'train_gt')
self.name_list = os.listdir(self.seg_dir)
elif period == 'val':
self.img_dir = os.path.join(self.dataset_dir, 'val_img')
self.seg_dir = os.path.join(self.dataset_dir, 'val_gt')
self.name_list = os.listdir(self.seg_dir) ###
else:
self.img_dir = os.path.join('/nfs-data/wangyf/Output/seg_out/test_image_51')
self.name_list = os.listdir(self.img_dir)
self.rescale = None
self.centerlize = None
self.randomcrop = None
self.randomflip = None
self.randomrotation = None
self.randomscale = None
self.randomhsv = None
self.multiscale = None
self.totensor = ToTensor()
self.cfg = cfg
if dataset_name == 'clean_datasets_V1':
self.categories = [
'human'] # 1
self.num_categories = len(self.categories)
assert (self.num_categories+1 == self.cfg.MODEL_NUM_CLASSES)
self.cmap = self.__colormap(len(self.categories)+1)
if cfg.DATA_RESCALE > 0:
self.rescale = Rescale(cfg.DATA_RESCALE, fix=False)
if 'train' in self.period:
if cfg.DATA_RANDOMCROP > 0:
self.randomcrop = RandomCrop(cfg.DATA_RANDOMCROP)
if cfg.DATA_RANDOMROTATION > 0:
self.randomrotation = RandomRotation(cfg.DATA_RANDOMROTATION)
if cfg.DATA_RANDOMSCALE != 1:
self.randomscale = RandomScale(cfg.DATA_RANDOMSCALE)
if cfg.DATA_RANDOMFLIP > 0:
self.randomflip = RandomFlip(cfg.DATA_RANDOMFLIP)
if cfg.DATA_RANDOM_H > 0 or cfg.DATA_RANDOM_S > 0 or cfg.DATA_RANDOM_V > 0:
self.randomhsv = RandomHSV(cfg.DATA_RANDOM_H, cfg.DATA_RANDOM_S, cfg.DATA_RANDOM_V)
else:
self.multiscale = Multiscale(self.cfg.TEST_MULTISCALE)
def __len__(self):
return len(self.name_list)
def __getitem__(self, idx):
name = self.name_list[idx]
img_file = self.img_dir + '/' + name
image = cv2.imread(img_file)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# image = np.array(io.imread(img_file),dtype=np.uint8)
r, c, _ = image.shape
sample = {'image': image, 'name': name, 'row': r, 'col': c}
if 'train' in self.period:
seg_file = self.seg_dir + '/' + name
segmentation = np.array(Image.open(seg_file).convert('L'))
sample['segmentation'] = segmentation
if self.cfg.DATA_RANDOM_H > 0 or self.cfg.DATA_RANDOM_S > 0 or self.cfg.DATA_RANDOM_V > 0:
sample = self.randomhsv(sample)
if self.cfg.DATA_RANDOMFLIP > 0:
sample = self.randomflip(sample)
if self.cfg.DATA_RANDOMROTATION > 0:
sample = self.randomrotation(sample)
if self.cfg.DATA_RANDOMSCALE != 1:
sample = self.randomscale(sample)
if self.cfg.DATA_RANDOMCROP > 0:
sample = self.randomcrop(sample)
if self.cfg.DATA_RESCALE > 0:
sample = self.rescale(sample)
else:
if self.cfg.DATA_RESCALE > 0:
sample = self.rescale(sample)
sample = self.multiscale(sample)
if 'segmentation' in sample.keys():
sample['mask'] = sample['segmentation'] < self.cfg.MODEL_NUM_CLASSES
t = sample['segmentation']
t[t >= self.cfg.MODEL_NUM_CLASSES] = 0
sample['segmentation_onehot'] = onehot(t, self.cfg.MODEL_NUM_CLASSES)
sample = self.totensor(sample)
return sample
def __colormap(self, N):
"""Get the map from label index to color
Args:
N: number of class
return: a Nx3 matrix
"""
cmap = np.zeros((N, 3), dtype=np.uint8)
def uint82bin(n, count=8):
"""returns the binary of integer n, count refers to amount of bits"""
return ''.join([str((n >> y) & 1) for y in range(count - 1, -1, -1)])
for i in range(N):
r = 0
g = 0
b = 0
idx = i
for j in range(7):
str_id = uint82bin(idx)
r = r ^ (np.uint8(str_id[-1]) << (7 - j))
g = g ^ (np.uint8(str_id[-2]) << (7 - j))
b = b ^ (np.uint8(str_id[-3]) << (7 - j))
idx = idx >> 3
cmap[i, 0] = r
cmap[i, 1] = g
cmap[i, 2] = b
return cmap
def label2colormap(self, label):
m = label.astype(np.uint8)
r, c = m.shape
cmap = np.zeros((r, c, 3), dtype=np.uint8)
cmap[:, :, 0] = (m & 1) << 7 | (m & 8) << 3
cmap[:, :, 1] = (m & 2) << 6 | (m & 16) << 2
cmap[:, :, 2] = (m & 4) << 5
return cmap
def save_result(self, result_list, model_id):
"""Save test results
Args:
result_list(list of dict): [{'name':name1, 'predict':predict_seg1},{...},...]
"""
i = 1
folder_path = os.path.join(self.rst_dir, '%s_%s_cls' % (model_id, self.period))
if not os.path.exists(folder_path):
os.makedirs(folder_path)
for sample in result_list:
file_path = os.path.join(folder_path, '%s' % sample['name'])
# predict_color = self.label2colormap(sample['predict'])
# p = self.__coco2voc(sample['predict'])
cv2.imwrite(file_path, sample['predict'])
print('[%d/%d] %s saved' % (i, len(result_list), file_path))
i += 1
def do_python_eval(self, model_id):
predict_folder = os.path.join(self.rst_dir,'%s_%s_cls'%(model_id,self.period))
# predict_folder = '../../results/exp_V1/2019-06-29/res101_atrous/deeplabv3plus_val_cls/'
# predict_folder = '/nfs-data/wangyf/datasets/clean_datasets_V1/val_out/'
gt_folder = self.seg_dir
TP = np.zeros((self.cfg.MODEL_NUM_CLASSES), np.uint64)
P = np.zeros((self.cfg.MODEL_NUM_CLASSES), np.uint64)
T = np.zeros((self.cfg.MODEL_NUM_CLASSES), np.uint64)
for idx in range(len(self.name_list)):
print('%d/%d'%(idx,len(self.name_list)))
name = self.name_list[idx]
'''
str1 = name.split('.')[0]
str2 = str1.split('-')
if len(str2) != 1:
if str2[-1] == 'profile':
str1 = str2[0]
predict_file = predict_folder + '/' + str1 + '.png'
if os.path.isfile(predict_file) == False:
predict_file = predict_folder + '/' + str1 + '.jpg'
gt_file = os.path.join(gt_folder, '%s'%name)
predict = np.array(Image.open(predict_file))
gt = np.array(Image.open(gt_file).convert('L').resize((96, 160), Image.ANTIALIAS))
'''
predict_file = os.path.join(predict_folder, '%s'%name)
gt_file = os.path.join(gt_folder, '%s'%name)
predict = np.array(Image.open(predict_file)) ##
gt = np.array(Image.open(gt_file).convert('L'))
# predict = cv2.imread(predict_file)
# gt = cv2.imread(gt_file)
cal = gt < 255
mask = (predict==gt) & cal
for i in range(self.cfg.MODEL_NUM_CLASSES):
P[i] += np.sum((predict==i) * cal)
T[i] += np.sum((gt==i) * cal)
TP[i] += np.sum((gt==i) * mask)
TP = TP.astype(np.float64)
T = T.astype(np.float64)
P = P.astype(np.float64)
IoU = TP/(T+P-TP)
for i in range(self.cfg.MODEL_NUM_CLASSES):
if i == 0:
print('%15s:%7.3f%%'%('backbound', IoU[i] * 100))
else:
print('%15s:%7.3f%%'%(self.categories[i-1], IoU[i] * 100))
miou = np.mean(IoU)
print('==================================')
print('%15s:%7.3f%%'%('mIoU',miou * 100))
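# Metric recap for do_python_eval above: per class i, P[i] counts predicted pixels,
# T[i] ground-truth pixels and TP[i] correctly predicted pixels, so
# IoU[i] = TP[i] / (T[i] + P[i] - TP[i]) and mIoU is the mean over classes.
# Worked example with assumed counts: T=100, P=80, TP=60 gives IoU = 60 / 120 = 0.5.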
| 39.419913 | 102 | 0.544586 |
7948a02319228a5d974f3b3d1e0bba4861a1c76f | 1,250 | py | Python | back-end/migrations/versions/65d2fcbb8c38_add_comments_table.py | mahaoyang/flask-vuejs-madblog | 04737b9d72d1fb011498b09c7dd6f0a43e704525 | ["MIT"] | 756 | 2018-10-29T06:18:17.000Z | 2022-03-30T09:10:36.000Z | back-end/migrations/versions/65d2fcbb8c38_add_comments_table.py | tianxinyueming/flask-vuejs-madblog | b5d239e8b7bf99ff917ac7c757a333de6cac664d | ["MIT"] | 8 | 2018-12-21T05:50:00.000Z | 2021-09-08T04:35:51.000Z | back-end/migrations/versions/65d2fcbb8c38_add_comments_table.py | tianxinyueming/flask-vuejs-madblog | b5d239e8b7bf99ff917ac7c757a333de6cac664d | ["MIT"] | 204 | 2018-11-08T12:57:01.000Z | 2022-03-15T07:51:32.000Z |
"""add comments table
Revision ID: 65d2fcbb8c38
Revises: b7fa03819d57
Create Date: 2018-11-15 11:18:14.585190
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '65d2fcbb8c38'
down_revision = 'b7fa03819d57'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('comments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('body', sa.Text(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('disabled', sa.Boolean(), nullable=True),
sa.Column('author_id', sa.Integer(), nullable=True),
sa.Column('post_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
sa.ForeignKeyConstraint(['post_id'], ['posts.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_comments_timestamp'), 'comments', ['timestamp'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_comments_timestamp'), table_name='comments')
op.drop_table('comments')
# ### end Alembic commands ###
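# Typical way to apply this revision (assumed workflow, not defined in this file):
# `flask db upgrade` when driven through Flask-Migrate, or `alembic upgrade head`
# when calling Alembic directly; downgrade() is reached via `flask db downgrade`
# or `alembic downgrade -1`.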
| 30.487805 | 91 | 0.6784 |
7948a1f35c0909a4f81426558064ff18b23b5e3e | 5,163 | py | Python | mfctracker/models.py | gonzoua/mfctracker | 77800ee5ab17e4efefb12bfbd547bff1ed94c6a1 | ["BSD-2-Clause"] | 3 | 2016-10-19T05:01:31.000Z | 2019-06-06T18:20:11.000Z | mfctracker/models.py | gonzoua/mfctracker | 77800ee5ab17e4efefb12bfbd547bff1ed94c6a1 | ["BSD-2-Clause"] | 3 | 2017-11-28T17:31:58.000Z | 2021-04-12T02:37:27.000Z | mfctracker/models.py | gonzoua/mfctracker | 77800ee5ab17e4efefb12bfbd547bff1ed94c6a1 | ["BSD-2-Clause"] | 1 | 2020-06-26T14:05:53.000Z | 2020-06-26T14:05:53.000Z |
# Copyright (c) 2016-2019 Oleksandr Tymoshenko <gonzo@bluezbox.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.crypto import get_random_string
import jsonfield
class Branch(models.Model):
"""Branch info"""
name = models.CharField(max_length=30, unique=True)
path = models.CharField(max_length=128, unique=True)
is_trunk = models.BooleanField(default=False)
# Last imported revision
last_commit = models.CharField(max_length=64)
# Branchpoint
branch_commit = models.CharField(max_length=64)
branch_date = models.DateTimeField()
@classmethod
def create(cls, name, path):
obj = cls(name=name, path=path)
return obj
@classmethod
def trunk(cls):
return cls.objects.get(is_trunk=True)
@classmethod
def maintenance(cls):
return cls.objects.filter(is_trunk=False)
class Commit(models.Model):
"""Single commit info"""
sha = models.CharField(primary_key=True, max_length=64)
svn_revision = models.IntegerField(null=True)
commit_counter = models.IntegerField(null=True)
author = models.CharField(max_length=30)
date = models.DateTimeField()
mfc_after = models.DateField(blank=True, null=True)
msg = models.TextField()
merged_to = models.ManyToManyField(Branch, blank=True, related_name='merges')
branch = models.ForeignKey(Branch, null=True, on_delete=models.SET_NULL, related_name='commits')
mfc_with = models.ManyToManyField("self", blank=True)
@classmethod
def create(cls, sha, author, date, msg):
commit = cls(sha=sha, author=author, date=date, msg=msg)
return commit
@property
def summary(self):
msg = self.msg.strip()
eol = msg.find('\n')
if eol >= 0:
return msg[0:eol]
return msg
@property
def more(self):
msg = self.msg.strip()
eol = msg.find('\n')
if eol >= 0:
return msg[eol:].strip()
return ''
@property
def viewvc_url(self):
return settings.VIEWVC_REVISION_URL.format(revision=self.svn_revision)
@property
def cgit_url(self):
return settings.CGIT_COMMIT_URL.format(sha=self.sha)
@property
def sha_abbr(self):
return self.sha[:8]
class Change(models.Model):
path = models.CharField(max_length=1024)
commit = models.ForeignKey(Commit, on_delete=models.CASCADE, related_name='changes')
@classmethod
def create(cls, commit, path):
commit = cls(path=path, commit=commit)
return commit
class UserProfile(models.Model):
'''User-specific data like basket, share URL, etc...'''
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile')
share_token = models.CharField(max_length=30, blank=True)
mfc_basket = jsonfield.JSONField(default=[])
do_not_merge = models.ManyToManyField(Commit, blank=True)
@classmethod
def create(cls, user):
obj = cls()
obj.user = user
obj.share_token = get_random_string(length=8)
return obj
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
profile = UserProfile.objects.create(share_token=get_random_string(length=8), user=instance)
profile.save()
class CommitNote(models.Model):
text = models.TextField()
commit = models.ForeignKey(Commit, on_delete=models.CASCADE, related_name='notes')
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='notes')
@classmethod
def create(cls, commit, user, text):
note = cls(commit=commit, user=user, text=text)
return note
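# Illustrative ORM sketch (the objects below are assumed examples, not fixtures from
# this project):
#
#     trunk = Branch.trunk()                       # the single branch flagged is_trunk=True
#     stable = Branch.maintenance().first()        # any non-trunk branch
#     commit = Commit.create(sha, author, date, msg)   # hypothetical values
#     commit.branch = trunk
#     commit.save()
#     commit.merged_to.add(stable)                 # record that the commit was merged to 'stable'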
| 35.854167 | 102 | 0.706566 |
7948a227444eb8363750141c94691e5b37b9a9ca | 32,091 | py | Python | pyspeckit/cubes/cubes.py | mwcraig/pyspeckit | 6d6c09aac29549a8c094d97fb385c9283422bb82 | ["MIT"] | null | null | null | pyspeckit/cubes/cubes.py | mwcraig/pyspeckit | 6d6c09aac29549a8c094d97fb385c9283422bb82 | ["MIT"] | null | null | null | pyspeckit/cubes/cubes.py | mwcraig/pyspeckit | 6d6c09aac29549a8c094d97fb385c9283422bb82 | ["MIT"] | 1 | 2018-10-02T15:11:17.000Z | 2018-10-02T15:11:17.000Z |
"""
~~~~~~~~
cubes.py
~~~~~~~~
From `agpy <http://code.google.com/p/agpy/source/browse/trunk/agpy/cubes.py>`_,
contains functions to perform various transformations on data cubes and their
headers.
"""
from __future__ import print_function
from astropy.extern.six.moves import xrange
from numpy import sqrt,repeat,indices,newaxis,pi,cos,sin,array,mean,nansum
from math import acos,atan2,tan
import numpy
import numpy as np
import copy
import os
import astropy.io.fits as fits
import astropy.wcs as pywcs
import tempfile
import warnings
from astropy import coordinates
from astropy import log
try:
from AG_fft_tools import smooth
smoothOK = True
except ImportError:
smoothOK = False
try:
from scipy.interpolate import UnivariateSpline
scipyOK = True
except ImportError:
scipyOK = False
from . import posang # agpy code
from ..parallel_map import parallel_map
from ..spectrum import smooth
dtor = pi/180.0
def blfunc_generator(x=None, polyorder=None, splineorder=None,
sampling=1):
"""
Generate a function that will fit a baseline (polynomial or spline) to a
data set. Either ``splineorder`` or ``polyorder`` must be set
Parameters
----------
x : np.ndarray or None
The X-axis of the fitted array. Will be set to
``np.arange(len(data))`` if not specified
polyorder : None or int
The polynomial order.
splineorder : None or int
sampling : int
The sampling rate to use for the data. Can set to higher numbers to
effectively downsample the data before fitting
"""
def blfunc(args, x=x):
yfit,yreal = args
if hasattr(yfit,'mask'):
mask = ~yfit.mask
else:
mask = np.isfinite(yfit)
if x is None:
x = np.arange(yfit.size, dtype=yfit.dtype)
ngood = np.count_nonzero(mask)
if polyorder is not None:
if ngood < polyorder:
return yreal
else:
endpoint = ngood - (ngood % sampling)
y = np.mean([yfit[mask][ii:endpoint:sampling]
for ii in range(sampling)], axis=0)
polypars = np.polyfit(x[mask][sampling//2:endpoint:sampling],
y, polyorder)
return yreal-np.polyval(polypars, x).astype(yreal.dtype)
elif splineorder is not None and scipyOK:
if splineorder < 1 or splineorder > 4:
raise ValueError("Spline order must be in {1,2,3,4}")
elif ngood <= splineorder:
return yreal
else:
log.debug("splinesampling: {0} "
"splineorder: {1}".format(sampling, splineorder))
endpoint = ngood - (ngood % sampling)
y = np.mean([yfit[mask][ii:endpoint:sampling]
for ii in range(sampling)], axis=0)
if len(y) <= splineorder:
raise ValueError("Sampling is too sparse. Use finer sampling or "
"decrease the spline order.")
spl = UnivariateSpline(x[mask][sampling//2:endpoint:sampling],
y,
k=splineorder,
s=0)
return yreal-spl(x)
else:
raise ValueError("Must provide polyorder or splineorder")
return blfunc
def baseline_cube(cube, polyorder=None, cubemask=None, splineorder=None,
numcores=None, sampling=1):
"""
Given a cube, fit a polynomial to each spectrum
Parameters
----------
cube: np.ndarray
An ndarray with ndim = 3, and the first dimension is the spectral axis
polyorder: int
Order of the polynomial to fit and subtract
cubemask: boolean ndarray
Mask to apply to cube. Values that are True will be ignored when
fitting.
numcores : None or int
Number of cores to use for parallelization. If None, will be set to
the number of available cores.
"""
x = np.arange(cube.shape[0], dtype=cube.dtype)
#polyfitfunc = lambda y: np.polyfit(x, y, polyorder)
blfunc = blfunc_generator(x=x,
splineorder=splineorder,
polyorder=polyorder,
sampling=sampling)
reshaped_cube = cube.reshape(cube.shape[0], cube.shape[1]*cube.shape[2]).T
if cubemask is None:
log.debug("No mask defined.")
fit_cube = reshaped_cube
else:
if cubemask.dtype != 'bool':
raise TypeError("Cube mask *must* be a boolean array.")
if cubemask.shape != cube.shape:
raise ValueError("Mask shape does not match cube shape")
log.debug("Masking cube with shape {0} "
"with mask of shape {1}".format(cube.shape, cubemask.shape))
masked_cube = cube.copy()
masked_cube[cubemask] = np.nan
fit_cube = masked_cube.reshape(cube.shape[0], cube.shape[1]*cube.shape[2]).T
baselined = np.array(parallel_map(blfunc, zip(fit_cube,reshaped_cube), numcores=numcores))
blcube = baselined.T.reshape(cube.shape)
return blcube
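# A minimal usage sketch for baseline_cube (synthetic data, assumed shapes):
#
#     import numpy as np
#     noisy = np.random.randn(100, 8, 8) + np.linspace(0, 5, 100)[:, None, None]
#     flattened = baseline_cube(noisy, polyorder=1)  # removes the linear ramp per spectrum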
def flatten_header(header,delete=False):
"""
Attempt to turn an N-dimensional fits header into a 2-dimensional header
Turns all CRPIX[>2] etc. into new keywords with suffix 'A'
header must be a fits.Header instance
"""
if not isinstance(header,fits.Header):
raise Exception("flatten_header requires a fits.Header instance")
newheader = header.copy()
for key in newheader.keys():
try:
if delete and int(key[-1]) >= 3 and key[:2] in ['CD','CR','CT','CU','NA']:
newheader.pop(key)
elif (int(key[-1]) >= 3 or int(key[2])>=3) and key[:2] in ['CD','CR','CT','CU','NA','PC']:
newheader.rename_keyword(key,'A'+key,force=True)
if delete and (int(key[4]) >= 3 or int(key[7]) >= 3) and key[:2]=='PC' and key in newheader:
newheader.pop(key)
except ValueError:
# if key[-1] is not an int
pass
except IndexError:
# if len(key) < 2
pass
newheader['NAXIS'] = 2
if header.get('WCSAXES'):
newheader['WCSAXES'] = 2
return newheader
def speccen_header(header, lon=None, lat=None, proj='TAN', system='celestial',
spectral_axis=3, celestial_axes=[1,2]):
"""
Turn a cube header into a spectrum header, retaining RA/Dec vals where possible
(speccen is like flatten; spec-ify would be better but, specify? nah)
Assumes 3rd axis is velocity
"""
newheader = header.copy()
new_spectral_axis = 1
newheader['CRVAL{0}'.format(new_spectral_axis)] = header.get('CRVAL{0}'.format(spectral_axis))
newheader['CRPIX{0}'.format(new_spectral_axis)] = header.get('CRPIX{0}'.format(spectral_axis))
if 'CD{0}_{0}'.format(new_spectral_axis) in header:
newheader.rename_keyword('CD{0}_{0}'.format(new_spectral_axis),
'OLDCD{0}_{0}'.format(new_spectral_axis))
elif 'CDELT{0}'.format(new_spectral_axis) in header:
newheader.rename_keyword('CDELT{0}'.format(new_spectral_axis),'OLDCDEL{0}'.format(new_spectral_axis))
if 'CD{0}_{0}'.format(spectral_axis) in header:
newheader['CDELT{0}'.format(new_spectral_axis)] = header.get('CD{0}_{0}'.format(spectral_axis))
elif 'CDELT{0}'.format(spectral_axis) in header:
newheader['CDELT{0}'.format(new_spectral_axis)] = header.get('CDELT{0}'.format(spectral_axis))
newheader['CTYPE{0}'.format(new_spectral_axis)] = 'VRAD'
if header.get('CUNIT{0}'.format(spectral_axis)):
newheader['CUNIT{0}'.format(new_spectral_axis)] = header.get('CUNIT{0}'.format(spectral_axis))
else:
print("Assuming CUNIT3 is km/s in speccen_header")
newheader['CUNIT{0}'.format(new_spectral_axis)] = 'km/s'
newheader['CRPIX2'] = 1
newheader['CRPIX{0}'.format(spectral_axis)] = 1
if system == 'celestial':
c2 = 'RA---'
c3 = 'DEC--'
elif system == 'galactic':
c2 = 'GLON-'
c3 = 'GLAT-'
elif system == 'PIXEL':
c2 = 'PIX--'
c3 = 'PIX--'
newheader['CTYPE2'] = c2+proj
newheader['CTYPE{0}'.format(spectral_axis)] = c3+proj
if lon is not None:
newheader['CRVAL2'] = lon
if lat is not None:
newheader['CRVAL{0}'.format(spectral_axis)] = lat
if 'CD2_2' in header:
newheader.rename_keyword('CD2_2','OLDCD2_2')
if 'CD{0}_{0}'.format(spectral_axis) in header:
newheader.rename_keyword('CD{0}_{0}'.format(spectral_axis),
'OLDCD{0}_{0}'.format(spectral_axis))
if 'CROTA2' in header:
newheader.rename_keyword('CROTA2','OLDCROT2')
return newheader
def extract_aperture(cube, ap, r_mask=False, wcs=None,
coordsys='galactic', wunit='arcsec', debug=False,
method='mean'):
"""
Extract an aperture from a data cube. E.g. to acquire a spectrum
of an outflow that is extended.
Cube should have shape [z,y,x], e.g.
cube = fits.getdata('datacube.fits')
Apertures are specified in PIXEL units with an origin of 0,0 (NOT the 1,1
fits standard!) unless wcs and coordsys are specified
Parameters
----------
ap : list
For a circular aperture, len(ap)=3:
ap = [xcen,ycen,radius]
For an elliptical aperture, len(ap)=5:
ap = [xcen,ycen,height,width,PA]
wcs : wcs
a pywcs.WCS instance associated with the data cube
coordsys : str
the coordinate system the aperture is specified in.
Options are 'celestial' and 'galactic'. Default is 'galactic'
wunit : str
units of width/height. default 'arcsec', options 'arcmin' and 'degree'
method : str
'mean' or 'sum' (average over spectra, or sum them)
or 'error' for sqrt(sum-of-squares / n)
Other Parameters
----------------
r_mask : bool
return mask in addition to spectrum (for error checking?)
"""
warnings.warn("SpectralCube can do what subimage_integ does much more easily!",
DeprecationWarning)
if wcs is not None and coordsys is not None:
if debug:
print("Converting aperture ",ap,)
ap = aper_world2pix(ap,wcs,coordsys=coordsys,wunit=wunit)
if debug:
print(" to ",ap)
if len(ap) == 3:
sh = cube.shape
yind,xind = indices(sh[1:3]) # recall that python indices are backwards
dis = sqrt((xind-ap[0])**2+(yind-ap[1])**2)
mask = dis < ap[2]
elif len(ap) == 5:
yinds,xinds = indices(cube.shape[1:3])
th = (ap[4])*dtor
xindr = (xinds-ap[0])*cos(th) + (yinds-ap[1])*sin(th)
yindr = (xinds-ap[0])*-sin(th) + (yinds-ap[1])*cos(th)
ratio = max(ap[2:4])/min(ap[2:4])
mask = ((xindr*ratio)**2 + yindr**2)**0.5 < max(ap[2:4])
else:
raise Exception("Wrong number of parameters. Need either 3 parameters "
"for a circular aperture or 5 parameters for an "
"elliptical aperture.")
npixinmask = mask.sum()
mask3d = repeat(mask[newaxis,:,:],cube.shape[0],axis=0)
if method == 'mean':
specsum = nansum(nansum((cube*mask3d),axis=2),axis=1)
spec = specsum / npixinmask
elif method == 'error':
specsum = nansum(nansum((cube*mask3d)**2,axis=2),axis=1)
spec = (specsum)**0.5 / npixinmask
else:
spec = nansum(nansum((cube*mask3d),axis=2),axis=1)
if r_mask:
return spec,mask
else:
return spec
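# A minimal usage sketch for extract_aperture (pixel-coordinate apertures, assumed values):
#
#     spec = extract_aperture(cube, [32.0, 48.0, 5.0])                         # circle: xcen, ycen, radius
#     spec, mask = extract_aperture(cube, [32, 48, 10, 6, 30.0], r_mask=True)  # ellipse plus mask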
def integ(file,vrange,xcen=None,xwidth=None,ycen=None,ywidth=None,**kwargs):
"""
wrapper of subimage_integ that defaults to using the full image
"""
if isinstance(file,fits.PrimaryHDU):
header = file.header
cube = file.data
elif isinstance(file,fits.HDUList):
header = file[0].header
cube = file[0].data
else:
file = fits.open(file)
header = file[0].header
cube = file[0].data
if None in [xcen,xwidth,ycen,ywidth]:
xcen = header['NAXIS1'] / 2
xwidth = xcen + header['NAXIS1'] % 2
ycen = header['NAXIS2'] / 2
ywidth = ycen + header['NAXIS2'] % 2
return subimage_integ(cube,xcen,xwidth,ycen,ywidth,vrange,header=header,**kwargs)
def subimage_integ(cube, xcen, xwidth, ycen, ywidth, vrange, header=None,
average=mean, dvmult=False, return_HDU=False,
units="pixels", zunits=None):
"""
Returns a sub-image from a data cube integrated over the specified velocity range
NOTE: With `spectral_cube <spectral-cube.rtfd.org>`_, subcube features can
be easily applied with the `.subcube` method, and integration is handled
separately.
Parameters
----------
cube : np.ndarray
A 3-dimensional numpy array with dimensions (velocity, y, x)
xcen,ycen : float
The center in the X,Y-dimension. See `units` below for unit information
xwidth,ywidth : float
The width in the X,Y-dimension. See `units` below for unit information
xwidth and ywidth are "radius" values, i.e. half the length that will be extracted
vrange : (float,float)
The velocity range to integrate over. See `zunits` below for unit information
header : `astropy.io.fits.Header` or None
If specified, will allow the use of WCS units
average : function
The function to apply when 'integrating' over the subcube
dvmult : bool
If dvmult is set, multiply the average by DV (this is useful if you set
average=sum and dvmul=True to get an integrated value, e.g. K km/s or
Jy km/s)
return_hdu : bool
If specified, will return an HDU object, otherwise will return the
array and header
units : 'pixels' or 'wcs'
If 'pixels', all units (xcen, ycen, xwidth, ywidth) will be in pixels.
If 'wcs', the values will be converted from WCS units to pixel units
using the WCS specified by the `header`
zunits : 'pixels' or 'wcs' or None
If None, will be set to be the same as `units`
Returns
-------
subim, hdu : tuple
A tuple (integrated array, header) if ``return_hdu`` is ``False``, or an HDU if
it is True
"""
if header:
flathead = flatten_header(header.copy())
wcs = pywcs.WCS(header=flathead)
if header.get('CD3_3'): CD3 = header.get('CD3_3')
else: CD3 = header.get('CDELT3')
if units=="pixels":
xlo = int( max([xcen-xwidth,0]) )
ylo = int( max([ycen-ywidth,0]) )
xhi = int( min([xcen+xwidth,cube.shape[2]]) )
yhi = int( min([ycen+ywidth,cube.shape[1]]) )
elif units=="wcs" and header:
newxcen,newycen = wcs.wcs_world2pix(xcen,ycen,0)
try:
newxwid,newywid = xwidth / abs(wcs.wcs.cd[0,0]), ywidth / abs(wcs.wcs.cd[1,1])
except AttributeError:
newxwid,newywid = xwidth / abs(wcs.wcs.cdelt[0]), ywidth / abs(wcs.wcs.cdelt[1])
xlo = int( max([newxcen-newxwid,0]) )
ylo = int( max([newycen-newywid,0]) )
xhi = int( min([newxcen+newxwid,cube.shape[2]]) )
yhi = int( min([newycen+newywid,cube.shape[1]]) )
else:
print("Can only use wcs if you pass a header.")
if zunits is None:
zunits = units
if zunits == 'pixels':
zrange = vrange
if zunits == 'wcs':
zrange = ( array(vrange)-header.get('CRVAL3') ) / CD3 - 1 + header.get('CRPIX3')
subim = average(cube[zrange[0]:zrange[1],ylo:yhi,xlo:xhi],axis=0)
if dvmult and CD3: subim *= CD3
elif dvmult:
print("Error: could not multiply by dv; CD3=",CD3)
if header is None:
return subim
else:
# Cannot set crval2 != 0 for Galactic coordinates: therefore, probably
# wrong approach in general
#crv1,crv2 = wcs.wcs_pix2world(xlo,ylo,0)
#try:
# flathead['CRVAL1'] = crv1[0]
# flathead['CRVAL2'] = crv2[0]
#except IndexError:
# flathead['CRVAL1'] = crv1.item() # np 0-d arrays are not scalar
# flathead['CRVAL2'] = crv2.item() # np 0-d arrays are not scalar
# xlo, ylo have been forced to integers already above
flathead['CRPIX1'] = flathead['CRPIX1'] - xlo
flathead['CRPIX2'] = flathead['CRPIX2'] - ylo
if return_HDU:
return fits.PrimaryHDU(data=subim,header=flathead)
else:
return subim,flathead
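# A minimal usage sketch for subimage_integ (pixel units, assumed values):
#
#     intmap, flathead = subimage_integ(cube, xcen=100, xwidth=25, ycen=100, ywidth=25,
#                                       vrange=(20, 40), header=header)
#     hdu = subimage_integ(cube, 100, 25, 100, 25, (20, 40), header=header, return_HDU=True)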
def subcube(cube, xcen, xwidth, ycen, ywidth, header=None,
dvmult=False, return_HDU=False, units="pixels",
widthunits="pixels"):
"""
Crops a data cube
All units assumed to be pixel units
cube has dimensions (velocity, y, x)
xwidth and ywidth are "radius" values, i.e. half the length that will be extracted
if dvmult is set, multiple the average by DV (this is useful if you set
average=sum and dvmul=True to get an integrated value)
"""
if header:
newheader = header.copy()
flathead = flatten_header(header.copy())
wcs = pywcs.WCS(header=flathead)
if widthunits == "pixels":
newxwid, newywid = xwidth, ywidth
elif widthunits == "wcs":
try:
newxwid,newywid = xwidth / abs(wcs.wcs.cd[0,0]), ywidth / abs(wcs.wcs.cd[1,1])
except AttributeError:
newxwid,newywid = xwidth / abs(wcs.wcs.cdelt[0]), ywidth / abs(wcs.wcs.cdelt[1])
else:
raise Exception("widthunits must be either 'wcs' or 'pixels'")
if units=="pixels":
newxcen,newycen = xcen,ycen
elif units=="wcs" and header:
newxcen,newycen = wcs.wcs_world2pix(xcen,ycen,0)
else:
raise Exception("units must be either 'wcs' or 'pixels'")
x1 = int( numpy.floor( max([newxcen-newxwid,0]) ) )
y1 = int( numpy.floor( max([newycen-newywid,0]) ) )
x2 = int( numpy.ceil( min([newxcen+newxwid,cube.shape[2]]) ) )
y2 = int( numpy.ceil( min([newycen+newywid,cube.shape[1]]) ) )
xhi = max(x1,x2)
xlo = min(x1,x2)
yhi = max(y1,y2)
ylo = min(y1,y2)
subim = cube[:,ylo:yhi,xlo:xhi]
if return_HDU:
xmid_sky,ymid_sky = wcs.wcs_pix2world(xlo+xwidth,ylo+ywidth,0)
try:
newheader['CRVAL1'] = xmid_sky[0]
newheader['CRVAL2'] = ymid_sky[0]
except IndexError:
newheader['CRVAL1'] = float(xmid_sky)
newheader['CRVAL2'] = float(ymid_sky)
newheader['CRPIX1'] = 1+xwidth
newheader['CRPIX2'] = 1+ywidth
newHDU = fits.PrimaryHDU(data=subim,header=newheader)
if newHDU.header.get('NAXIS1') == 0 or newHDU.header.get('NAXIS2') == 0:
raise Exception("Cube has been cropped to 0 in one dimension")
return newHDU
else:
return subim
def aper_world2pix(ap,wcs,coordsys='galactic',wunit='arcsec'):
"""
Converts an elliptical aperture (x,y,width,height,PA) from
WCS to pixel coordinates given an input wcs (an instance
of the pywcs.WCS class). Must be a 2D WCS header.
"""
convopt = {'arcsec':3600.0,'arcmin':60.0,'degree':1.0}
try:
conv = convopt[wunit]
except:
raise Exception("Must specify wunit='arcsec','arcmin', or 'degree'")
if len(wcs.wcs.cdelt) != 2:
raise Exception("WCS header is not strictly 2-dimensional. Look for 3D keywords.")
if '' in wcs.wcs.ctype:
raise Exception("WCS header has no CTYPE.")
if coordsys.lower() == 'galactic':
pos = coordinates.SkyCoord(ap[0],ap[1],unit=('deg','deg'), frame='galactic')
elif coordsys.lower() in ('radec','fk5','icrs','celestial'):
pos = coordinates.SkyCoord(ap[0],ap[1],unit=('deg','deg'), frame='fk5')
if wcs.wcs.ctype[0][:2] == 'RA':
ra,dec = pos.icrs.ra.deg,pos.icrs.dec.deg
elif wcs.wcs.ctype[0][:4] == 'GLON':
ra,dec = pos.galactic.l.deg,pos.galactic.b.deg
else:
raise Exception("WCS CTYPE has no match.")
# workaround for a broken wcs.wcs_sky2pix
try:
radif = (wcs.wcs.crval[0]-ra)*dtor
gamma = acos(cos(dec*dtor)*cos(wcs.wcs.crval[1]*dtor)*cos(radif)+sin(dec*dtor)*sin(wcs.wcs.crval[1]*dtor)) / dtor
theta = atan2( sin(radif) , ( tan(dec*dtor)*cos(wcs.wcs.crval[1]*dtor)-sin(wcs.wcs.crval[1]*dtor)*cos(radif) ) )
x = -gamma * sin(theta) / wcs.wcs.cd[0,0] + wcs.wcs.crpix[0]
y = gamma * cos(theta) / wcs.wcs.cd[1,1] + wcs.wcs.crpix[1]
except:
radif = (wcs.wcs.crval[0]-ra)*dtor
gamma = acos(cos(dec*dtor)*cos(wcs.wcs.crval[1]*dtor)*cos(radif)+sin(dec*dtor)*sin(wcs.wcs.crval[1]*dtor)) / dtor
theta = atan2( sin(radif) , ( tan(dec*dtor)*cos(wcs.wcs.crval[1]*dtor)-sin(wcs.wcs.crval[1]*dtor)*cos(radif) ) )
x = -gamma * sin(theta) / wcs.wcs.cdelt[0] + wcs.wcs.crpix[0]
y = gamma * cos(theta) / wcs.wcs.cdelt[1] + wcs.wcs.crpix[1]
#print "DEBUG: x,y from math (vectors): ",x,y
#x,y = wcs.wcs_world2pix(ra,dec,0) # convert WCS coordinate to pixel coordinate (0 is origin, do not use fits convention)
#print "DEBUG: x,y from wcs: ",x,y
try:
x=x[0] - 1 # change from FITS to python convention
y=y[0] - 1 # change from FITS to python convention
#print "DEBUG: x,y from math: ",x,y
except:
pass
# cd is default, cdelt is backup
if len(ap) > 3:
try:
width = ap[2] / conv / abs(wcs.wcs.cd[0,0]) # first is width, second is height in DS9 PA convention
height = ap[3] / conv / abs(wcs.wcs.cd[0,0])
except:
width = ap[2] / conv / abs(wcs.wcs.cdelt[0]) # first is width, second is height in DS9 PA convention
height = ap[3] / conv / abs(wcs.wcs.cdelt[0])
apold = copy.copy(ap)
if len(ap) == 5:
PA = ap[4]
ap = [x,y,width,height,PA]
else:
ap = [x,y,width,height]
elif len(ap) == 3:
try:
width = ap[2] / conv / abs(wcs.wcs.cd[0,0]) # first is width, second is height in DS9 PA convention
except:
width = ap[2] / conv / abs(wcs.wcs.cdelt[0]) # first is width, second is height in DS9 PA convention
apold = copy.copy(ap)
ap = [x,y,width]
else:
raise TypeError("Aperture length is incorrect.")
return ap
def getspec(lon,lat,rad,cube,header,r_fits=True,inherit=True,wunit='arcsec'):
"""
Given a longitude, latitude, aperture radius (arcsec), and a cube file,
return a .fits file or a spectrum.
Parameters
----------
lon: float
lat: float
longitude and latitude center of a circular aperture in WCS coordinates
must be in coordinate system of the file
rad: float
radius (default degrees) of aperture
"""
convopt = {'arcsec':1.0,'arcmin':60.0,'degree':3600.0}
flathead = flatten_header(header)
wcs = pywcs.WCS(flathead)
if wcs.wcs.ctype[0][:2] == 'RA':
coordsys='celestial'
elif wcs.wcs.ctype[0][:4] == 'GLON':
coordsys='galactic'
spec = extract_aperture(cube,[lon,lat,rad],wcs=wcs,
coordsys=coordsys,wunit=wunit)
if nansum(spec) == 0:
print("Total of extracted spectrum was zero. lon,lat,rad: ",lon,lat,rad)
#import pdb; pdb.set_trace()
if r_fits:
if inherit:
newhead = header.copy()
else:
newhead = fits.Header()
try:
newhead['CD1_1'] = header['CD3_3']
except KeyError:
newhead['CD1_1'] = header['CDELT3']
newhead['CRPIX1'] = header['CRPIX3']
newhead['CRVAL1'] = header['CRVAL3']
try:
newhead['CTYPE1'] = header['CTYPE3']
except KeyError:
newhead['CTYPE1'] = "VRAD"
try:
newhead['CUNIT1'] = header['CUNIT3']
except KeyError:
print("Header did not contain CUNIT3 keyword. Defaulting to km/s")
newhead['CUNIT1'] = "km/s"
newhead['BUNIT'] = header['BUNIT']
newhead['APGLON'] = lon
newhead['APGLAT'] = lat
newhead['APRAD'] = (rad*convopt[wunit],'arcseconds') # radius in arcsec
newfile = fits.PrimaryHDU(data=spec,header=newhead)
return newfile
else:
return spec
def getspec_reg(cubefilename,region,**kwargs):
"""
Aperture extraction from a cube using a pyregion circle region
The region must be in the same coordinate system as the cube header
.. warning:: The second argument of getspec_reg requires a pyregion region list,
and therefore this code depends on `pyregion`_.
"""
ds9tocoords = {'fk5':'celestial','galactic':'galactic','icrs':'celestial'}
if region.name != 'circle':
raise Exception("Only circular apertures are implemented so far")
l,b,r = region.coord_list
#pos = coords.Position([l,b],system=ds9tocoords[region.coord_format])
if isinstance(cubefilename,fits.HDUList):
cubefile = cubefilename
else:
cubefile = fits.open(cubefilename)
header = cubefile[0].header
cube = cubefile[0].data
if len(cube.shape) == 4: cube = cube[0,:,:,:]
sp = getspec(l,b,r,cube,header,wunit='degree',**kwargs)
return sp
def coords_in_image(fitsfile,lon,lat,system='galactic'):
"""
Determine whether the coordinates are inside the image
"""
if not isinstance(fitsfile,fits.HDUList):
fitsfile = fits.open(fitsfile)
wcs = pywcs.WCS(flatten_header(fitsfile[0].header))
if 'RA' in wcs.wcs.ctype[0]:
pos = coords.Position((lon,lat),system=system)
lon,lat = pos.j2000()
if 'GLON' in wcs.wcs.ctype[0]:
pos = coords.Position((lon,lat),system=system)
lon,lat = pos.galactic()
x,y = wcs.wcs_world2pix(lon,lat,0)
#DEBUG print x,y,wcs.naxis1,wcs.naxis2
if (0 < x < wcs.naxis1) and (0 < y < wcs.naxis2):
return True
else:
return False
def spectral_smooth(cube, smooth_factor, downsample=True, parallel=True,
numcores=None, **kwargs):
"""
Smooth the cube along the spectral direction
"""
yy,xx = numpy.indices(cube.shape[1:])
if downsample:
newshape = cube[::smooth_factor,:,:].shape
else:
newshape = cube.shape
# need to make the cube "flat" along dims 1&2 for iteration in the "map"
flatshape = (cube.shape[0],cube.shape[1]*cube.shape[2])
Ssmooth = lambda x: smooth.smooth(x, smooth_factor, downsample=downsample, **kwargs)
if parallel:
newcube = numpy.array(parallel_map(Ssmooth, cube.reshape(flatshape).T, numcores=numcores)).T.reshape(newshape)
else:
newcube = numpy.array(map(Ssmooth, cube.reshape(flatshape).T)).T.reshape(newshape)
#naive, non-optimal version
# for (x,y) in zip(xx.flat,yy.flat):
# newcube[:,y,x] = smooth.smooth(cube[:,y,x], smooth_factor,
# downsample=downsample, **kwargs)
return newcube
def plane_smooth(cube,cubedim=0,parallel=True,numcores=None,**kwargs):
"""
parallel-map the smooth function
Parameters
----------
parallel: bool
defaults True. Set to false if you want serial (for debug purposes?)
numcores: int
pass to parallel_map (None = use all available)
"""
if not smoothOK:
return
if cubedim != 0:
cube = cube.swapaxes(0,cubedim)
cubelist = [cube[ii,:,:] for ii in xrange(cube.shape[0])]
Psmooth = lambda C: smooth(C,**kwargs)
if parallel:
smoothcube = array(parallel_map(Psmooth,cubelist,numcores=numcores))
else:
        smoothcube = array(list(map(Psmooth, cubelist)))
if cubedim != 0:
smoothcube = smoothcube.swapaxes(0,cubedim)
return smoothcube
try:
import montage
def rotcrop_cube(x1, y1, x2, y2, cubename, outname, xwidth=25, ywidth=25,
in_system='galactic', out_system='equatorial',
clobber=True, newheader=None, xcen=None, ycen=None):
"""
Crop a data cube and then rotate it with montage
"""
cubefile = fits.open(cubename)
if xcen is None and ycen is None:
pos1 = coords.Position([x1,y1],system=in_system)
pos2 = coords.Position([x2,y2],system=in_system)
if cubefile[0].header.get('CTYPE1')[:2] == 'RA':
x1,y1 = pos1.j2000()
x2,y2 = pos2.j2000()
coord_system = 'celestial'
elif cubefile[0].header.get('CTYPE1')[:4] == 'GLON':
x1,y1 = pos1.galactic()
x2,y2 = pos2.galactic()
coord_system = 'galactic'
xcen = (x1+x2)/2.0
ycen = (y1+y2)/2.0
print(xcen,ycen,xwidth,ywidth,coord_system)
else:
coord_system = in_system
sc = subcube(cubefile[0].data, xcen, xwidth, ycen, ywidth,
widthunits='pixels', units="wcs", header=cubefile[0].header,
return_HDU=True)
# note: there should be no security risk here because fits' writeto
# will not overwrite by default
tempcube = tempfile.mktemp(suffix='.fits')
sc.writeto(tempcube)
pa = posang.posang(x1,y1,x2,y2,system=coord_system) - 90
if newheader is None:
newheader = sc.header.copy()
cd11 = newheader.get('CDELT1') if newheader.get('CDELT1') else newheader.get('CD1_1')
cd22 = newheader.get('CDELT2') if newheader.get('CDELT2') else newheader.get('CD2_2')
cd12 = newheader.get('CD1_2') if newheader.get('CD1_2') else 0.0
cd21 = newheader.get('CD2_1') if newheader.get('CD2_1') else 0.0
cdelt = numpy.sqrt(cd11**2+cd12**2)
tempheader = tempfile.mktemp(suffix='.hdr')
ycensign = "+" if numpy.sign(ycen) >= 0 else "-"
montage.mHdr("%s %1s%s" % (xcen, ycensign, numpy.abs(ycen)), xwidth*cdelt,
tempheader, system=out_system, height=ywidth*cdelt,
pix_size=cdelt*3600.0, rotation=pa)
os.system("sed -i bck '/END/d' %s" % (tempheader))
newheader2 = fits.Header()
newheader2.fromTxtFile(tempheader)
#newheader2.fromtextfile(tempheader)
for key in ('CRPIX3','CRVAL3','CDELT3','CD3_3','CUNIT3','WCSTYPE3','CTYPE3'):
if newheader.get(key):
newheader2[key] = newheader.get(key)
if newheader.get('CD3_3') and newheader2.get('CDELT3') is None:
newheader2['CDELT3'] = newheader.get('CD3_3')
newheader2.toTxtFile(tempheader,clobber=True)
#if newheader2.get('CDELT3') is None:
# raise Exception("No CD3_3 or CDELT3 in header.")
else:
if isinstance(newheader,str):
newheader2 = fits.Header()
newheader2.fromTxtFile(newheader)
tempheader = tempfile.mktemp(suffix='.hdr')
newheader2.toTxtFile(tempheader,clobber=True)
montage.wrappers.reproject_cube(tempcube,outname,header=tempheader,clobber=clobber)
#print "\n",outname
#os.system('imhead %s | grep CDELT' % outname)
# AWFUL hack because montage removes CDELT3
tempcube = fits.open(outname)
        tempcube[0].header = newheader2
#if tempcube.header.get('CDELT3') is None:
# raise Exception("No CD3_3 or CDELT3 in header.")
#print tempcube.header.get('CDELT3')
tempcube.writeto(outname,clobber=True)
#print tempcube.get('CDELT3')
#print "\n",outname
#os.system('imhead %s | grep CDELT' % outname)
return
def resample_cube(cubefilename, header):
inhdr = fits.getheader(cubefilename)
except ImportError:
pass
| 36.675429 | 126 | 0.595961 |
7948a2954b16df15ef10ffe43910ec6d08118d3e
| 123 |
py
|
Python
|
experiments/iterfibo.py
|
johnpaulguzman/Algorithm-Analyzer
|
e93abfb51f2f67b6df1af8d95cc6855ad7de69f2
|
[
"MIT"
] | null | null | null |
experiments/iterfibo.py
|
johnpaulguzman/Algorithm-Analyzer
|
e93abfb51f2f67b6df1af8d95cc6855ad7de69f2
|
[
"MIT"
] | null | null | null |
experiments/iterfibo.py
|
johnpaulguzman/Algorithm-Analyzer
|
e93abfb51f2f67b6df1af8d95cc6855ad7de69f2
|
[
"MIT"
] | null | null | null |
def f(n):
    # Iterative Fibonacci: nm1 holds F(k) and nm2 holds F(k-1); after n-1
    # updates the function returns F(n), with f(1) == f(2) == 1.
    nm1 = 1
    nm2 = 0
    for i in range(n - 1):
        temp = nm1 + nm2
        nm2 = nm1
        nm1 = temp
    return nm1
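# A quick sanity check, added for illustration and not part of the original
# experiment file: the first few values produced by the iterative routine.
if __name__ == "__main__":
    print([f(n) for n in range(1, 10)])  # [1, 1, 2, 3, 5, 8, 13, 21, 34]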
| 15.375 | 24 | 0.471545 |
7948a36ce4f60bf2de9cb7e2c67d28371576195e
| 3,164 |
py
|
Python
|
qa/rpc-tests/bip65-cltv.py
|
cryptoandcoffee/DCUMN
|
85b873a90b3a2df6870d7ea74ea5087945e238bb
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/bip65-cltv.py
|
cryptoandcoffee/DCUMN
|
85b873a90b3a2df6870d7ea74ea5087945e238bb
|
[
"MIT"
] | null | null | null |
qa/rpc-tests/bip65-cltv.py
|
cryptoandcoffee/DCUMN
|
85b873a90b3a2df6870d7ea74ea5087945e238bb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test the CHECKLOCKTIMEVERIFY (BIP65) soft-fork logic
#
from test_framework.test_framework import DCUTestFramework
from test_framework.util import *
class BIP65Test(DCUTestFramework):
def setup_network(self):
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, []))
self.nodes.append(start_node(1, self.options.tmpdir, ["-blockversion=3"]))
self.nodes.append(start_node(2, self.options.tmpdir, ["-blockversion=4"]))
connect_nodes(self.nodes[1], 0)
connect_nodes(self.nodes[2], 0)
self.is_network_split = False
self.sync_all()
def run_test(self):
cnt = self.nodes[0].getblockcount()
# Mine some old-version blocks
self.nodes[1].generate(100)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 100):
raise AssertionError("Failed to mine 100 version=3 blocks")
# Mine 750 new-version blocks
for i in xrange(15):
self.nodes[2].generate(50)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 850):
raise AssertionError("Failed to mine 750 version=4 blocks")
# TODO: check that new CHECKLOCKTIMEVERIFY rules are not enforced
# Mine 1 new-version block
self.nodes[2].generate(1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 851):
            raise AssertionError("Failed to mine a version=4 block")
# TODO: check that new CHECKLOCKTIMEVERIFY rules are enforced
# Mine 198 new-version blocks
for i in xrange(2):
self.nodes[2].generate(99)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1049):
raise AssertionError("Failed to mine 198 version=4 blocks")
# Mine 1 old-version block
self.nodes[1].generate(1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1050):
raise AssertionError("Failed to mine a version=3 block after 949 version=4 blocks")
        # Mine 1 new-version block
self.nodes[2].generate(1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1051):
raise AssertionError("Failed to mine a version=4 block")
        # Mine 1 old-version block
try:
self.nodes[1].generate(1)
raise AssertionError("Succeeded to mine a version=3 block after 950 version=4 blocks")
except JSONRPCException:
pass
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1051):
raise AssertionError("Accepted a version=3 block after 950 version=4 blocks")
        # Mine 1 new-version block
self.nodes[2].generate(1)
self.sync_all()
if (self.nodes[0].getblockcount() != cnt + 1052):
raise AssertionError("Failed to mine a version=4 block")
if __name__ == '__main__':
BIP65Test().main()
| 35.954545 | 98 | 0.625474 |
7948a475bbaad5978368f1d68372174e4b7a8ab7
| 5,703 |
py
|
Python
|
tensorflow/python/kernel_tests/manip_ops_test.py
|
mohammadzainabbas/tensorflow
|
352142267a1a151b04c6198de83b40b7e979d1d8
|
[
"Apache-2.0"
] | 4 |
2019-02-18T17:38:14.000Z
|
2019-09-05T14:14:47.000Z
|
tensorflow/python/kernel_tests/manip_ops_test.py
|
mohammadzainabbas/tensorflow
|
352142267a1a151b04c6198de83b40b7e979d1d8
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/kernel_tests/manip_ops_test.py
|
mohammadzainabbas/tensorflow
|
352142267a1a151b04c6198de83b40b7e979d1d8
|
[
"Apache-2.0"
] | 3 |
2018-04-11T03:08:18.000Z
|
2021-05-09T21:51:42.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for manip_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import manip_ops
from tensorflow.python.platform import test as test_lib
# pylint: disable=g-import-not-at-top
try:
from distutils.version import StrictVersion as Version
# numpy.roll for multiple shifts was introduced in numpy version 1.12.0
NP_ROLL_CAN_MULTISHIFT = Version(np.version.version) >= Version("1.12.0")
except ImportError:
NP_ROLL_CAN_MULTISHIFT = False
# pylint: enable=g-import-not-at-top
class RollTest(test_util.TensorFlowTestCase):
def _testRoll(self, np_input, shift, axis):
expected_roll = np.roll(np_input, shift, axis)
with self.test_session():
roll = manip_ops.roll(np_input, shift, axis)
self.assertAllEqual(roll.eval(), expected_roll)
def _testGradient(self, np_input, shift, axis):
with self.test_session():
inx = constant_op.constant(np_input.tolist())
xs = list(np_input.shape)
y = manip_ops.roll(inx, shift, axis)
# Expected y's shape to be the same
ys = xs
jacob_t, jacob_n = gradient_checker.compute_gradient(
inx, xs, y, ys, x_init_value=np_input)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5)
def _testAll(self, np_input, shift, axis):
self._testRoll(np_input, shift, axis)
if np_input.dtype == np.float32:
self._testGradient(np_input, shift, axis)
def testIntTypes(self):
for t in [np.int32, np.int64]:
self._testAll(np.random.randint(-100, 100, (5)).astype(t), 3, 0)
if NP_ROLL_CAN_MULTISHIFT:
self._testAll(
np.random.randint(-100, 100, (4, 4, 3)).astype(t), [1, -2, 3],
[0, 1, 2])
self._testAll(
np.random.randint(-100, 100, (4, 2, 1, 3)).astype(t), [0, 1, -2],
[1, 2, 3])
def testFloatTypes(self):
for t in [np.float32, np.float64]:
self._testAll(np.random.rand(5).astype(t), 2, 0)
if NP_ROLL_CAN_MULTISHIFT:
self._testAll(np.random.rand(3, 4).astype(t), [1, 2], [1, 0])
self._testAll(np.random.rand(1, 3, 4).astype(t), [1, 0, -3], [0, 1, 2])
def testComplexTypes(self):
for t in [np.complex64, np.complex128]:
x = np.random.rand(4, 4).astype(t)
self._testAll(x + 1j * x, 2, 0)
if NP_ROLL_CAN_MULTISHIFT:
x = np.random.rand(2, 5).astype(t)
self._testAll(x + 1j * x, [1, 2], [1, 0])
x = np.random.rand(3, 2, 1, 1).astype(t)
self._testAll(x + 1j * x, [2, 1, 1, 0], [0, 3, 1, 2])
def testNegativeAxis(self):
self._testAll(np.random.randint(-100, 100, (5)).astype(np.int32), 3, -1)
self._testAll(np.random.randint(-100, 100, (4, 4)).astype(np.int32), 3, -2)
    # Make sure a negative axis satisfies 0 <= axis + dims < dims
with self.test_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"is out of range"):
manip_ops.roll(np.random.randint(-100, 100, (4, 4)).astype(np.int32),
3, -10).eval()
def testRollInputMustVectorHigherRaises(self):
tensor = 7
shift = 1
axis = 0
with self.test_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"input must be 1-D or higher"):
manip_ops.roll(tensor, shift, axis).eval()
def testRollAxisMustBeScalarOrVectorRaises(self):
tensor = [[1, 2], [3, 4]]
shift = 1
axis = [[0, 1]]
with self.test_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"axis must be a scalar or a 1-D vector"):
manip_ops.roll(tensor, shift, axis).eval()
def testRollShiftMustBeScalarOrVectorRaises(self):
tensor = [[1, 2], [3, 4]]
shift = [[0, 1]]
axis = 1
with self.test_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"shift must be a scalar or a 1-D vector"):
manip_ops.roll(tensor, shift, axis).eval()
def testRollShiftAndAxisMustBeSameSizeRaises(self):
tensor = [[1, 2], [3, 4]]
shift = [1]
axis = [0, 1]
with self.test_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"shift and axis must have the same size"):
manip_ops.roll(tensor, shift, axis).eval()
def testRollAxisOutOfRangeRaises(self):
tensor = [1, 2]
shift = 1
axis = 1
with self.test_session():
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"is out of range"):
manip_ops.roll(tensor, shift, axis).eval()
if __name__ == "__main__":
test_lib.main()
| 38.275168 | 80 | 0.639663 |
7948a85bf1e2af8f733a54d6e41d236c50ce9fbb
| 1,858 |
py
|
Python
|
examples/exampleDepthImageOpenCV.py
|
i-zro/pyKinectAzure
|
291c0398169a7cc9143e883b32eea581d77ecae3
|
[
"MIT"
] | null | null | null |
examples/exampleDepthImageOpenCV.py
|
i-zro/pyKinectAzure
|
291c0398169a7cc9143e883b32eea581d77ecae3
|
[
"MIT"
] | null | null | null |
examples/exampleDepthImageOpenCV.py
|
i-zro/pyKinectAzure
|
291c0398169a7cc9143e883b32eea581d77ecae3
|
[
"MIT"
] | null | null | null |
import sys
sys.path.insert(1, '../pyKinectAzure/')
import numpy as np
from pyKinectAzure import pyKinectAzure, _k4a
import cv2
# Path to the module
# TODO: Modify with the path containing the k4a.dll from the Azure Kinect SDK
modulePath = 'C:\\Program Files\\Azure Kinect SDK v1.4.1\\sdk\\windows-desktop\\amd64\\release\\bin\\k4a.dll'
# under x86_64 linux please use r'/usr/lib/x86_64-linux-gnu/libk4a.so'
# In Jetson please use r'/usr/lib/aarch64-linux-gnu/libk4a.so'
if __name__ == "__main__":
# Initialize the library with the path containing the module
pyK4A = pyKinectAzure(modulePath)
# Open device
pyK4A.device_open()
# Modify camera configuration
device_config = pyK4A.config
device_config.color_resolution = _k4a.K4A_COLOR_RESOLUTION_1080P
device_config.depth_mode = _k4a.K4A_DEPTH_MODE_WFOV_2X2BINNED
print(device_config)
# Start cameras using modified configuration
pyK4A.device_start_cameras(device_config)
k = 0
while True:
# Get capture
pyK4A.device_get_capture()
# Get the depth image from the capture
depth_image_handle = pyK4A.capture_get_depth_image()
# Check the image has been read correctly
if depth_image_handle:
# Read and convert the image data to numpy array:
depth_image = pyK4A.image_convert_to_numpy(depth_image_handle)
            depth_color_image = cv2.convertScaleAbs(depth_image, alpha=0.05)  # alpha is fitted by visual comparison with Azure k4aviewer results
            depth_color_image = cv2.applyColorMap(depth_color_image, cv2.COLORMAP_JET)
            cv2.namedWindow('Colorized Depth Image', cv2.WINDOW_NORMAL)
            cv2.imshow('Colorized Depth Image', depth_color_image)
k = cv2.waitKey(1)
# Release the image
pyK4A.image_release(depth_image_handle)
pyK4A.capture_release()
if k==27: # Esc key to stop
break
pyK4A.device_stop_cameras()
pyK4A.device_close()
| 29.967742 | 138 | 0.770721 |
7948a86d5cd5800ac8b7aa2e34a4234d89ec72e2
| 4,822 |
py
|
Python
|
tests/tree_build_tests.py
|
ascott1/regulations-parser
|
1d653ec2d78c9cbfd3b0c651788e5ab14dcc76ca
|
[
"CC0-1.0"
] | 1 |
2019-12-29T17:52:32.000Z
|
2019-12-29T17:52:32.000Z
|
tests/tree_build_tests.py
|
ascott1/regulations-parser
|
1d653ec2d78c9cbfd3b0c651788e5ab14dcc76ca
|
[
"CC0-1.0"
] | null | null | null |
tests/tree_build_tests.py
|
ascott1/regulations-parser
|
1d653ec2d78c9cbfd3b0c651788e5ab14dcc76ca
|
[
"CC0-1.0"
] | null | null | null |
#vim: set encoding=utf-8
from unittest import TestCase
from regparser.tree.struct import Node, NodeEncoder
from regparser.tree.build import *
class TreeBuildTest(TestCase):
def test_find_cfr_part(self):
text = "PART 202-Content\nSome text here\n"
text += "This has 201.44 in it. But also 203.33\n"
text += "But then, 201.33 returns."
self.assertEqual(202, find_cfr_part(text))
def test_build_whole_regtree(self):
"""Integration test for the plain-text regulation tree parser"""
text = "PART 200-Regulation Q\n"
text += u"§ 200.1 First section.\n"
text += "(a) First par\n"
text += "(b) Second par\n"
text += u"§ 200.2 Second section.\n"
text += "Content without sub pars\n"
text += "Appendix A to Part 200 - Appendix Title\n"
text += "A-1 Appendix 1\n"
text += "(a) Appendix par 1\n"
text += "Supplement I to Part 200 - Official Interpretations\n"
text += "Section 200.2 Second section\n"
text += "2(a)(5) First par\n"
text += "1. Commentary 1\n"
text += "2. Commentary 2\n"
node201 = Node("\n", label=['200', '1'],
title=u"§ 200.1 First section.", children=[
Node(u"(a) First par\n", label=["200", "1", "a"]),
Node(u"(b) Second par\n", label=["200", "1", "b"])
])
node202 = Node("\nContent without sub pars\n", label=["200", "2"],
title=u"§ 200.2 Second section.")
nodeA = Node(
"\n", label=["200", "A"], node_type=Node.APPENDIX,
title="Appendix A to Part 200 - Appendix Title", children=[
Node("\n",
label=["200", "A", "1"],
title="A-1 Appendix 1",
node_type=Node.APPENDIX,
children=[Node("(a) Appendix par 1\n",
node_type=Node.APPENDIX,
label=["200", "A", "1", "a"])])
]
)
nodeI1 = Node('1. Commentary 1\n', node_type=Node.INTERP,
label=['200', '2', 'a', '5', Node.INTERP_MARK, '1'])
nodeI2 = Node('2. Commentary 2\n', node_type=Node.INTERP,
label=['200', '2', 'a', '5', Node.INTERP_MARK, '2'])
nodeI = Node(
'\n', label=['200', Node.INTERP_MARK], node_type=Node.INTERP,
title='Supplement I to Part 200 - Official Interpretations',
children=[
Node('\n', label=['200', '2', Node.INTERP_MARK],
node_type=Node.INTERP,
title='Section 200.2 Second section',
children=[
Node('\n', label=['200', '2', 'a', '5',
Node.INTERP_MARK],
node_type=Node.INTERP, title='2(a)(5) First par',
children=[nodeI1, nodeI2])
])
]
)
nodeEP = Node('', label=['200', 'Subpart'], title='',
children=[node201, node202], node_type=Node.EMPTYPART)
res = build_whole_regtree(text)
# Convert to JSON so we can ignore some unicode issues
enc = NodeEncoder(sort_keys=True)
self.assertEqual(
            enc.encode(res),
enc.encode(Node("\n", label=["200"], title="PART 200-Regulation Q",
children=[nodeEP, nodeA, nodeI]))
)
def test_build_whole_regtree_missing_interp(self):
"""Not all regs have an interpretation section."""
text = "PART 200-Regulation Q\n"
text += u"§ 200.1 First section.\n"
text += "Section content\n"
text += "Appendix A to Part 200 - Appendix Title\n"
text += "Appendix content"
node200_1 = Node("\nSection content\n", label=['200', '1'],
title=u"§ 200.1 First section.", children=[],
node_type=Node.REGTEXT)
nodeA = Node("\nAppendix content", label=["200", "A"],
title="Appendix A to Part 200 - Appendix Title",
children=[], node_type=Node.APPENDIX)
nodeEP = Node('', label=['200', 'Subpart'], title='',
children=[node200_1], node_type=Node.EMPTYPART)
res = build_whole_regtree(text)
# Convert to JSON so we can ignore some unicode issues
enc = NodeEncoder(sort_keys=True)
self.assertEqual(
            enc.encode(res),
enc.encode(Node("\n", label=["200"], title="PART 200-Regulation Q",
children=[nodeEP, nodeA]))
)
| 44.238532 | 79 | 0.502074 |
7948a8dee45b1c0510e1492c13b0b678118d8c22
| 2,138 |
py
|
Python
|
ripper/actions/attack.py
|
alexmon1989/russia_ddos
|
6bee2718a4d9fb9a495ffe7063a3dfc68bdafa0d
|
[
"MIT"
] | 199 |
2022-02-28T23:28:02.000Z
|
2022-03-30T18:00:45.000Z
|
ripper/actions/attack.py
|
alexmon1989/russia_ddos
|
6bee2718a4d9fb9a495ffe7063a3dfc68bdafa0d
|
[
"MIT"
] | 14 |
2022-03-05T21:48:34.000Z
|
2022-03-18T12:28:36.000Z
|
ripper/actions/attack.py
|
alexmon1989/russia_ddos
|
6bee2718a4d9fb9a495ffe7063a3dfc68bdafa0d
|
[
"MIT"
] | 40 |
2022-03-02T00:19:31.000Z
|
2022-03-28T01:48:09.000Z
|
import threading
from threading import Thread, Event
from ripper.actions.attack_method import AttackMethod
from ripper.actions.http_bypass import HttpBypass
from ripper.actions.http_flood import HttpFlood
from ripper.actions.tcp_flood import TcpFlood
from ripper.actions.udp_flood import UdpFlood
from ripper.context.events_journal import EventsJournal
# Forward Reference
Context = 'Context'
Target = 'Target'
events_journal = EventsJournal()
# noinspection PyTypeChecker
attack_methods: list[AttackMethod] = [
UdpFlood,
TcpFlood,
HttpFlood,
HttpBypass,
]
attack_method_labels: list[str] = list(map(lambda am: am.label, attack_methods))
def attack_method_factory(_ctx: Context, target: Target):
attack_method_name = target.attack_method
# events_journal.info(f'Set attack method to {target.attack_method}', target=target)
if attack_method_name == 'udp-flood':
return UdpFlood(target=target, context=_ctx)
elif attack_method_name == 'http-flood':
return HttpFlood(target=target, context=_ctx)
elif attack_method_name == 'tcp-flood':
return TcpFlood(target=target, context=_ctx)
elif attack_method_name == 'http-bypass':
return HttpBypass(target=target, context=_ctx)
# Dangerous, may lead to exception
return None
class Attack(Thread):
"""This class creates threads with specified attack method."""
_ctx: Context
target: Target
stop_event: Event = None
def __init__(self, _ctx: Context, target: Target):
"""
:param target: Target IPv4 address and destination port.
:param method: Attack method.
"""
Thread.__init__(self, daemon=True)
self._ctx = _ctx
self.target = target
self.target.add_attack_thread(self)
self.stop_event = threading.Event()
def stop(self):
self.stop_event.set()
def run(self):
self.target.init()
runner = attack_method_factory(_ctx=self._ctx, target=self.target)
if self._ctx.dry_run:
runner()
exit(0)
while not self.stop_event.is_set():
runner()
| 28.891892 | 88 | 0.696445 |
7948a9328ce96d6bfe1e08f5e982d07c27c362a5
| 7,094 |
py
|
Python
|
visualize/grad_cam.py
|
apyrros/HCC-comorbidities
|
fd74fb2f1438bc741cfe6728c5cb64737bc99d68
|
[
"MIT"
] | null | null | null |
visualize/grad_cam.py
|
apyrros/HCC-comorbidities
|
fd74fb2f1438bc741cfe6728c5cb64737bc99d68
|
[
"MIT"
] | null | null | null |
visualize/grad_cam.py
|
apyrros/HCC-comorbidities
|
fd74fb2f1438bc741cfe6728c5cb64737bc99d68
|
[
"MIT"
] | 1 |
2021-09-15T14:16:35.000Z
|
2021-09-15T14:16:35.000Z
|
#!/usr/bin/env python
# coding: utf-8
#
# Author: Kazuto Nakashima
# URL: http://kazuto1011.github.io
# Created: 2017-05-26
from collections.abc import Sequence
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from tqdm import tqdm
class _BaseWrapper(object):
def __init__(self, model):
super(_BaseWrapper, self).__init__()
self.device = next(model.parameters()).device
self.model = model
self.handlers = [] # a set of hook function handlers
def _encode_one_hot(self, ids):
one_hot = torch.zeros_like(self.outputs).to(self.device)
one_hot.scatter_(1, ids, 1.0)
return one_hot
def forward(self, image):
self.image_shape = image.shape[2:]
self.outputs = self.model(image)
self.scores = torch.zeros_like(self.outputs).to(self.device)
for i in range(0, 33, 3):
s = i
e = i + 3
self.scores[:, s:e] = F.softmax(self.outputs[:, s:e], dim=-1)
self.scores[:, 33:] = self.outputs[:, 33:]
return self.outputs, self.scores
def backward(self, ids):
"""
Class-specific backpropagation
"""
one_hot = self._encode_one_hot(ids)
self.model.zero_grad()
self.outputs.backward(gradient=one_hot, retain_graph=True)
def generate(self):
raise NotImplementedError
def remove_hook(self):
"""
Remove all the forward/backward hook functions
"""
for handle in self.handlers:
handle.remove()
class BackPropagation(_BaseWrapper):
def forward(self, image):
self.image = image.requires_grad_()
return super(BackPropagation, self).forward(self.image)
def generate(self):
gradient = self.image.grad.clone()
self.image.grad.zero_()
return gradient
class GuidedBackPropagation(BackPropagation):
"""
"Striving for Simplicity: the All Convolutional Net"
https://arxiv.org/pdf/1412.6806.pdf
Look at Figure 1 on page 8.
"""
def __init__(self, model):
super(GuidedBackPropagation, self).__init__(model)
def backward_hook(module, grad_in, grad_out):
# Cut off negative gradients
if isinstance(module, nn.ReLU):
return (F.relu(grad_in[0]),)
for module in self.model.named_modules():
self.handlers.append(module[1].register_backward_hook(backward_hook))
class Deconvnet(BackPropagation):
"""
"Striving for Simplicity: the All Convolutional Net"
https://arxiv.org/pdf/1412.6806.pdf
Look at Figure 1 on page 8.
"""
def __init__(self, model):
super(Deconvnet, self).__init__(model)
def backward_hook(module, grad_in, grad_out):
# Cut off negative gradients and ignore ReLU
if isinstance(module, nn.ReLU):
return (F.relu(grad_out[0]),)
for module in self.model.named_modules():
self.handlers.append(module[1].register_backward_hook(backward_hook))
class GradCAM(_BaseWrapper):
"""
"Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization"
https://arxiv.org/pdf/1610.02391.pdf
Look at Figure 2 on page 4
"""
def __init__(self, model, candidate_layers=None):
super(GradCAM, self).__init__(model)
self.fmap_pool = {}
self.grad_pool = {}
self.candidate_layers = candidate_layers # list
def save_fmaps(key):
def forward_hook(module, input, output):
self.fmap_pool[key] = output.detach()
return forward_hook
def save_grads(key):
def backward_hook(module, grad_in, grad_out):
self.grad_pool[key] = grad_out[0].detach()
return backward_hook
# If any candidates are not specified, the hook is registered to all the layers.
for name, module in self.model.named_modules():
if self.candidate_layers is None or name in self.candidate_layers:
self.handlers.append(module.register_forward_hook(save_fmaps(name)))
self.handlers.append(module.register_backward_hook(save_grads(name)))
def _find(self, pool, target_layer):
if target_layer in pool.keys():
return pool[target_layer]
else:
raise ValueError("Invalid layer name: {}".format(target_layer))
def generate(self, target_layer):
fmaps = self._find(self.fmap_pool, target_layer)
grads = self._find(self.grad_pool, target_layer)
if len(grads.shape) == 2:
grads = grads.unsqueeze(-1).unsqueeze(-1)
weights = F.adaptive_avg_pool2d(grads, 1)
gcam = torch.mul(fmaps, weights).sum(dim=1, keepdim=True)
gcam = F.relu(gcam)
gcam = F.interpolate(
gcam, self.image_shape, mode="bilinear", align_corners=False
)
B, C, H, W = gcam.shape
gcam = gcam.view(B, -1)
gcam -= gcam.min(dim=1, keepdim=True)[0]
gcam /= gcam.max(dim=1, keepdim=True)[0]
gcam = gcam.view(B, C, H, W)
return gcam
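# A usage sketch for the GradCAM wrapper above, added for illustration only; the
# model, input tensor and target layer name ("layer4") are assumptions and not
# part of the original module:
#
#   gcam = GradCAM(model, candidate_layers=["layer4"])
#   outputs, scores = gcam.forward(images)            # images: (B, C, H, W) tensor
#   ids = scores[:, 0:3].argmax(dim=1, keepdim=True)  # e.g. a class from the first softmax chunk
#   gcam.backward(ids=ids)
#   heatmaps = gcam.generate(target_layer="layer4")   # (B, 1, H, W), scaled to [0, 1]
#   gcam.remove_hook()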
def occlusion_sensitivity(
model, images, ids, mean=None, patch=35, stride=1, n_batches=128
):
"""
"Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization"
https://arxiv.org/pdf/1610.02391.pdf
Look at Figure A5 on page 17
Originally proposed in:
"Visualizing and Understanding Convolutional Networks"
https://arxiv.org/abs/1311.2901
"""
torch.set_grad_enabled(False)
model.eval()
mean = mean if mean else 0
patch_H, patch_W = patch if isinstance(patch, Sequence) else (patch, patch)
pad_H, pad_W = patch_H // 2, patch_W // 2
# Padded image
images = F.pad(images, (pad_W, pad_W, pad_H, pad_H), value=mean)
B, _, H, W = images.shape
new_H = (H - patch_H) // stride + 1
new_W = (W - patch_W) // stride + 1
# Prepare sampling grids
anchors = []
grid_h = 0
while grid_h <= H - patch_H:
grid_w = 0
        while grid_w <= W - patch_W:
            # record the anchor at the current position before stepping, so the
            # grid_w == 0 column is occluded like the grid_h == 0 row
            anchors.append((grid_h, grid_w))
            grid_w += stride
        grid_h += stride
# Baseline score without occlusion
baseline = model(images).detach().gather(1, ids)
# Compute per-pixel logits
scoremaps = []
for i in tqdm(range(0, len(anchors), n_batches), leave=False):
batch_images = []
batch_ids = []
for grid_h, grid_w in anchors[i : i + n_batches]:
images_ = images.clone()
images_[..., grid_h : grid_h + patch_H, grid_w : grid_w + patch_W] = mean
batch_images.append(images_)
batch_ids.append(ids)
batch_images = torch.cat(batch_images, dim=0)
batch_ids = torch.cat(batch_ids, dim=0)
scores = model(batch_images).detach().gather(1, batch_ids)
scoremaps += list(torch.split(scores, B))
diffmaps = torch.cat(scoremaps, dim=1) - baseline
diffmaps = diffmaps.view(B, new_H, new_W)
return diffmaps
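# A usage sketch for occlusion_sensitivity, added for illustration only; the
# model, images and ids tensors below are assumptions:
#
#   ids = scores.argmax(dim=1, keepdim=True)          # one target class index per image
#   sensitivity = occlusion_sensitivity(model, images, ids, patch=35, stride=8)
#   # -> (B, new_H, new_W) map of score changes relative to the unoccluded baseline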
| 31.528889 | 88 | 0.622075 |
7948a9b1ab44c65b3cc37fd4ed08c5d7d0f9eeac
| 5,884 |
py
|
Python
|
indy_node/test/catchup/test_requests_post_new_node_catchup.py
|
imadharb75/mynewid
|
b417edc71bfe38eb167909509ce2a2de526bc83e
|
[
"Apache-2.0"
] | null | null | null |
indy_node/test/catchup/test_requests_post_new_node_catchup.py
|
imadharb75/mynewid
|
b417edc71bfe38eb167909509ce2a2de526bc83e
|
[
"Apache-2.0"
] | null | null | null |
indy_node/test/catchup/test_requests_post_new_node_catchup.py
|
imadharb75/mynewid
|
b417edc71bfe38eb167909509ce2a2de526bc83e
|
[
"Apache-2.0"
] | 1 |
2020-03-19T00:37:32.000Z
|
2020-03-19T00:37:32.000Z
|
from plenum.common.constants import DOMAIN_LEDGER_ID
from plenum.common.util import randomString
from plenum.test.node_catchup.helper import checkNodeDataForEquality, \
waitNodeDataEquality
from plenum.test.pool_transactions.helper import sdk_add_new_nym
from plenum.test.test_node import ensure_node_disconnected, checkNodesConnected
from indy_node.test.conftest import sdk_node_theta_added
from indy_node.test.helper import TestNode, sdk_add_raw_attribute
from indy_common.config_helper import NodeConfigHelper
def test_new_node_catchup_update_projection(looper,
nodeSet, tconf, tdir,
sdk_pool_handle,
sdk_wallet_trustee,
allPluginsPath,
some_transactions_done):
"""
A node which receives txns from catchup updates both ledger and projection
4 nodes start up and some txns happen, after txns are done, new node joins
and starts catching up, the node should not process requests while catchup
is in progress. Make sure the new requests are coming from the new NYMs
added while the node was offline or catching up.
"""
# Create a new node and stop it.
new_steward_wallet, new_node = sdk_node_theta_added(looper,
nodeSet,
tdir,
tconf,
sdk_pool_handle,
sdk_wallet_trustee,
allPluginsPath,
node_config_helper_class=NodeConfigHelper,
testNodeClass=TestNode)
waitNodeDataEquality(looper, new_node, *nodeSet[:-1])
ta_count = 2
np_count = 2
new_txn_count = 2 * ta_count + np_count # Since ATTRIB txn is done for TA
old_ledger_sizes = {}
new_ledger_sizes = {}
old_projection_sizes = {}
new_projection_sizes = {}
old_seq_no_map_sizes = {}
new_seq_no_map_sizes = {}
def get_ledger_size(node):
return len(node.domainLedger)
def get_projection_size(node):
domain_state = node.getState(DOMAIN_LEDGER_ID)
return len(domain_state.as_dict)
def get_seq_no_map_size(node):
return node.seqNoDB.size
def fill_counters(ls, ps, ss, nodes):
for n in nodes:
ls[n.name] = get_ledger_size(n)
ps[n.name] = get_projection_size(n)
ss[n.name] = get_seq_no_map_size(n)
def check_sizes(nodes):
for node in nodes:
assert new_ledger_sizes[node.name] - \
old_ledger_sizes[node.name] == new_txn_count
assert new_projection_sizes[node.name] - \
old_projection_sizes[node.name] == new_txn_count
assert new_seq_no_map_sizes[node.name] - \
old_seq_no_map_sizes[node.name] == new_txn_count
# Stop a node and note down the sizes of ledger and projection (state)
other_nodes = nodeSet[:-1]
fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
other_nodes)
new_node.cleanupOnStopping = False
new_node.stop()
looper.removeProdable(new_node)
ensure_node_disconnected(looper, new_node, other_nodes)
trust_anchors = []
attributes = []
for i in range(ta_count):
trust_anchors.append(
sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee,
role='TRUST_ANCHOR', alias='TA' + str(i)))
attributes.append((randomString(6), randomString(10)))
sdk_add_raw_attribute(looper, sdk_pool_handle, trust_anchors[-1],
*attributes[-1])
non_privileged = []
for i in range(np_count):
non_privileged.append(
sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee,
alias='NP' + str(i)))
checkNodeDataForEquality(nodeSet[0], *other_nodes)
fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
other_nodes)
# The size difference should be same as number of new NYM txns
check_sizes(other_nodes)
config_helper = NodeConfigHelper(new_node.name, tconf, chroot=tdir)
new_node = TestNode(
new_node.name,
config_helper=config_helper,
config=tconf,
pluginPaths=allPluginsPath,
ha=new_node.nodestack.ha,
cliha=new_node.clientstack.ha)
looper.add(new_node)
nodeSet[-1] = new_node
fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
[new_node])
looper.run(checkNodesConnected(nodeSet))
waitNodeDataEquality(looper, new_node, *other_nodes)
fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
[new_node])
check_sizes([new_node])
# Set the old counters to be current ledger and projection size
fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
nodeSet)
more_nyms_count = 2
for wh in trust_anchors:
for i in range(more_nyms_count):
non_privileged.append(sdk_add_new_nym(looper, sdk_pool_handle, wh,
alias='NP1' + str(i)))
# The new node should process transactions done by Nyms added to its
# ledger while catchup
fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
nodeSet)
new_txn_count = more_nyms_count * len(trust_anchors)
check_sizes(nodeSet)
| 42.948905 | 98 | 0.617267 |
7948ab54a21c528d8e19f8b45141a191f75d8d67
| 12,446 |
py
|
Python
|
floodlight/io/tracab.py
|
manuba95/floodlight
|
02c64763e78b14ef37555e1dc435256dbd4eca09
|
[
"MIT"
] | 26 |
2022-01-25T22:15:22.000Z
|
2022-03-02T11:06:29.000Z
|
floodlight/io/tracab.py
|
manuba95/floodlight
|
02c64763e78b14ef37555e1dc435256dbd4eca09
|
[
"MIT"
] | 12 |
2021-11-17T15:42:28.000Z
|
2022-03-28T19:59:24.000Z
|
floodlight/io/tracab.py
|
manuba95/floodlight
|
02c64763e78b14ef37555e1dc435256dbd4eca09
|
[
"MIT"
] | 8 |
2021-11-03T09:01:22.000Z
|
2022-03-23T10:43:14.000Z
|
from pathlib import Path
from typing import Dict, Tuple, Union
import numpy as np
from lxml import etree
from floodlight.core.code import Code
from floodlight.core.pitch import Pitch
from floodlight.core.xy import XY
def _read_metadata(filepath_metadata: Union[str, Path]) -> Tuple[Dict, Dict, Pitch]:
"""Reads TRACAB's metadata file and extracts information about match metainfo,
periods and the pitch.
Parameters
----------
filepath_metadata: str or pathlib.Path
Full path to metadata.xml file.
Returns
-------
metainfo: Dict
Dictionary with metainformation such as framerate.
periods: Dict
Dictionary with start and endframes:
`periods[segment] = (startframe, endframe)`.
pitch: Pitch
Pitch object with actual pitch length and width.
"""
# set up XML tree
tree = etree.parse(str(filepath_metadata))
root = tree.getroot()
# parse XML file, extract matchinfo and period start/endframes
metadata = {}
periods = {}
attributes = root.find("match").attrib
framerate = attributes.get("iFrameRateFps")
metadata["framerate"] = int(framerate) if framerate else None
length = attributes.get("fPitchXSizeMeters")
metadata["length"] = float(length) if length else None
width = attributes.get("fPitchYSizeMeters")
metadata["width"] = float(width) if width else None
for elem in root.findall("match/period"):
if elem.attrib["iEndFrame"] != "0":
segment = "HT" + elem.attrib["iId"]
start = int(elem.attrib["iStartFrame"])
end = int(elem.attrib["iEndFrame"])
periods[segment] = (start, end)
pitch = Pitch.from_template(
"tracab",
length=float(metadata["length"]),
width=float(metadata["width"]),
sport="football",
)
return metadata, periods, pitch
def _read_dat_single_line(
package: str,
) -> Tuple[
int, Dict[str, Dict[str, Tuple[float, float, float]]], Dict[str, Union[str, tuple]]
]:
"""Extracts all relevant information from a single line of TRACAB's .dat file
(i.e. one frame of data).
Parameters
----------
package: str
One full line from TRACAB's .dat-file, equals one "package" according to the
file-format documentation.
Returns
-------
frame_number: int
The number of current frame.
positions: Dict[str, Dict[str, Tuple[float, float, float]]]
Nested dictionary that stores player position information for each team and
player. Has the form `positions[team][jID] = (x, y, speed)`.
ball: Dict[str]
Dictionary with ball information. Has keys 'position', 'possession' and
'ballstatus'.
"""
# bins
positions = {"Home": {}, "Away": {}, "Other": {}}
ball = {}
# split package to chunks
chunk1, chunk2, chunk3, _ = package.split(sep=":")
# first chunk (frame number)
frame_number = int(chunk1)
# second chunk (player positions)
targets = chunk2[:-1].split(sep=";")
for t in targets:
player_data = t.split(sep=",")
# type conversions
team, system_id, jID = map(lambda x: int(x), player_data[:3])
x, y, speed = map(lambda x: float(x), player_data[3:])
if team == 1:
team = "Home"
elif team == 0:
team = "Away"
else:
team = "Other"
# assign
positions[team][jID] = (x, y, speed)
# third chunk (ball data)
ball_data = chunk3.split(sep=",")[:6]
ball["position"] = tuple(map(lambda x: float(x), ball_data[:2]))
ball["possession"] = ball_data[4]
ball["ballstatus"] = ball_data[5][0]
return frame_number, positions, ball
def _frame_in_period(
frame_number: int, periods: Dict[str, Tuple[int, int]]
) -> Union[str, None]:
"""Checks if a given frame is within the range of start- and endframe for all
periods and returns the name of the period the frame belongs to, or None if it
can't find any.
Parameters
----------
frame_number: int
Frame number to be checked.
periods: Dict[str, Tuple[int, int]]
Dictionary with period start- and endframes of the form
`periods[segment] = (startframe, endframe)` as it is returned by
:meth:`floodlight.io.tracab._read_metadata`.
Returns
-------
segment: str or None
Name of the segment the frame belongs to, or None if it does not belong to any
of the supplied segments.
"""
# determine current segment by iterating through all segments (i)
segment = None
for i in periods.keys():
if frame_number in range(periods[i][0], periods[i][1] + 1):
segment = i
return segment
def _read_dat_jersey_numbers(filepath_dat: Union[str, Path]):
"""Reads entire TRACAB .dat file and extracts unique set of jIDs (jerseynumbers)
for both teams.
Parameters
----------
filepath_dat: str or pathlib.Path
Full path to .dat file.
Returns
-------
home_jIDs: set
away_jIDs: set
"""
# bins
home_jIDs = set()
away_jIDs = set()
# loop
with open(str(filepath_dat), "r") as f:
while True:
package = f.readline()
# terminate if at end of file
if len(package) == 0:
break
# read line
_, positions, _ = _read_dat_single_line(package)
# Extract jersey numbers
home_jIDs |= positions["Home"].keys()
away_jIDs |= positions["Away"].keys()
return home_jIDs, away_jIDs
def create_links_from_dat(filepath_dat: Union[str, Path]) -> Dict[str, Dict[int, int]]:
"""Parses the entire TRACAB .dat file for unique jIDs (jerseynumbers) and creates a
dictionary linking jIDs to xIDs in ascending order.
Parameters
----------
filepath_dat: str or pathlib.Path
Full path to .dat file.
Returns
-------
links: Dict[str, Dict[int, int]]
Link-dictionary of the form `links[team][jID] = xID`.
"""
homejrsy, awayjrsy = _read_dat_jersey_numbers(filepath_dat)
homejrsy = list(homejrsy)
awayjrsy = list(awayjrsy)
homejrsy.sort()
awayjrsy.sort()
links = {
"Home": {jID: xID for xID, jID in enumerate(homejrsy)},
"Away": {jID: xID for xID, jID in enumerate(awayjrsy)},
}
return links
def read_tracab_files(
filepath_dat: Union[str, Path],
filepath_metadata: Union[str, Path],
links: Dict[str, Dict[int, int]] = None,
) -> Tuple[XY, XY, XY, XY, XY, XY, Code, Code, Code, Code, Pitch]:
"""Parse TRACAB files and extract position data, possession and ballstatus codes as
well as pitch information.
ChyronHego's TRACAB system delivers two separate files, a .dat file containing the
actual data as well as a metadata.xml containing information about pitch size,
framerate and start- and endframes of match periods. This function provides a
high-level access to TRACAB data by parsing "the full match" given both files.
Parameters
----------
filepath_dat: str or pathlib.Path
Full path to dat-file.
filepath_metadata: str or pathlib.Path
Full path to metadata.xml file.
links: Dict[str, Dict[int, int]], optional
A link dictionary of the form `links[team][jID] = xID`. Player's are identified
in TRACAB files via jID, and this dictionary is used to map them to a specific
xID in the respective XY objects. Should be supplied if that order matters. If
None is given (default), the links are automatically extracted from the .dat
file at the cost of a second pass through the entire file.
Returns
-------
data_objects: Tuple[XY, XY, XY, XY, XY, XY, Code, Code, Code, Code, Pitch]
XY-, Code-, and Pitch-objects for both teams and both halves. The order is
(home_ht1, home_ht2, away_ht1, away_ht2, ball_ht1, ball_ht2,
possession_ht1, possession_ht2, ballstatus_ht1, ballstatus_ht2, pitch)
"""
# read metadata
metadata, periods, pitch = _read_metadata(filepath_metadata)
segments = list(periods.keys())
# create or check links
if links is None:
links = create_links_from_dat(filepath_dat)
else:
pass
# potential check vs jerseys in dat file
# infer data array shapes
number_of_home_players = max(links["Home"].values()) + 1
number_of_away_players = max(links["Away"].values()) + 1
number_of_frames = {}
for segment in segments:
start = periods[segment][0]
end = periods[segment][1]
number_of_frames[segment] = end - start + 1
# bins
xydata = {}
xydata["Home"] = {
segment: np.full(
[number_of_frames[segment], number_of_home_players * 2], np.nan
)
for segment in segments
}
xydata["Away"] = {
segment: np.full(
[number_of_frames[segment], number_of_away_players * 2], np.nan
)
for segment in segments
}
xydata["Ball"] = {
segment: np.full([number_of_frames[segment], 2], np.nan) for segment in segments
}
codes = {
code: {segment: [] for segment in segments}
for code in ["possession", "ballstatus"]
}
# loop
with open(filepath_dat, "r") as f:
while True:
package = f.readline()
# terminate if at end of file
if len(package) == 0:
break
# read line to get absolute frame (in file), player positions and ball info
frame_abs, positions, ball = _read_dat_single_line(package)
# check if frame is in any segment
segment = _frame_in_period(frame_abs, periods)
if segment is None:
# skip line if not
continue
else:
# otherwise calculate relative frame (in respective segment)
frame_rel = frame_abs - periods[segment][0]
# insert (x,y)-data into correct np.array, at correct place (t, xID)
for team in ["Home", "Away"]:
for jID in positions[team].keys():
# map jersey number to array index and infer respective columns
x_col = (links[team][jID]) * 2
y_col = (links[team][jID]) * 2 + 1
xydata[team][segment][frame_rel, x_col] = positions[team][jID][0]
xydata[team][segment][frame_rel, y_col] = positions[team][jID][1]
# get ball data
xydata["Ball"][segment][
frame_rel,
] = ball["position"]
codes["possession"][segment].append(ball.get("possession", np.nan))
codes["ballstatus"][segment].append(ball.get("ballstatus", np.nan))
# create XY objects
home_ht1 = XY(xy=xydata["Home"]["HT1"], framerate=metadata["framerate"])
home_ht2 = XY(xy=xydata["Home"]["HT2"], framerate=metadata["framerate"])
away_ht1 = XY(xy=xydata["Away"]["HT1"], framerate=metadata["framerate"])
away_ht2 = XY(xy=xydata["Away"]["HT2"], framerate=metadata["framerate"])
ball_ht1 = XY(xy=xydata["Ball"]["HT1"], framerate=metadata["framerate"])
ball_ht2 = XY(xy=xydata["Ball"]["HT2"], framerate=metadata["framerate"])
# create Code objects
possession_ht1 = Code(
code=np.array(codes["possession"]["HT1"]),
name="possession",
definitions={"H": "Home", "A": "Away"},
framerate=metadata["framerate"],
)
possession_ht2 = Code(
code=np.array(codes["possession"]["HT2"]),
name="possession",
definitions={"H": "Home", "A": "Away"},
framerate=metadata["framerate"],
)
ballstatus_ht1 = Code(
code=np.array(codes["ballstatus"]["HT1"]),
name="ballstatus",
definitions={"D": "Dead", "A": "Alive"},
framerate=metadata["framerate"],
)
ballstatus_ht2 = Code(
code=np.array(codes["ballstatus"]["HT2"]),
name="ballstatus",
definitions={"D": "Dead", "A": "Alive"},
framerate=metadata["framerate"],
)
data_objects = (
home_ht1,
home_ht2,
away_ht1,
away_ht2,
ball_ht1,
ball_ht2,
possession_ht1,
possession_ht2,
ballstatus_ht1,
ballstatus_ht2,
pitch,
)
return data_objects
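# A usage sketch for read_tracab_files, added for illustration only; the file
# paths below are hypothetical:
#
#   (home_ht1, home_ht2, away_ht1, away_ht2, ball_ht1, ball_ht2,
#    possession_ht1, possession_ht2, ballstatus_ht1, ballstatus_ht2,
#    pitch) = read_tracab_files("positions.dat", "metadata.xml")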
| 32.83905 | 88 | 0.607183 |
7948ab87300d8415a52f92d678047c0f7fcd10fe
| 3,136 |
py
|
Python
|
plugins/youtube.py
|
TeamPeggle/ppp-helpdesk
|
6e5551ab674b3096c0fdf6ef4537215cdd27b4fe
|
[
"Unlicense"
] | 2 |
2016-10-22T20:45:57.000Z
|
2017-08-06T20:35:43.000Z
|
plugins/youtube.py
|
TeamPeggle/ppp-helpdesk
|
6e5551ab674b3096c0fdf6ef4537215cdd27b4fe
|
[
"Unlicense"
] | 9 |
2015-01-12T21:33:24.000Z
|
2018-04-02T15:59:22.000Z
|
plugins/youtube.py
|
TeamPeggle/ppp-helpdesk
|
6e5551ab674b3096c0fdf6ef4537215cdd27b4fe
|
[
"Unlicense"
] | 1 |
2017-08-01T22:13:33.000Z
|
2017-08-01T22:13:33.000Z
|
from __future__ import unicode_literals
from builtins import str
import re
import time
from calendar import timegm
from util import hook, http, timesince
youtube_re = (r'(?:youtube.*?(?:v=|/v/)|youtu\.be/|yooouuutuuube.*?id=)'
'([-_a-z0-9]+)', re.I)
BASE_URL = 'https://www.googleapis.com/youtube/v3/'
INFO_URL = BASE_URL + 'videos?part=snippet,contentDetails,statistics&hl=en'
SEARCH_API_URL = BASE_URL + 'search'
VIDEO_URL = 'https://youtu.be/%s'
def get_video_description(vid_id, api_key):
j = http.get_json(INFO_URL, id=vid_id, key=api_key)
if not j['pageInfo']['totalResults']:
return
j = j['items'][0]
duration = j['contentDetails']['duration'].replace('PT', '').lower()
published = j['snippet']['publishedAt'].replace('.000Z', 'Z')
published = time.strptime(published, "%Y-%m-%dT%H:%M:%SZ")
published_since = timesince.timesince(timegm(published))
views = 0
likes = 0
dislikes = 0
if 'statistics' in j:
views = group_int_digits(j['statistics'].get('viewCount', 0), ',')
likes = j['statistics'].get('likeCount', 0)
dislikes = j['statistics'].get('dislikeCount', 0)
channel_title = j['snippet']['channelTitle']
title = j['snippet']['title']
if 'localized' in j['snippet']:
title = j['snippet']['localized'].get('title') or title
out = (
'\x02{title}\x02 - length \x02{duration}\x02 - '
'{likes}\u2191{dislikes}\u2193 - '
'\x02{views}\x02 views - '
'published \x02{published_since}\x02 ago by \x02{channel_title}\x02'
).format(
title=title,
duration=duration,
likes=likes,
dislikes=dislikes,
views=views,
published_since=published_since,
channel_title=channel_title
)
if 'rating' in j:
out += ' - rated \x02%.2f/5.0\x02 (%d)' % (j['rating'],
j['ratingCount'])
if 'contentRating' in j:
out += ' - \x034NSFW\x02'
return out
def group_int_digits(number, delimiter=' ', grouping=3):
base = str(number).strip()
builder = []
while base:
builder.append(base[-grouping:])
base = base[:-grouping]
builder.reverse()
return delimiter.join(builder)
@hook.api_key('google')
@hook.regex(*youtube_re)
def youtube_url(match, api_key=None):
return get_video_description(match.group(1), api_key)
@hook.api_key('google')
@hook.command('yt')
@hook.command('y')
@hook.command
def youtube(inp, api_key=None):
'.youtube <query> -- returns the first YouTube search result for <query>'
params = {
'key': api_key,
'fields': 'items(id(videoId))',
'part': 'snippet',
'type': 'video',
'maxResults': '1',
'q': inp,
}
j = http.get_json(SEARCH_API_URL, **params)
if 'error' in j:
return 'error while performing the search'
results = j.get("items")
if not results:
return 'no results found'
vid_id = j['items'][0]['id']['videoId']
return get_video_description(vid_id, api_key) + " - " + VIDEO_URL % vid_id
| 26.803419 | 78 | 0.601084 |
7948ac3e86deed7ce0390f7cce82578527dfdeda
| 4,320 |
py
|
Python
|
registry/tests/log_test.py
|
jbn/quilt
|
67960d2739ce5ea34c05febbe8f2bb9f75e211a8
|
[
"Apache-2.0"
] | null | null | null |
registry/tests/log_test.py
|
jbn/quilt
|
67960d2739ce5ea34c05febbe8f2bb9f75e211a8
|
[
"Apache-2.0"
] | null | null | null |
registry/tests/log_test.py
|
jbn/quilt
|
67960d2739ce5ea34c05febbe8f2bb9f75e211a8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017 Quilt Data, Inc. All rights reserved.
"""
Log tests
"""
import json
import requests
from unittest.mock import patch
from quilt_server.const import PaymentPlan, PUBLIC
from quilt_server.core import hash_contents, GroupNode, RootNode
from .utils import QuiltTestCase
class LogTestCase(QuiltTestCase):
"""
Test log endpoint.
"""
def setUp(self):
super(LogTestCase, self).setUp()
self.user = "test_user"
self.pkg = "pkg"
self.contents_list = [
RootNode(dict(
foo=GroupNode(dict())
)),
RootNode(dict(
bar=GroupNode(dict())
)),
RootNode(dict(
baz=GroupNode(dict())
))
]
# Upload three package instances.
for contents in self.contents_list:
self.put_package(self.user, self.pkg, contents, tag_latest=True)
def make_version(i, version):
resp = self.app.put(
'/api/version/{usr}/{pkg}/{version}'.format(
usr=self.user,
pkg=self.pkg,
version=version
),
data=json.dumps(dict(
hash=hash_contents(self.contents_list[i])
)),
content_type='application/json',
headers={
'Authorization': self.user
}
)
assert resp.status_code == requests.codes.ok
make_version(0, '1.0.0')
make_version(1, '2.0.0')
make_version(2, '3.0.0')
def testLog(self):
resp = self.app.get(
'/api/log/{usr}/{pkg}/'.format(
usr=self.user,
pkg=self.pkg
),
headers={
'Authorization': self.user
}
)
assert resp.status_code == requests.codes.ok
data = json.loads(resp.data.decode('utf8'))
logs = data['logs']
assert len(logs) == 3
tag_list = [None, None, ['latest']]
version_list = [['1.0.0'], ['2.0.0'], ['3.0.0']]
for log, contents, tags, versions in zip(logs, self.contents_list, tag_list, version_list):
assert log['author'] == self.user
assert log['hash'] == hash_contents(contents)
assert log['tags'] == tags
assert log['versions'] == versions
@patch('quilt_server.views.ALLOW_ANONYMOUS_ACCESS', True)
def testAccess(self):
sharewith = "share_with"
# Can't view as a user with no access.
resp = self.app.get(
'/api/log/{usr}/{pkg}/'.format(
usr=self.user,
pkg=self.pkg
),
headers={
'Authorization': sharewith
}
)
assert resp.status_code == requests.codes.not_found
# Can't view when not logged in.
resp = self.app.get(
'/api/log/{usr}/{pkg}/'.format(
usr=self.user,
pkg=self.pkg
),
)
assert resp.status_code == requests.codes.not_found
# Share the package.
resp = self._share_package(self.user, self.pkg, sharewith)
assert resp.status_code == requests.codes.ok
# Can view once it's shared.
resp = self.app.get(
'/api/log/{usr}/{pkg}/'.format(
usr=self.user,
pkg=self.pkg
),
headers={
'Authorization': sharewith
}
)
assert resp.status_code == requests.codes.ok
# Still can't view when not logged in.
resp = self.app.get(
'/api/log/{usr}/{pkg}/'.format(
usr=self.user,
pkg=self.pkg
),
)
assert resp.status_code == requests.codes.not_found
# Share the package publicly.
resp = self._share_package(self.user, self.pkg, PUBLIC)
assert resp.status_code == requests.codes.ok
# Can now view when not logged in.
resp = self.app.get(
'/api/log/{usr}/{pkg}/'.format(
usr=self.user,
pkg=self.pkg
),
)
assert resp.status_code == requests.codes.ok
| 28.8 | 99 | 0.502778 |
7948adfbe324abc73698581cc1204bba2764244c
| 771 |
py
|
Python
|
.history/chapter01/python_05_if_condition_20201128215501.py
|
KustomApe/nerdape
|
aef6fb2d1f8c364b26d91bf8570b4487a24de69a
|
[
"MIT"
] | null | null | null |
.history/chapter01/python_05_if_condition_20201128215501.py
|
KustomApe/nerdape
|
aef6fb2d1f8c364b26d91bf8570b4487a24de69a
|
[
"MIT"
] | null | null | null |
.history/chapter01/python_05_if_condition_20201128215501.py
|
KustomApe/nerdape
|
aef6fb2d1f8c364b26d91bf8570b4487a24de69a
|
[
"MIT"
] | null | null | null |
"""[About if statements]
If a condition holds, then do this
"""
# if condition:
#     block to execute
# Used when you want to run some processing only under a certain condition
# The oil has to be changed every 3000 km
distance = 3403
# if distance > 3000:
#     print('Time for an oil change')
# Comparing strings / comparing lists
# if 'abc' == 'ABC':
#     print('1: equal')
# if 'CDE' == 'CDE':
#     print('2: equal')
# if 'あいうえお' == 'あいうえお':
#     print('3: equal')
# if ['apple', 'banana'] == ['apple', 'banana']:
#     print('1: lists are equal')
# if ['apple', 'banana'] == ['APPLE', 'BANANA']:
#     print('2: lists are equal')
# if [1, 2, 3] == ['1', '2', '3']:
#     print('3: lists are equal')
# if [1, 2, 3] == [1, 2, 3]:
#     print('4: lists are equal')
# Searching inside a string / searching for an element in a list
if 'abc' in "ABC":
    print('Found a match!')
if 'ドリフト' in '僕はドリフトが好きです':
    print('Found a match!')
if 'japan' in 'japanese domestic market vehicle':
    print('Found a match!')
# else statement
# elif statement
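# The two placeholders above can be illustrated with a short example. This block
# is an added sketch, not part of the original lesson; the speed value and the
# thresholds are made up.
speed = 80
if speed > 100:
    print('Too fast!')
elif speed > 60:
    print('Cruising speed')
else:
    print('Nice and slow')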
| 17.930233 | 49 | 0.546044 |
7948afaa1f3a9d1099a8051b48e1bb9892292ca7
| 3,549 |
py
|
Python
|
test/functional/p2p_feefilter.py
|
manfromafar/safecash-temp
|
de9623678fe76bc58928c8a36b6491d4d0766588
|
[
"MIT"
] | 3 |
2018-06-05T15:12:08.000Z
|
2018-07-01T04:51:42.000Z
|
test/functional/p2p_feefilter.py
|
manfromafar/safecash-temp
|
de9623678fe76bc58928c8a36b6491d4d0766588
|
[
"MIT"
] | null | null | null |
test/functional/p2p_feefilter.py
|
manfromafar/safecash-temp
|
de9623678fe76bc58928c8a36b6491d4d0766588
|
[
"MIT"
] | 2 |
2018-05-25T13:25:55.000Z
|
2018-06-30T18:14:10.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of feefilter messages."""
from test_framework.mininode import *
from test_framework.test_framework import SafeCashTestFramework
from test_framework.util import *
import time
def hashToHex(hash):
return format(hash, '064x')
# Wait up to 60 secs to see if the testnode has received all the expected invs
def allInvsMatch(invsExpected, testnode):
for x in range(60):
with mininode_lock:
if (sorted(invsExpected) == sorted(testnode.txinvs)):
return True
time.sleep(1)
return False
class TestNode(P2PInterface):
def __init__(self):
super().__init__()
self.txinvs = []
def on_inv(self, message):
for i in message.inv:
if (i.type == 1):
self.txinvs.append(hashToHex(i.hash))
def clear_invs(self):
with mininode_lock:
self.txinvs = []
class FeeFilterTest(SafeCashTestFramework):
def set_test_params(self):
self.num_nodes = 2
def run_test(self):
node1 = self.nodes[1]
node0 = self.nodes[0]
# Get out of IBD
node1.generate(1)
sync_blocks(self.nodes)
# Setup the p2p connections and start up the network thread.
self.nodes[0].add_p2p_connection(TestNode())
network_thread_start()
self.nodes[0].p2p.wait_for_verack()
# Test that invs are received for all txs at feerate of 20 sat/byte
node1.settxfee(Decimal("0.00020000"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
# Set a filter of 15 sat/byte
self.nodes[0].p2p.send_and_ping(msg_feefilter(15000))
# Test that txs are still being received (paying 20 sat/byte)
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
# Change tx fee rate to 10 sat/byte and test they are no longer received
node1.settxfee(Decimal("0.00010000"))
[node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
sync_mempools(self.nodes) # must be sure node 0 has received all txs
# Send one transaction from node0 that should be received, so that we
# we can sync the test on receipt (if node1's txs were relayed, they'd
# be received by the time this node0 tx is received). This is
# unfortunately reliant on the current relay behavior where we batch up
# to 35 entries in an inv, which means that when this next transaction
# is eligible for relay, the prior transactions from node1 are eligible
# as well.
node0.settxfee(Decimal("0.00020000"))
txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
# Remove fee filter and check that txs are received again
self.nodes[0].p2p.send_and_ping(msg_feefilter(0))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
if __name__ == '__main__':
FeeFilterTest().main()
| 37.755319 | 81 | 0.659904 |
7948b0405dacb239f3e9cb1bb9703d56eb60dcee
| 23 |
py
|
Python
|
home.py
|
wuxuechen/news
|
05fe821ed81e3ececc58264acd6e9142e4fd66a2
|
[
"MIT"
] | null | null | null |
home.py
|
wuxuechen/news
|
05fe821ed81e3ececc58264acd6e9142e4fd66a2
|
[
"MIT"
] | null | null | null |
home.py
|
wuxuechen/news
|
05fe821ed81e3ececc58264acd6e9142e4fd66a2
|
[
"MIT"
] | null | null | null |
hi, this is a new home
| 11.5 | 22 | 0.695652 |
7948b090584b208a822b205cc5e11e0d630bb49b
| 599 |
py
|
Python
|
src/_nats/aio/types.py
|
charbonnierg/jetstream.py
|
4d8dc56fc6953d0a28d207b9b162c6f8d0080d37
|
[
"Apache-2.0"
] | 8 |
2021-07-26T10:54:10.000Z
|
2021-12-06T08:41:02.000Z
|
src/_nats/aio/types.py
|
charbonnierg/jetstream.py
|
4d8dc56fc6953d0a28d207b9b162c6f8d0080d37
|
[
"Apache-2.0"
] | 3 |
2021-08-09T10:25:39.000Z
|
2021-12-06T08:40:41.000Z
|
src/_nats/aio/types.py
|
charbonnierg/jetstream.py
|
4d8dc56fc6953d0a28d207b9b162c6f8d0080d37
|
[
"Apache-2.0"
] | 3 |
2021-08-22T01:55:11.000Z
|
2021-09-13T13:51:42.000Z
|
from typing import Dict, List, TypedDict
class ServerInfos(TypedDict, total=False):
server_id: str
server_name: str
version: str
go: str
git_commit: str
host: str
port: int
max_payload: int
proto: int
client_id: int
client_ip: str
auth_required: bool
tls_required: bool
tls_verify: bool
connect_urls: List[str]
ldm: bool
jetstream: bool
headers: Dict[str, str]
nonce: str
class ClientStats(TypedDict):
in_msgs: int
out_msgs: int
in_bytes: int
out_bytes: int
reconnects: int
errors_received: int
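# Usage sketch added for illustration (values are invented, not real NATS data).
# ServerInfos is declared with total=False, so a partially populated dict is a
# valid instance for static type checkers:
def _example_server_infos() -> ServerInfos:
    return {"server_id": "NABC123", "version": "2.6.1", "jetstream": True}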
| 18.151515 | 42 | 0.656093 |
7948b10f4d3e20be1edb9fb958db6e6f9453aa9a
| 864 |
py
|
Python
|
morbitwrapper/__init__.py
|
manuelbb-upb/MorbitWrapper
|
eecd77c33fdbc5e867e10fbe5ec217a2ada6924c
|
[
"MIT"
] | null | null | null |
morbitwrapper/__init__.py
|
manuelbb-upb/MorbitWrapper
|
eecd77c33fdbc5e867e10fbe5ec217a2ada6924c
|
[
"MIT"
] | 3 |
2021-05-25T15:02:44.000Z
|
2021-05-26T09:38:39.000Z
|
morbitwrapper/__init__.py
|
manuelbb-upb/MorbitWrapper
|
eecd77c33fdbc5e867e10fbe5ec217a2ada6924c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 15 16:25:44 2020
@author: manuelbb
"""
from .MOPClasses import RbfConfig, MOP, AlgoConfig, LagrangeConfig, TaylorConfig, ExactConfig
from .optimization import optimize, initialize_data, iterate, get_ret_values
from .optimization import print_fin_info, print_stop_info
from .globals import load_settings, make_sysimage
from .globals import set_MORBIT_SYS_IMG, get_MORBIT_SYS_IMG, set_JULIA_ENV
from .globals import get_JULIA_ENV, set_JULIA_RUNTIME, get_JULIA_RUNTIME, get_JULIA_RUNTIME_NAME
from .globals import get_JULIA_DEPOT_PATH, set_JULIA_DEPOT_PATH
from .globals import set_FORCE_PYCALL_REBUILD, get_FORCE_PYCALL_REBUILD
from .globals import julia_main
#NOTE
# DONT make globals accessible from outside the module directly
# … leads only to confusion; use setters and getters instead
| 36 | 96 | 0.822917 |
7948b1b60c843dd7adbbe7b6a4359146eeaecd0a
| 982 |
py
|
Python
|
model/fcn.py
|
lixiaoyu0575/physionet_challenge2020_pytorch
|
39b5aeeead440eaa88d6fdaf4a8a70c15373e062
|
[
"MIT"
] | 1 |
2021-05-24T08:09:30.000Z
|
2021-05-24T08:09:30.000Z
|
model/fcn.py
|
lixiaoyu0575/physionet_challenge2020_pytorch
|
39b5aeeead440eaa88d6fdaf4a8a70c15373e062
|
[
"MIT"
] | null | null | null |
model/fcn.py
|
lixiaoyu0575/physionet_challenge2020_pytorch
|
39b5aeeead440eaa88d6fdaf4a8a70c15373e062
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
from .util import ConvBlock
class FCN(nn.Module):
"""A PyTorch implementation of the FCN Baseline
From https://arxiv.org/abs/1909.04939
Attributes
----------
sequence_length:
The size of the input sequence
num_pred_classes:
The number of output classes
"""
def __init__(self, in_channels: int, num_pred_classes: int = 1) -> None:
super().__init__()
# for easier saving and loading
self.input_args = {
'in_channels': in_channels,
'num_pred_classes': num_pred_classes
}
self.layers = nn.Sequential(*[
ConvBlock(in_channels, 128, 8, 1),
ConvBlock(128, 256, 5, 1),
ConvBlock(256, 128, 3, 1),
])
self.final = nn.Linear(128, num_pred_classes)
def forward(self, x: torch.Tensor) -> torch.Tensor: # type: ignore
x = self.layers(x)
return self.final(x.mean(dim=-1))
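# Usage sketch added for illustration; the input layout (batch, channels, length)
# assumes ConvBlock from .util implements the usual 1-D conv/BN/ReLU block of the
# FCN baseline. Run as a module (e.g. `python -m model.fcn`) so the relative
# import above resolves.
if __name__ == "__main__":
    model = FCN(in_channels=12, num_pred_classes=9)
    dummy = torch.randn(4, 12, 3000)  # batch of 4 sequences, 12 channels, length 3000
    logits = model(dummy)             # global average pool over time, then linear head
    print(logits.shape)               # expected: torch.Size([4, 9])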
| 27.277778 | 76 | 0.593686 |
7948b6dcbb796baefff47c3e791983c0dd58b297
| 1,868 |
py
|
Python
|
12_browser_automation_selenium/lectures/9_adding_some_error_handling/pages/quotes_page.py
|
gdia/The-Complete-Python-Course
|
ed375b65242249bc749c3e292a6149f8528b9dcf
|
[
"MIT"
] | 29 |
2019-09-02T21:15:59.000Z
|
2022-01-14T02:20:05.000Z
|
12_browser_automation_selenium/lectures/9_adding_some_error_handling/pages/quotes_page.py
|
gdia/The-Complete-Python-Course
|
ed375b65242249bc749c3e292a6149f8528b9dcf
|
[
"MIT"
] | 2 |
2020-08-20T05:48:36.000Z
|
2021-06-02T03:16:31.000Z
|
12_browser_automation_selenium/lectures/9_adding_some_error_handling/pages/quotes_page.py
|
gdia/The-Complete-Python-Course
|
ed375b65242249bc749c3e292a6149f8528b9dcf
|
[
"MIT"
] | 38 |
2019-10-20T14:29:12.000Z
|
2022-03-27T19:50:05.000Z
|
from typing import List
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import Select
from locators.quotes_page_locators import QuotesPageLocators
from parsers.quote import QuoteParser
class QuotesPage:
def __init__(self, browser):
self.browser = browser
@property
def quotes(self) -> List[QuoteParser]:
return [
QuoteParser(e)
for e in self.browser.find_elements_by_css_selector(
QuotesPageLocators.QUOTE
)
]
@property
def author_dropdown(self) -> Select:
element = self.browser.find_element_by_css_selector(
QuotesPageLocators.AUTHOR_DROPDOWN
)
return Select(element)
@property
def tags_dropdown(self):
element = self.browser.find_element_by_css_selector(
QuotesPageLocators.TAG_DROPDOWN
)
return Select(element)
@property
def search_button(self):
return self.browser.find_element_by_css_selector(
QuotesPageLocators.SEARCH_BUTTON
)
def select_author(self, author_name: str):
self.author_dropdown.select_by_visible_text(author_name)
def get_available_tags(self) -> List[str]:
return [option.text.strip() for option in self.tags_dropdown.options]
def select_tag(self, tag_name: str):
self.tags_dropdown.select_by_visible_text(tag_name)
def search_for_quotes(self, author_name: str, tag_name: str) -> List[QuoteParser]:
self.select_author(author_name)
try:
self.select_tag(tag_name)
except NoSuchElementException:
raise InvalidTagForAuthorError(
f"Author '{author_name}' does not have any quotes tagged with '{tag_name}'."
)
self.search_button.click()
return self.quotes
class InvalidTagForAuthorError(ValueError):
pass
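# Usage sketch added for illustration. It assumes a local chromedriver install and
# that the page being driven matches QuotesPageLocators (the course uses the
# quotes.toscrape.com search page); the author and tag values below are hypothetical.
if __name__ == "__main__":
    from selenium import webdriver
    browser = webdriver.Chrome()
    browser.get("http://quotes.toscrape.com/search.aspx")
    page = QuotesPage(browser)
    print(page.get_available_tags())
    for quote in page.search_for_quotes("Albert Einstein", "inspirational"):
        print(quote)
    browser.quit()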
| 29.1875 | 92 | 0.669165 |
7948b719ad04f742fcabc108c4a63778fb82fe6a
| 917 |
py
|
Python
|
thomas/flask/flask tutorial files/tutorial 5.py
|
ThomasDerZweifler/pyPro
|
8bc6531483f08982b19c08a4cdb1a0c9dbd03737
|
[
"Apache-2.0"
] | null | null | null |
thomas/flask/flask tutorial files/tutorial 5.py
|
ThomasDerZweifler/pyPro
|
8bc6531483f08982b19c08a4cdb1a0c9dbd03737
|
[
"Apache-2.0"
] | null | null | null |
thomas/flask/flask tutorial files/tutorial 5.py
|
ThomasDerZweifler/pyPro
|
8bc6531483f08982b19c08a4cdb1a0c9dbd03737
|
[
"Apache-2.0"
] | 1 |
2020-07-22T17:57:04.000Z
|
2020-07-22T17:57:04.000Z
|
from flask import *
# from flask import Flask, redirect, url_for, render_template, request, session
from datetime import timedelta
app = Flask(__name__)
app.secret_key = "hello"
app.permanent_session_lifetime = timedelta(minutes=5)
@app.route("/")
def home():
return render_template("index.html")
@app.route("/login", methods=["POST", "GET"])
def login():
if request.method == "POST":
session.permanent = True
user = request.form["nm"]
session["user"] = user
return redirect(url_for("user"))
else:
if "user" in session:
return redirect(url_for("user"))
return render_template("login.html")
@app.route("/user")
def user():
if "user" in session:
user = session["user"]
return f"<h1>User: {user}</h1>"
else:
return redirect(url_for("login"))
@app.route("/logout")
def logout():
session.pop("user", None)
return redirect(url_for("login"))
if __name__ == "__main__":
app.run(debug=True)
| 22.925 | 79 | 0.691385 |
7948b7a91f3593819f79d5bab0926e3d18a74b09
| 4,980 |
py
|
Python
|
people/committee_meeting_attendees.py
|
lee-elenbaas/knesset-data-pipelines
|
5ae9f6b0c89b9b7fce647f693535a6a5cdbafbce
|
[
"MIT"
] | 8 |
2017-07-24T12:11:22.000Z
|
2019-04-16T03:28:28.000Z
|
people/committee_meeting_attendees.py
|
lee-elenbaas/knesset-data-pipelines
|
5ae9f6b0c89b9b7fce647f693535a6a5cdbafbce
|
[
"MIT"
] | 141 |
2017-07-24T12:10:57.000Z
|
2019-12-04T07:39:02.000Z
|
people/committee_meeting_attendees.py
|
lee-elenbaas/knesset-data-pipelines
|
5ae9f6b0c89b9b7fce647f693535a6a5cdbafbce
|
[
"MIT"
] | 28 |
2017-07-30T14:20:12.000Z
|
2022-02-20T12:01:12.000Z
|
from datapackage_pipelines.wrapper import process
import logging, requests, os
from knesset_data.protocols.committee import CommitteeMeetingProtocol
import hashlib, json
BASE_HASH_OBJ = hashlib.md5()
with open('../Pipfile.lock') as f:
BASE_HASH_OBJ.update(str(json.load(f)['default']['knesset-data']['hashes']).encode())
def process_row(row, row_index, spec, resource_index, parameters, stats):
if spec['name'] == 'kns_committeesession':
row.update(mks=None, invitees=None, legal_advisors=None, manager=None)
if (
(not parameters.get("filter-meeting-id") or int(row["CommitteeSessionID"]) in parameters["filter-meeting-id"])
and (not parameters.get("filter-committee-id") or int(row["CommitteeID"]) in parameters["filter-committee-id"])
and (not parameters.get("filter-knesset-num") or int(row["KnessetNum"]) in parameters["filter-knesset-num"])
):
if row["text_parsed_filename"]:
new_cache_hash, old_cache_hash, cache_hash_path, cache_hash_row = None, None, None, None
if os.environ.get('KNESSET_PIPELINES_DATA_PATH'):
m = BASE_HASH_OBJ.copy()
m.update(str(row['text_crc32c']).encode())
m.update(str(row['parts_crc32c']).encode())
new_cache_hash = m.hexdigest()
cache_hash_path = os.path.join(os.environ['KNESSET_PIPELINES_DATA_PATH'],
'people/committees/meeting-attendees/cache_hash/{}.json'.format(row["text_parsed_filename"]))
if os.path.exists(cache_hash_path):
with open(cache_hash_path) as f:
cache_data = json.load(f)
old_cache_hash = cache_data['hash']
cache_hash_row = cache_data['row']
if cache_hash_path and old_cache_hash and old_cache_hash == new_cache_hash:
row.update(**cache_hash_row)
else:
logging.info('getting attendees for meeting {}'.format(row['CommitteeSessionID']))
text = None
if os.environ.get('KNESSET_PIPELINES_DATA_PATH'):
protocol_text_path = os.path.join(os.environ['KNESSET_PIPELINES_DATA_PATH'],
'committees/meeting_protocols_text/{}'.format(row["text_parsed_filename"]))
if os.path.exists(protocol_text_path) and os.path.getsize(protocol_text_path) > 0:
with open(protocol_text_path) as f:
text = f.read()
else:
protocol_text_url = "https://storage.googleapis.com/knesset-data-pipelines/data/committees/" \
"meeting_protocols_text/{}".format(row["text_parsed_filename"])
res = requests.get(protocol_text_url)
if res.status_code == 200:
text = res.content.decode("utf-8")
update_row = dict(mks=None, invitees=None, legal_advisors=None, manager=None)
if text:
with CommitteeMeetingProtocol.get_from_text(text) as protocol:
attendees = protocol.attendees
if attendees:
update_row = dict(mks=attendees['mks'],
invitees=attendees['invitees'],
legal_advisors=attendees['legal_advisors'],
manager=attendees['manager'],
financial_advisors=attendees.get('financial_advisors', []))
row.update(**update_row)
if cache_hash_path:
os.makedirs(os.path.dirname(cache_hash_path), exist_ok=True)
with open(cache_hash_path, 'w') as f:
json.dump({'hash': new_cache_hash,
'row': update_row}, f)
return row
def modify_datapackage(datapackage, parameters, stats):
for descriptor in datapackage['resources']:
if descriptor['name'] == 'kns_committeesession':
descriptor['schema']['fields'] += [{"name": "mks", "type": "array"},
{"name": "invitees", "type": "array"},
{"name": "legal_advisors", "type": "array"},
{"name": "manager", "type": "array"},
{"name": "financial_advisors", "type": "array"}]
return datapackage
if __name__ == '__main__':
process(modify_datapackage, process_row)
| 60 | 144 | 0.523896 |
7948b7dd2a5dc61b8ea18180f008b733d4bfbc30
| 3,523 |
py
|
Python
|
tests/python/backends.py
|
piengeng/openldap
|
07429efcff9dc50abb8ca265c8e8caa8cd25d01c
|
[
"OLDAP-2.8"
] | null | null | null |
tests/python/backends.py
|
piengeng/openldap
|
07429efcff9dc50abb8ca265c8e8caa8cd25d01c
|
[
"OLDAP-2.8"
] | null | null | null |
tests/python/backends.py
|
piengeng/openldap
|
07429efcff9dc50abb8ca265c8e8caa8cd25d01c
|
[
"OLDAP-2.8"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This work is part of OpenLDAP Software <http://www.openldap.org/>.
#
# Copyright 2021-2022 The OpenLDAP Foundation.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted only as authorized by the OpenLDAP
# Public License.
#
# A copy of this license is available in the file LICENSE in the
# top-level directory of the distribution or, alternatively, at
# <http://www.OpenLDAP.org/license.html>.
#
# ACKNOWLEDGEMENTS:
# This work was initially developed by Ondřej Kuzník
# for inclusion in OpenLDAP Software.
"""
OpenLDAP fixtures for backends
"""
import ldap0
import logging
import os
import pathlib
import pytest
import secrets
import tempfile
from ldap0.controls.readentry import PostReadControl
from .slapd import server
SOURCEROOT = pathlib.Path(os.environ.get('TOP_SRCDIR', "..")).absolute()
BUILDROOT = pathlib.Path(os.environ.get('TOP_BUILDDIR', SOURCEROOT)).absolute()
logger = logging.getLogger(__name__)
class Database:
have_directory = True
def __init__(self, server, suffix, backend):
self.server = server
self.suffix = suffix
self.rootdn = suffix
self.secret = secrets.token_urlsafe()
self.overlays = []
if suffix in server.suffixes:
raise RuntimeError(f"Suffix {suffix} already configured in server")
if self.have_directory:
self.directory = tempfile.TemporaryDirectory(dir=server.home)
conn = server.connect()
conn.simple_bind_s("cn=config", server.secret)
# We're just after the generated DN, no other attributes at the moment
control = PostReadControl(True, [])
result = conn.add_s(
f"olcDatabase={backend},cn=config", self._entry(),
req_ctrls=[control])
dn = result.ctrls[0].res.dn_s
self.dn = dn
server.suffixes[suffix] = self
def _entry(self):
entry = {
"objectclass": [self.objectclass.encode()],
"olcSuffix": [self.suffix.encode()],
"olcRootDN": [self.suffix.encode()],
"olcRootPW": [self.secret.encode()],
}
if self.have_directory:
entry["olcDbDirectory"] = [self.directory.name.encode()]
return entry
class MDB(Database):
have_directory = True
objectclass = "olcMdbConfig"
_size = 10 * (1024 ** 3)
def __init__(self, server, suffix):
super().__init__(server, suffix, "mdb")
def _entry(self):
entry = {
"olcDbMaxSize": [str(self._size).encode()],
}
return {**super()._entry(), **entry}
class LDAP(Database):
have_directory = False
objectclass = "olcLDAPConfig"
def __init__(self, server, suffix, uris):
self.uris = uris
super().__init__(server, suffix, "ldap")
def _entry(self):
entry = {
"olcDbURI": [" ".join(self.uris).encode()],
}
return {**super()._entry(), **entry}
backend_types = {
"mdb": MDB,
"ldap": LDAP,
}
@pytest.fixture(scope="class")
def db(request, server):
marker = request.node.get_closest_marker("db")
database_type = marker.args[0] if marker else "mdb"
klass = backend_types[database_type]
conn = server.connect()
conn.simple_bind_s("cn=config", server.secret)
db = klass(server, "cn=test")
yield db
conn.delete_s(db.dn)
class TestDB:
def test_db_setup(self, db):
pass
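# Usage sketch added for illustration: a test class can request a specific backend
# for the shared `db` fixture through the custom `db` marker (the fixture falls
# back to "mdb" when no marker is present, as TestDB above shows). Running it
# still requires the slapd `server` fixture environment.
@pytest.mark.db("mdb")
class TestExplicitMdb:
    def test_suffix(self, db):
        assert db.suffix == "cn=test"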
| 25.164286 | 79 | 0.639512 |
7948b8d7bc9575bd02d8814da9f7b812455f2d19
| 827 |
py
|
Python
|
backend/alembic/versions/20210105124927_984ad93b1b7c_add_model_table.py
|
BodenmillerGroup/histocat-web
|
c598cd07506febf0b7c209626d4eb869761f2e62
|
[
"MIT"
] | 4 |
2021-06-14T15:19:25.000Z
|
2022-02-09T13:17:39.000Z
|
backend/alembic/versions/20210105124927_984ad93b1b7c_add_model_table.py
|
BodenmillerGroup/histocat-web
|
c598cd07506febf0b7c209626d4eb869761f2e62
|
[
"MIT"
] | null | null | null |
backend/alembic/versions/20210105124927_984ad93b1b7c_add_model_table.py
|
BodenmillerGroup/histocat-web
|
c598cd07506febf0b7c209626d4eb869761f2e62
|
[
"MIT"
] | 1 |
2022-02-09T13:17:41.000Z
|
2022-02-09T13:17:41.000Z
|
"""Add model table
Revision ID: 984ad93b1b7c
Revises: bcd5ecf42c3e
Create Date: 2021-01-05 12:49:27.310783
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects.postgresql import JSONB
# revision identifiers, used by Alembic.
revision = '984ad93b1b7c'
down_revision = 'bcd5ecf42c3e'
branch_labels = None
depends_on = None
def upgrade():
op.create_table(
'model',
sa.Column('id', sa.Integer(), primary_key=True, index=True),
sa.Column('name', sa.String(), nullable=False, index=True, unique=True),
sa.Column('description', sa.String()),
sa.Column('location', sa.String(4096)),
sa.Column('meta', JSONB()),
sa.Column('created_at', sa.DateTime(), default=sa.sql.func.now(), nullable=False),
)
def downgrade():
op.drop_table('model')
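# Usage note added for illustration (assumes a standard Alembic setup that picks up
# this revision file):
#
#     alembic upgrade 984ad93b1b7c     # apply this migration
#     alembic downgrade bcd5ecf42c3e   # revert to the previous revision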
| 24.323529 | 90 | 0.675937 |
7948b8f4c845755aa1b868b8d10288c09f23324f
| 3,922 |
py
|
Python
|
src/networks/my_LeNet.py
|
LukasKratochvila/Deep-SVDD-PyTorch
|
a94bd9b6be4d953706daf969b061ddf55d6cbf4c
|
[
"MIT"
] | null | null | null |
src/networks/my_LeNet.py
|
LukasKratochvila/Deep-SVDD-PyTorch
|
a94bd9b6be4d953706daf969b061ddf55d6cbf4c
|
[
"MIT"
] | null | null | null |
src/networks/my_LeNet.py
|
LukasKratochvila/Deep-SVDD-PyTorch
|
a94bd9b6be4d953706daf969b061ddf55d6cbf4c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 19 18:40:39 2020
@author: kratochvila
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from base.base_net import BaseNet
class MY_LeNet(BaseNet):
def __init__(self):
super().__init__()
self.rep_dim = 128
self.pool = nn.MaxPool2d(2, 2)
self.conv1 = nn.Conv2d(3, 32, 5, bias=False, padding=2)
self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
self.conv2 = nn.Conv2d(32, 64, 5, bias=False, padding=2)
self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
self.conv3 = nn.Conv2d(64, 128, 5, bias=False, padding=2)
self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
self.fc1 = nn.Linear(128 * 80 * 60, self.rep_dim, bias=False) # 20 * 15
def forward(self, x):
x = self.conv1(x)
x = self.pool(F.leaky_relu(self.bn2d1(x)))
x = self.conv2(x)
x = self.pool(F.leaky_relu(self.bn2d2(x)))
x = self.conv3(x)
x = self.pool(F.leaky_relu(self.bn2d3(x)))
x = x.view(x.size(0), -1)
x = self.fc1(x)
return x
class MY_LeNet_Autoencoder(BaseNet):
def __init__(self):
super().__init__()
self.rep_dim = 128
self.pool = nn.MaxPool2d(2, 2)
# Encoder (must match the Deep SVDD network above)
self.conv1 = nn.Conv2d(3, 32, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.conv1.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d1 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
self.conv2 = nn.Conv2d(32, 64, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.conv2.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d2 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
self.conv3 = nn.Conv2d(64, 128, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.conv3.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d3 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
self.fc1 = nn.Linear(128 * 80 * 60, self.rep_dim, bias=False)
self.bn1d = nn.BatchNorm1d(self.rep_dim, eps=1e-04, affine=False)
# Decoder
self.deconv1 = nn.ConvTranspose2d(int(self.rep_dim / (4 * 4)), 128, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv1.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d4 = nn.BatchNorm2d(128, eps=1e-04, affine=False)
self.deconv2 = nn.ConvTranspose2d(128, 64, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv2.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d5 = nn.BatchNorm2d(64, eps=1e-04, affine=False)
self.deconv3 = nn.ConvTranspose2d(64, 32, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv3.weight, gain=nn.init.calculate_gain('leaky_relu'))
self.bn2d6 = nn.BatchNorm2d(32, eps=1e-04, affine=False)
self.deconv4 = nn.ConvTranspose2d(32, 3, 5, bias=False, padding=2)
nn.init.xavier_uniform_(self.deconv4.weight, gain=nn.init.calculate_gain('leaky_relu'))
def forward(self, x):
x = self.conv1(x)
x = self.pool(F.leaky_relu(self.bn2d1(x)))
x = self.conv2(x)
x = self.pool(F.leaky_relu(self.bn2d2(x)))
x = self.conv3(x)
x = self.pool(F.leaky_relu(self.bn2d3(x)))
x = x.view(x.size(0), -1)
x = self.bn1d(self.fc1(x))
x = x.view(x.size(0), int(self.rep_dim / (4 * 4)), 4, 4)
x = F.leaky_relu(x)
x = self.deconv1(x)
x = F.interpolate(F.leaky_relu(self.bn2d4(x)), scale_factor=2)
x = self.deconv2(x)
x = F.interpolate(F.leaky_relu(self.bn2d5(x)), scale_factor=2)
x = self.deconv3(x)
x = F.interpolate(F.leaky_relu(self.bn2d6(x)), scale_factor=2)
x = self.deconv4(x)
x = torch.sigmoid(x)
return x
| 40.020408 | 101 | 0.620857 |
7948b92c92375527847fc71801171e30eff7b96c
| 2,881 |
py
|
Python
|
river/stats/mode.py
|
fox-ds/river
|
9ce947ebfc012ec7059de0a09c765b2da7fc1d25
|
[
"BSD-3-Clause"
] | null | null | null |
river/stats/mode.py
|
fox-ds/river
|
9ce947ebfc012ec7059de0a09c765b2da7fc1d25
|
[
"BSD-3-Clause"
] | 1 |
2022-03-31T14:51:13.000Z
|
2022-03-31T15:57:05.000Z
|
river/stats/mode.py
|
fox-ds/river
|
9ce947ebfc012ec7059de0a09c765b2da7fc1d25
|
[
"BSD-3-Clause"
] | null | null | null |
import collections
import typing
from . import base
__all__ = ["Mode"]
class Mode(base.Univariate):
"""Running mode.
The mode is simply the most common value. An approximate mode can be computed by setting the
number of first unique values to count.
Parameters
----------
k
Only the first `k` unique values will be included. If `k` equals -1, the exact mode is
computed.
Examples
--------
>>> from river import stats
>>> X = ['sunny', 'cloudy', 'cloudy', 'rainy', 'rainy', 'rainy']
>>> mode = stats.Mode(k=2)
>>> for x in X:
... print(mode.update(x).get())
sunny
sunny
cloudy
cloudy
cloudy
cloudy
>>> mode = stats.Mode(k=-1)
>>> for x in X:
... print(mode.update(x).get())
sunny
sunny
cloudy
cloudy
cloudy
rainy
"""
def __init__(self, k=25):
self.k = k
self.counts = collections.defaultdict(int)
@property
def name(self):
return "mode"
def update(self, x):
if self.k == -1 or x in self.counts or len(self.counts) < self.k:
self.counts[x] += 1
return self
def get(self):
return max(self.counts, key=self.counts.get, default=None)
class RollingMode(base.RollingUnivariate):
"""Running mode over a window.
The mode is the most common value.
Parameters
----------
window_size
Size of the rolling window.
Attributes
----------
counts : collections.defaultdict
Value counts.
Examples
--------
>>> from river import stats
>>> X = ['sunny', 'sunny', 'sunny', 'rainy', 'rainy', 'rainy', 'rainy']
>>> rolling_mode = stats.RollingMode(window_size=2)
>>> for x in X:
... print(rolling_mode.update(x).get())
sunny
sunny
sunny
sunny
rainy
rainy
rainy
>>> rolling_mode = stats.RollingMode(window_size=5)
>>> for x in X:
... print(rolling_mode.update(x).get())
sunny
sunny
sunny
sunny
sunny
rainy
rainy
"""
def __init__(self, window_size: int):
self.window = collections.deque(maxlen=window_size)
self.counts: typing.DefaultDict[typing.Any, int] = collections.defaultdict(int)
@property
def window_size(self):
return self.window.maxlen
def update(self, x):
if len(self.window) >= self.window_size:
# Subtract the counter of the last element
first_in = self.window[0]
self.counts[first_in] -= 1
            # No need to store the value if its counter is 0
if self.counts[first_in] == 0:
self.counts.pop(first_in)
self.counts[x] += 1
self.window.append(x)
return self
def get(self):
return max(self.counts, key=self.counts.get, default=None)
| 21.183824 | 96 | 0.570288 |
7948ba2bbfba076c4fdda268dbec67afc657681b
| 11,287 |
py
|
Python
|
app.py
|
pedrobcst/Xerus-streamlit
|
9c94a4d05e5aa4439ac6165a5b6951b26d3d2e4d
|
[
"MIT"
] | null | null | null |
app.py
|
pedrobcst/Xerus-streamlit
|
9c94a4d05e5aa4439ac6165a5b6951b26d3d2e4d
|
[
"MIT"
] | 14 |
2022-03-23T09:21:23.000Z
|
2022-03-31T06:36:11.000Z
|
app.py
|
pedrobcst/Xerus-streamlit
|
9c94a4d05e5aa4439ac6165a5b6951b26d3d2e4d
|
[
"MIT"
] | null | null | null |
import os
import shutil
from typing import List, Union
import pandas as pd
import streamlit as st
from st_aggrid import AgGrid
from conf import AppSettings
from xerus_plot import plot_highest_correlated, plot_read_data
from xerus_run import run_opt, run_xerus
os.makedirs(AppSettings.TMP_FOLDER, exist_ok=True)
os.makedirs(AppSettings.RESULTS_TMP_FOLDER, exist_ok=True)
from utils import make_selection_label, process_input, read_input
st.title('XERUS Streamlit Interface Beta')
st.sidebar.markdown("**X**Ray **E**stimation and **R**efinement **U**sing **S**imilarity (**XERUS**)")
st.sidebar.image("https://raw.githubusercontent.com/pedrobcst/Xerus/master/img/g163.png", width=100)
base_columns = list(AppSettings.KEEP_COLUMNS)
# Session state stuff
if 'xerus_started' not in st.session_state:
st.session_state['xerus_started'] = False
if 'xerus_object' not in st.session_state:
st.session_state['xerus_object'] = None
if 'zip_file' not in st.session_state:
st.session_state['zip_file'] = False
if 'optmized' not in st.session_state:
st.session_state['optmized'] = False
@st.cache(allow_output_mutation=True)
def run_analysis(args_xerus: dict, args_analysis: dict):
return run_xerus(args_xerus, args_analysis)
@st.cache(allow_output_mutation=True)
def run_optmizer(xerus_object, index_list: Union[int, List[int]], opt_args: dict):
return run_opt(xerus_object, index_list, opt_args)
# Settings
st.sidebar.header("Data Upload and Name")
name = st.sidebar.text_input("Dataset name", key="name")
file = st.sidebar.file_uploader("Upload data", key="data_uploaded")
if file:
data_format = st.sidebar.text_input("Data format", value=file.name.split(".")[-1], key="data_format")
path = read_input(file)
working_folder = os.path.join(AppSettings.RESULTS_TMP_FOLDER, file.name.split(".")[0]) + f"_{name}"
os.makedirs(working_folder, exist_ok=True)
# Data Visualization Settings
with st.expander('Data View and Settings', expanded=True):
if path:
c1, c2 = st.columns([2, 4])
# Background / Preprocessing information
with c1:
remove_background = st.checkbox("Remove background", value=True, key="remove_bg")
use_preprocessed = st.checkbox("Use preprocessed data", value=False, key="use_pre")
poly_degree = 10
if remove_background:
poly_degree = st.number_input("Polynomial degree", min_value=2, max_value=12, step=1, value=8,key="poly_degree")
elements = st.text_input("Elements seperated by comma", value="Ho", key="element_list").split(",")
elements = [element.strip() for element in elements if len(element) > 0]
max_oxygen = st.number_input("Max oxygen", min_value=0, max_value=10, step=1, value=2, key="max_oxy")
st.write("Current element list is:", elements)
# Data plot
with c2:
figure = plot_read_data(path, format=data_format, poly_degree=int(poly_degree), remove_base=remove_background)
figure.update_layout(legend=dict(yanchor="top",y=1.35,xanchor="left",x=0.00, bgcolor="white"))
st.plotly_chart(figure, use_container_width=True, template="presentation", bgcolor="white")
c1, c2 = st.columns(2)
# Algorithm settings
with c1:
with st.expander('Required Analysis Settings', expanded=True):
n_runs = st.text_input("Number of runs", value="auto", key="n_runs")
if n_runs != "auto":
n_runs = int(n_runs)
g = int(st.number_input("g", min_value=1, max_value=999, value=3, step=1, key="grabtop"))
delta = st.number_input(r"delta", min_value=1.0, max_value=5.0, value=1.3, step=0.1, key="delta")
# Optional search settings
with c2:
with st.expander("Optional Analysis Settings", expanded=True):
ignore_ids = process_input(st.text_input("Ignore IDs", value="", key="ignore_ids"))
ignore_providers = process_input(st.text_input("Ignore providers", value="AFLOW", key="ignore_providers"))
ignore_comb = process_input(st.text_input("Ignore combinations", value="", key="ignore_comb"))
# Filter Settings
with st.expander("Current Filter Settings", expanded=False):
c1, c2, c3 = st.columns(3)
c1.markdown("**Ignore IDs**")
c1.write(ignore_ids)
c2.markdown("**Ignore providers**")
c2.write(ignore_comb)
c3.markdown("**Ignore combinations**")
c3.write(ignore_providers)
if st.button("Run analysis", key="run_analysis"):
args_xerus = dict(name=name, working_folder=working_folder, exp_data_file=path, elements=elements,
max_oxy=max_oxygen, use_preprocessed=use_preprocessed, remove_background=remove_background,
poly_degree=poly_degree, data_fmt=data_format)
args_analysis = dict(n_runs=n_runs, grabtop=g, delta=delta, ignore_ids=ignore_ids,
ignore_provider=ignore_providers, ignore_comb=ignore_comb)
results_search = run_analysis(args_xerus, args_analysis)
st.session_state['optmized'] = False
st.session_state['zip_file'] = False
st.balloons()
st.session_state['xerus_object'] = results_search
if st.session_state.xerus_object:
st.header('Analysis Results')
results_search = st.session_state.xerus_object
df = results_search.results.copy()
if 'wt' in df.columns:
base_columns.append('wt')
df = df[base_columns]
df['id'] = df.index
simuls_df = results_search.simulated_gsas2
df = df[['id'] + base_columns]
with st.expander('Results (Tabular)', expanded = True):
AgGrid(df, width='50%', height=200)
with st.expander('Visualization of Analysis', expanded = True):
viz_number = st.selectbox("Results to Visualize", options=df.index, key="viz_number", format_func = lambda idx: make_selection_label(idx, df))
fig = results_search.plot_result(viz_number)
fig.update_layout(title=None, width=800, height=500)
fig.update_xaxes(title=r'2theta (deg.)')
st.plotly_chart(fig, use_container_width=True)
plot_highest_corr = st.checkbox("Plot Highest correlated", value=False, key='plot_highest_corr')
if plot_highest_corr:
c1, c2 = st.columns([2,4])
pattern_show = results_search.cif_info.copy()
pattern_show.sort_values(by='Cij', inplace=True, ascending=False)
pattern_show.reset_index(drop=True, inplace=True)
highest_correlated = int(c1.number_input("Highest k correlated phases", min_value=1, max_value=len(simuls_df) - 1,
value=len(simuls_df) // 3, step=1, key='highest_corr'))
options = pattern_show.filename[:highest_correlated]
with c2:
st.caption("List of all Options")
AgGrid(pattern_show.loc[:highest_correlated, ['filename', 'Cij', 'spacegroup']], width="50%", height=250)
patterns_show = st.multiselect("Patterns to show", options=options, key='patterns_show', default=options)
fig_highest_corr = plot_highest_correlated(data=results_search.exp_data_file, format=data_format,
cif_info=results_search.cif_info.copy(),
top=highest_correlated, width=800, height=500,
filter_columns=patterns_show)
st.plotly_chart(fig_highest_corr, use_container_width=True)
c1, c2 = st.columns(2)
with c1:
if st.button('Zip Contents'):
shutil.make_archive(working_folder, 'zip', working_folder)
st.session_state['zip_file'] = True
with c2:
if st.session_state['zip_file']:
if os.path.exists(f"{working_folder}.zip"):
with open(f"{working_folder}.zip", "rb") as fp:
btn = st.download_button(
label="Download ZIP file",
data=fp,
file_name=f"{name}_results.zip",
mime="application/zip"
)
with st.expander('Optimizer Settings'):
# Basic optimizer settings
c1,c2,c3,c4 = st.columns(4)
optimizer_idx = process_input(c1.text_input('Indexes to optmize seperated by comma:', value="0", key="opt_list"),return_int=True)
n_trials = int(c2.number_input("Number of trials", min_value=20, max_value=99999, value=200, step=1, key="n_trials"))
param = c3.selectbox(label="Param to optimize", options=["rwp", "gof"])
random_state = int(c4.number_input(label="Random seed number", min_value=0, max_value=9999, step=1, value=42,
key='random_state'))
# Checkboxes
c5, c6, c7, c8, c9 = st.columns(5)
allow_pref_orient = c5.checkbox('Pref Orientation', value=True, key='pref_ori')
allow_atomic_params = c6.checkbox('Atomic Params', value=False, key='atomic')
        allow_broad = c7.checkbox('Broadening Params', value=False, key='broadening')
allow_angle = c8.checkbox('Acute angle', value=False, key='acute')
force_ori = c9.checkbox('Force to use pref. ori', value=False, key='force_ori')
opt_args = dict(n_trials=n_trials,
allow_pref_orient=allow_pref_orient,
allow_atomic_params=allow_atomic_params,
allow_broad=allow_broad,
allow_angle=allow_angle,
param=param,
random_state=random_state,
force_ori=force_ori
)
st.write(opt_args)
if st.button('Run optimization'):
st.session_state['xerus_object'] = run_optmizer(results_search, optimizer_idx, opt_args)
st.session_state['optmized'] = True
st.balloons()
if st.session_state['optmized']:
with st.expander('Optimization Results'):
st.write(
f'Optimization finished. Best rwp is {st.session_state.xerus_object.optimizer.optim.rwp_best:.3f}%')
st.subheader('Refinement Result')
fig = st.session_state.xerus_object.optimizer.optim.plot_best(save=False, engine="plotly")
st.plotly_chart(fig, use_container_width=True)
st.subheader('Crystal Structure Result')
AgGrid(pd.DataFrame(data=st.session_state.xerus_object.optimizer.lattice_best), height=100)
if st.button('Export Results to Working Folder'):
st.session_state.xerus_object.export_results()
st.write('Optimizaton results were exported to folder!')
st.write('Rezip and press download again!')
| 52.013825 | 154 | 0.622132 |
7948ba428194527fdea02348a393c7319614fded
| 775 |
py
|
Python
|
openapi_python_client/parser/reference.py
|
gmerz/matterapi-generator
|
4ba29c7d308c43365b286e41220e3252fa250526
|
[
"MIT"
] | null | null | null |
openapi_python_client/parser/reference.py
|
gmerz/matterapi-generator
|
4ba29c7d308c43365b286e41220e3252fa250526
|
[
"MIT"
] | null | null | null |
openapi_python_client/parser/reference.py
|
gmerz/matterapi-generator
|
4ba29c7d308c43365b286e41220e3252fa250526
|
[
"MIT"
] | null | null | null |
""" A Reference is ultimately a Class which will be in models, usually defined in a body input or return type """
from dataclasses import dataclass
from typing import Dict
from .. import utils
class_overrides: Dict[str, "Reference"] = {}
@dataclass
class Reference:
"""A reference to a class which will be in models"""
class_name: str
module_name: str
@staticmethod
def from_ref(ref: str) -> "Reference":
"""Get a Reference from the openapi #/schemas/blahblah string"""
ref_value = ref.split("/")[-1]
class_name = utils.pascal_case(ref_value)
if class_name in class_overrides:
return class_overrides[class_name]
return Reference(class_name=class_name, module_name=utils.snake_case(class_name))
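# Usage sketch added for illustration (run within the package so the relative import
# resolves); the exact output depends on utils.pascal_case and utils.snake_case, and
# the schema name below is hypothetical.
if __name__ == "__main__":
    ref = Reference.from_ref("#/components/schemas/pet_store")
    print(ref.class_name, ref.module_name)  # expected: PetStore pet_store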
| 27.678571 | 113 | 0.694194 |
7948bc13fb9c540a92603bca8f423d02aecf81c6
| 6,681 |
py
|
Python
|
fluid/PaddleCV/rcnn/utility.py
|
zzszmyf/models
|
95f0a9de0e820ddcf2c6529a3bcb9d12bba13128
|
[
"Apache-2.0"
] | 1 |
2019-02-27T12:27:49.000Z
|
2019-02-27T12:27:49.000Z
|
fluid/PaddleCV/rcnn/utility.py
|
ThinkPeace/models
|
5d25e00c94943e50e64780a244136f88f13c0a88
|
[
"Apache-2.0"
] | null | null | null |
fluid/PaddleCV/rcnn/utility.py
|
ThinkPeace/models
|
5d25e00c94943e50e64780a244136f88f13c0a88
|
[
"Apache-2.0"
] | 1 |
2018-12-23T06:37:22.000Z
|
2018-12-23T06:37:22.000Z
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""
Contains common utility functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import distutils.util
import numpy as np
import six
import collections
from collections import deque
import datetime
from paddle.fluid import core
import argparse
import functools
from config import *
def print_arguments(args):
"""Print argparse's arguments.
Usage:
.. code-block:: python
parser = argparse.ArgumentParser()
parser.add_argument("name", default="Jonh", type=str, help="User name.")
args = parser.parse_args()
print_arguments(args)
:param args: Input argparse.Namespace for printing.
:type args: argparse.Namespace
"""
print("----------- Configuration Arguments -----------")
for arg, value in sorted(six.iteritems(vars(args))):
print("%s: %s" % (arg, value))
print("------------------------------------------------")
def add_arguments(argname, type, default, help, argparser, **kwargs):
"""Add argparse's argument.
Usage:
.. code-block:: python
parser = argparse.ArgumentParser()
add_argument("name", str, "Jonh", "User name.", parser)
args = parser.parse_args()
"""
type = distutils.util.strtobool if type == bool else type
argparser.add_argument(
"--" + argname,
default=default,
type=type,
help=help + ' Default: %(default)s.',
**kwargs)
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size):
self.deque = deque(maxlen=window_size)
def add_value(self, value):
self.deque.append(value)
def get_median_value(self):
return np.median(self.deque)
def now_time():
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
class TrainingStats(object):
def __init__(self, window_size, stats_keys):
self.smoothed_losses_and_metrics = {
key: SmoothedValue(window_size)
for key in stats_keys
}
def update(self, stats):
for k, v in self.smoothed_losses_and_metrics.items():
v.add_value(stats[k])
def get(self, extras=None):
stats = collections.OrderedDict()
if extras:
for k, v in extras.items():
stats[k] = v
for k, v in self.smoothed_losses_and_metrics.items():
stats[k] = round(v.get_median_value(), 3)
return stats
def log(self, extras=None):
d = self.get(extras)
strs = ', '.join(str(dict({x: y})).strip('{}') for x, y in d.items())
return strs
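# Usage sketch for the helpers above, added for illustration (keys and values are invented):
#
#     stats = TrainingStats(window_size=20, stats_keys=['loss_cls', 'loss_bbox'])
#     stats.update({'loss_cls': 0.31, 'loss_bbox': 0.12})
#     print(now_time(), stats.log(extras={'iter': 100, 'lr': 0.01}))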
def parse_args():
"""return all args
"""
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
# ENV
add_arg('parallel', bool, True, "Whether use parallel.")
add_arg('use_gpu', bool, True, "Whether use GPU.")
add_arg('model_save_dir', str, 'output', "The path to save model.")
add_arg('pretrained_model', str, 'imagenet_resnet50_fusebn', "The init model path.")
add_arg('dataset', str, 'coco2017', "coco2014, coco2017.")
add_arg('class_num', int, 81, "Class number.")
add_arg('data_dir', str, 'dataset/coco', "The data root path.")
add_arg('use_pyreader', bool, True, "Use pyreader.")
add_arg('use_profile', bool, False, "Whether use profiler.")
add_arg('padding_minibatch',bool, False,
"If False, only resize image and not pad, image shape is different between"
" GPUs in one mini-batch. If True, image shape is the same in one mini-batch.")
#SOLVER
add_arg('learning_rate', float, 0.01, "Learning rate.")
add_arg('max_iter', int, 180000, "Iter number.")
add_arg('log_window', int, 20, "Log smooth window, set 1 for debug, set 20 for train.")
# RCNN
# RPN
add_arg('anchor_sizes', int, [32,64,128,256,512], "The size of anchors.")
add_arg('aspect_ratios', float, [0.5,1.0,2.0], "The ratio of anchors.")
add_arg('variance', float, [1.,1.,1.,1.], "The variance of anchors.")
add_arg('rpn_stride', float, [16.,16.], "Stride of the feature map that RPN is attached.")
add_arg('rpn_nms_thresh', float, 0.7, "NMS threshold used on RPN proposals")
# TRAIN VAL INFER
add_arg('MASK_ON', bool, False, "Option for different models. If False, choose faster_rcnn. If True, choose mask_rcnn")
add_arg('im_per_batch', int, 1, "Minibatch size.")
add_arg('max_size', int, 1333, "The resized image height.")
add_arg('scales', int, [800], "The resized image height.")
add_arg('batch_size_per_im',int, 512, "fast rcnn head batch size")
add_arg('pixel_means', float, [102.9801, 115.9465, 122.7717], "pixel mean")
add_arg('nms_thresh', float, 0.5, "NMS threshold.")
add_arg('score_thresh', float, 0.05, "score threshold for NMS.")
add_arg('snapshot_stride', int, 10000, "save model every snapshot stride.")
# SINGLE EVAL AND DRAW
add_arg('draw_threshold', float, 0.8, "Confidence threshold to draw bbox.")
add_arg('image_path', str, 'dataset/coco/val2017', "The image path used to inference and visualize.")
add_arg('image_name', str, '', "The single image used to inference and visualize.")
# ce
parser.add_argument(
'--enable_ce', action='store_true', help='If set, run the task with continuous evaluation logs.')
# yapf: enable
args = parser.parse_args()
file_name = sys.argv[0]
if 'train' in file_name or 'profile' in file_name:
merge_cfg_from_args(args, 'train')
else:
merge_cfg_from_args(args, 'val')
return args
| 37.960227 | 123 | 0.626553 |
7948bc771a28060b74b667f943c7e964840c4d63
| 2,680 |
py
|
Python
|
2d-fluvial/dataloader.py
|
sees-usc/geo-selection-inversion
|
31fd16389d1e6a2b057ab71e1db87333572d1463
|
[
"Apache-2.0"
] | null | null | null |
2d-fluvial/dataloader.py
|
sees-usc/geo-selection-inversion
|
31fd16389d1e6a2b057ab71e1db87333572d1463
|
[
"Apache-2.0"
] | 1 |
2021-11-17T05:31:34.000Z
|
2021-11-17T05:31:34.000Z
|
2d-fluvial/dataloader.py
|
sees-usc/geo-selection-inversion
|
31fd16389d1e6a2b057ab71e1db87333572d1463
|
[
"Apache-2.0"
] | null | null | null |
import random
import string
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
class DataLoader:
def __init__(self, simulator, verbose=False):
self.verbose = verbose
self.x_train = []
self.x_test = []
self.y_train = []
self.y_test = []
self.y_reg_train = []
self.y_reg_test = []
self.sim = simulator
self.maxs = []
def normalize(self, x):
x_min = np.min(x)
x_max = np.max(x)
return (x - x_min)/(x_max - x_min)
def load_data(self):
'''create (simulate) a synthetic "time series" data vector (y) for each of the input (x) such that y=Gx and G is linear
self.sim represents some abstract function (i.e. fluid flow simulator)
self.y_reg is presimulated
'''
        x = np.load(os.path.join("data", "M.npy"))
        y_reg = np.load(os.path.join("data", "D.npy"))
#reshape the models
x_r = np.zeros([x.shape[0], 100, 100, 1])
for i in range(x.shape[0]):
x_r[i,:,:,:] = np.reshape(x[i,:], [1, 100, 100, 1])
x = x_r
self.maxs = np.max(y_reg, axis=0)
y_reg = y_reg/self.maxs
#create label, for every 500 models for 5 scenarios
y = np.zeros([x.shape[0], ], dtype=np.int32)
for i in range(5):
y[i*500:i*500+500] = i
#randomly sample from five scenarios
np.random.seed(999)
indexes = np.random.permutation(np.arange(0, x.shape[0], dtype=np.int32))
partition = int(x.shape[0]*0.8)
train_idx = indexes[0:partition]
test_idx = indexes[partition:]
self.x_train = x[train_idx]
self.x_test = x[test_idx]
self.y_train = y[train_idx]
self.y_test = y[test_idx]
self.y_reg_train = y_reg[train_idx]
self.y_reg_test = y_reg[test_idx]
if self.verbose:
print("Loaded training data x {:s} and y {:s} and y_labels {:s}".format(str(self.x_train.shape), str(self.y_reg_train.shape), str(self.y_train.shape)))
print("Loaded testing data x {:s} and y {:s} and y_labels {:s}".format(str(self.x_test.shape), str(self.y_reg_test.shape), str(self.y_test.shape)))
return self.x_train, self.x_test, self.y_train, self.y_test, self.y_reg_train, self.y_reg_test
def simulator(self, ms):
'''simulate observations for a given set of models
'''
ms = np.where(ms<0.5, 0, 1)
d_dim = self.sim.shape[-1]
ds = np.zeros([ms.shape[0], d_dim])
for i in range(ms.shape[0]):
print("Running simulation ", i)
ds[i:i+1, :] = np.reshape((ms[i:i+1, :, :, 0]), [1, ms.shape[1]*ms.shape[2]])@self.sim
ds[i:i+1, :] = ds[i:i+1, :] /np.squeeze(self.maxs)
return np.expand_dims(ds, axis=-1)
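# Usage sketch added for illustration. A random matrix stands in for the linear
# operator G (shape (100*100, n_obs)); load_data() still expects data/M.npy and
# data/D.npy to exist, and n_obs must match the observation dimension of D.npy
# for simulator() to rescale correctly.
if __name__ == "__main__":
    G = np.random.rand(100 * 100, 25)
    loader = DataLoader(simulator=G, verbose=True)
    x_tr, x_te, y_tr, y_te, y_reg_tr, y_reg_te = loader.load_data()
    d_pred = loader.simulator(x_te[:2])  # shape (2, n_obs, 1)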
| 29.450549 | 154 | 0.657463 |
7948bca7cba26e291e8de9ff71ddee1cc9187899
| 2,315 |
py
|
Python
|
projects/python_plugin_code/pandas_1.2_code/tests/html_meta_data/test_meta.py
|
rendner/py-plugin-dataframe-viewer
|
188585bd31a6c14413949865b3467dcbf6f5e2d1
|
[
"Apache-2.0"
] | 1 |
2021-11-07T03:47:51.000Z
|
2021-11-07T03:47:51.000Z
|
projects/python_plugin_code/pandas_1.2_code/tests/html_meta_data/test_meta.py
|
rendner/py-plugin-dataframe-viewer
|
188585bd31a6c14413949865b3467dcbf6f5e2d1
|
[
"Apache-2.0"
] | null | null | null |
projects/python_plugin_code/pandas_1.2_code/tests/html_meta_data/test_meta.py
|
rendner/py-plugin-dataframe-viewer
|
188585bd31a6c14413949865b3467dcbf6f5e2d1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 cms.rendner (Daniel Schmidt)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import pytest
from plugin_code.patched_styler import PatchedStyler
from tests.helpers.asserts.table_extractor import TableExtractor, OffsetIndexTranslator, SequenceIndexTranslator
df = pd.DataFrame.from_dict({
"col_0": [0, 1, 2, 3, 4],
"col_1": [5, 6, 7, 8, 9],
"col_2": [10, 11, 12, 13, 14],
"col_3": [15, 16, 17, 18, 19],
"col_4": [20, 21, 22, 23, 24],
})
@pytest.mark.parametrize(
"styler, expected_row_indexer_class, expected_col_indexer_class", [
(df.style, OffsetIndexTranslator, OffsetIndexTranslator),
(df.style.hide_index().hide_columns(subset=df.columns), OffsetIndexTranslator, OffsetIndexTranslator),
(df.style.hide_columns(subset=['col_1']), OffsetIndexTranslator, SequenceIndexTranslator),
(df.style.hide_index(), OffsetIndexTranslator, OffsetIndexTranslator),
])
def test_correct_indexer(styler, expected_row_indexer_class, expected_col_indexer_class):
table_extractor = TableExtractor()
patched_styler = PatchedStyler(styler)
rows_per_chunk = 2
cols_per_chunk = 2
table_structure = patched_styler.get_table_structure()
for ri in range(0, table_structure.visible_rows_count, rows_per_chunk):
for ci in range(0, table_structure.visible_columns_count, cols_per_chunk):
chunk_html = patched_styler.render_chunk(
ri,
ci,
ri + rows_per_chunk,
ci + cols_per_chunk
)
table_extractor.extract(chunk_html)
assert isinstance(table_extractor.ctx.col_indexer, expected_col_indexer_class)
assert isinstance(table_extractor.ctx.row_indexer, expected_row_indexer_class)
| 40.614035 | 112 | 0.714471 |
7948bd2959c9a88f180f6ff153c3b93d0b338ebe
| 3,549 |
py
|
Python
|
bindings/python/ensmallen/datasets/string/stanieriacyanosphaera.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 5 |
2021-09-10T18:31:58.000Z
|
2022-03-24T04:28:04.000Z
|
bindings/python/ensmallen/datasets/string/stanieriacyanosphaera.py
|
AnacletoLAB/ensmallen_graph
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 18 |
2021-01-07T16:47:39.000Z
|
2021-08-12T21:51:32.000Z
|
bindings/python/ensmallen/datasets/string/stanieriacyanosphaera.py
|
AnacletoLAB/ensmallen
|
b2c1b18fb1e5801712852bcc239f239e03076f09
|
[
"MIT"
] | 3 |
2021-01-14T02:20:59.000Z
|
2021-08-04T19:09:52.000Z
|
"""
This file offers the methods to automatically retrieve the graph Stanieria cyanosphaera.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def StanieriaCyanosphaera(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Stanieria cyanosphaera graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Stanieria cyanosphaera graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="StanieriaCyanosphaera",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
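# Usage sketch added for illustration; retrieval needs network access to the STRING
# repository (or a previously populated cache under graphs/string).
if __name__ == "__main__":
    graph = StanieriaCyanosphaera(directed=False, version="links.v11.5")
    print(graph)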
| 32.861111 | 223 | 0.677656 |
7948bd7d09cf1dbd26dee5eab201ac3dc58b399f
| 5,878 |
py
|
Python
|
discord/stage_instance.py
|
CuzImSyntax/dis.py
|
2f56d546f2594e1dbb6955c8b13b3a884a04eca6
|
[
"MIT"
] | 1 |
2021-09-11T13:39:44.000Z
|
2021-09-11T13:39:44.000Z
|
discord/stage_instance.py
|
CuzImSyntax/discordIO
|
47cd508ed48004aa61a41a5b196d3ff003456219
|
[
"MIT"
] | null | null | null |
discord/stage_instance.py
|
CuzImSyntax/discordIO
|
47cd508ed48004aa61a41a5b196d3ff003456219
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present CuzImSyntax
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Optional, TYPE_CHECKING
from .utils import MISSING, cached_slot_property
from .mixins import Hashable
from .errors import InvalidArgument
from .enums import StagePrivacyLevel, try_enum
__all__ = (
'StageInstance',
)
if TYPE_CHECKING:
from .types.channel import StageInstance as StageInstancePayload
from .state import ConnectionState
from .channel import StageChannel
from .guild import Guild
class StageInstance(Hashable):
"""Represents a stage instance of a stage channel in a guild.
.. versionadded:: 2.0
.. container:: operations
.. describe:: x == y
Checks if two stage instances are equal.
.. describe:: x != y
Checks if two stage instances are not equal.
.. describe:: hash(x)
Returns the stage instance's hash.
Attributes
-----------
id: :class:`int`
The stage instance's ID.
guild: :class:`Guild`
The guild that the stage instance is running in.
channel_id: :class:`int`
The ID of the channel that the stage instance is running in.
topic: :class:`str`
The topic of the stage instance.
privacy_level: :class:`StagePrivacyLevel`
The privacy level of the stage instance.
discoverable_disabled: :class:`bool`
Whether discoverability for the stage instance is disabled.
"""
__slots__ = (
'_state',
'id',
'guild',
'channel_id',
'topic',
'privacy_level',
'discoverable_disabled',
'_cs_channel',
)
def __init__(self, *, state: ConnectionState, guild: Guild, data: StageInstancePayload) -> None:
self._state = state
self.guild = guild
self._update(data)
def _update(self, data: StageInstancePayload):
self.id: int = int(data['id'])
self.channel_id: int = int(data['channel_id'])
self.topic: str = data['topic']
self.privacy_level: StagePrivacyLevel = try_enum(StagePrivacyLevel, data['privacy_level'])
self.discoverable_disabled: bool = data.get('discoverable_disabled', False)
def __repr__(self) -> str:
return f'<StageInstance id={self.id} guild={self.guild!r} channel_id={self.channel_id} topic={self.topic!r}>'
@cached_slot_property('_cs_channel')
def channel(self) -> Optional[StageChannel]:
"""Optional[:class:`StageChannel`]: The channel that stage instance is running in."""
# the returned channel will always be a StageChannel or None
return self._state.get_channel(self.channel_id) # type: ignore
def is_public(self) -> bool:
return self.privacy_level is StagePrivacyLevel.public
async def edit(self, *, topic: str = MISSING, privacy_level: StagePrivacyLevel = MISSING, reason: Optional[str] = None) -> None:
"""|coro|
Edits the stage instance.
You must have the :attr:`~Permissions.manage_channels` permission to
use this.
Parameters
-----------
topic: :class:`str`
The stage instance's new topic.
privacy_level: :class:`StagePrivacyLevel`
The stage instance's new privacy level.
reason: :class:`str`
The reason the stage instance was edited. Shows up on the audit log.
Raises
------
InvalidArgument
If the ``privacy_level`` parameter is not the proper type.
Forbidden
You do not have permissions to edit the stage instance.
HTTPException
Editing a stage instance failed.
"""
payload = {}
if topic is not MISSING:
payload['topic'] = topic
if privacy_level is not MISSING:
if not isinstance(privacy_level, StagePrivacyLevel):
raise InvalidArgument('privacy_level field must be of type PrivacyLevel')
payload['privacy_level'] = privacy_level.value
if payload:
await self._state.http.edit_stage_instance(self.channel_id, **payload, reason=reason)
async def delete(self, *, reason: Optional[str] = None) -> None:
"""|coro|
Deletes the stage instance.
You must have the :attr:`~Permissions.manage_channels` permission to
use this.
Parameters
-----------
reason: :class:`str`
The reason the stage instance was deleted. Shows up on the audit log.
Raises
------
Forbidden
You do not have permissions to delete the stage instance.
HTTPException
Deleting the stage instance failed.
"""
await self._state.http.delete_stage_instance(self.channel_id, reason=reason)
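# Usage sketch added for illustration (not part of the original module). It assumes
# an already-resolved StageInstance named `instance` inside an async context:
#
#     await instance.edit(topic="Community Q&A", privacy_level=StagePrivacyLevel.guild_only)
#     await instance.delete(reason="Event finished")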
| 33.20904 | 132 | 0.6623 |
7948bf95f48d341182df24c9e4b019dba8fed4bd
| 2,040 |
py
|
Python
|
matchms/importing/load_from_usi.py
|
maximskorik/matchms
|
922f5afaef123a793194bdd74391027477cbb844
|
[
"Apache-2.0"
] | null | null | null |
matchms/importing/load_from_usi.py
|
maximskorik/matchms
|
922f5afaef123a793194bdd74391027477cbb844
|
[
"Apache-2.0"
] | null | null | null |
matchms/importing/load_from_usi.py
|
maximskorik/matchms
|
922f5afaef123a793194bdd74391027477cbb844
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
import numpy as np
import requests
from ..Spectrum import Spectrum
logger = logging.getLogger("matchms")
def load_from_usi(usi: str, server: str = "https://metabolomics-usi.ucsd.edu",
metadata_harmonization: bool = True):
"""Load spectrum from metabolomics USI.
USI returns JSON data with keys "peaks", "n_peaks" and "precuror_mz"
.. code-block:: python
from matchms.importing import load_from_usi
spectrum = load_from_usi("mzspec:MASSBANK::accession:SM858102")
        print(f"Found spectrum with precursor m/z of {spectrum.get('precursor_mz'):.2f}.")
Parameters
----------
usi:
Provide the usi.
server: string
USI server
metadata_harmonization : bool, optional
Set to False if metadata harmonization to default keys is not desired.
The default is True.
"""
# Create the url
url = server + "/json/?usi1=" + usi
metadata = {"usi": usi, "server": server}
response = requests.get(url)
if response.status_code == 404:
return None
# Extract data and create Spectrum object
try:
spectral_data = response.json()
if spectral_data is None or "peaks" not in spectral_data:
logger.info("Empty spectrum found (no data found). Will not be imported.")
return None
peaks = spectral_data["peaks"]
if len(peaks) == 0:
logger.info("Empty spectrum found (no peaks in 'peaks_json'). Will not be imported.")
return None
mz_list, intensity_list = zip(*peaks)
mz_array = np.array(mz_list)
intensity_array = np.array(intensity_list)
metadata["precursor_mz"] = spectral_data.get("precursor_mz", None)
s = Spectrum(mz_array, intensity_array, metadata,
metadata_harmonization=metadata_harmonization)
return s
except json.decoder.JSONDecodeError:
logger.warning("Failed to unpack json (JSONDecodeError).")
return None
| 30.909091 | 97 | 0.644118 |
7948bff6364f769ed01dac24ef18a8322ab8dda5
| 2,988 |
py
|
Python
|
forte/data/container.py
|
bhaskar2443053/forte
|
95fabd94126d45c0db07cdcc197049ed1859d228
|
[
"Apache-2.0"
] | null | null | null |
forte/data/container.py
|
bhaskar2443053/forte
|
95fabd94126d45c0db07cdcc197049ed1859d228
|
[
"Apache-2.0"
] | null | null | null |
forte/data/container.py
|
bhaskar2443053/forte
|
95fabd94126d45c0db07cdcc197049ed1859d228
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Forte Container module.
"""
# Disable some pylint check for stub and overloads.
# pylint: disable=function-redefined,multiple-statements
from abc import abstractmethod
from typing import Dict, Generic, Set, Tuple, TypeVar, Iterator
__all__ = [
"EntryContainer",
"ContainerType",
"BasePointer",
]
E = TypeVar("E")
L = TypeVar("L")
G = TypeVar("G")
class BasePointer:
"""
Objects to point to other objects in the data pack.
"""
def __str__(self):
raise NotImplementedError
def __getstate__(self):
state = self.__dict__.copy()
return state
class EntryContainer(Generic[E, L, G]):
def __init__(self):
# Record the set of entries created by some components.
self._creation_records: Dict[str, Set[int]] = {}
# Record the set of fields modified by this component. The 2-tuple
# identify the entry field, such as (2, lemma).
self._field_records: Dict[str, Set[Tuple[int, str]]] = {}
def __setstate__(self, state):
r"""In deserialization,
- The :class:`IdManager` is recreated from the id count.
"""
self.__dict__.update(state)
if "creation_records" in self.__dict__:
self._creation_records = self.__dict__.pop("creation_records")
if "field_records" in self.__dict__:
self._field_records = self.__dict__.pop("field_records")
@abstractmethod
def on_entry_creation(self, entry: E):
raise NotImplementedError
@abstractmethod
def record_field(self, entry_id: int, field_name: str):
raise NotImplementedError
@abstractmethod
def _validate(self, item: E) -> bool:
r"""Validate whether this entry type can be added. This method is
called by the :meth:`~forte.data.ontology.core.Entry.__init__` method
when an instance of :class:`~forte.data.ontology.core.Entry` is being
added to the pack.
Args:
item: The entry itself.
"""
raise NotImplementedError
@abstractmethod
def get_entry(self, ptr: int) -> E:
raise NotImplementedError
def get_span_text(self, begin: int, end: int):
raise NotImplementedError
def get_all_creator(self) -> Iterator[str]:
yield from self._creation_records.keys()
ContainerType = TypeVar("ContainerType", bound=EntryContainer)
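# Minimal illustrative subclass sketch (not part of Forte; the names below are
# hypothetical), showing which abstract hooks a concrete container is expected to fill in:
# class _ToyContainer(EntryContainer):
#     def on_entry_creation(self, entry):
#         self._creation_records.setdefault("toy_component", set()).add(id(entry))
#     def record_field(self, entry_id, field_name):
#         self._field_records.setdefault("toy_component", set()).add((entry_id, field_name))
#     def _validate(self, item):
#         return True
#     def get_entry(self, ptr):
#         raise KeyError(ptr)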
| 29.584158 | 77 | 0.677041 |
7948c1451e18296a8c4d038cc00e22177b21a941
| 12,235 |
py
|
Python
|
resource_manage_sdk/model/easy_command/task_detail_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5 |
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
resource_manage_sdk/model/easy_command/task_detail_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
resource_manage_sdk/model/easy_command/task_detail_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: task_detail.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from resource_manage_sdk.model.easy_command import task_callback_pb2 as resource__manage__sdk_dot_model_dot_easy__command_dot_task__callback__pb2
from resource_manage_sdk.model.easy_command import target_log_pb2 as resource__manage__sdk_dot_model_dot_easy__command_dot_target__log__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='task_detail.proto',
package='easy_command',
syntax='proto3',
serialized_options=_b('ZFgo.easyops.local/contracts/protorepo-models/easyops/model/easy_command'),
serialized_pb=_b('\n\x11task_detail.proto\x12\x0c\x65\x61sy_command\x1a:resource_manage_sdk/model/easy_command/task_callback.proto\x1a\x37resource_manage_sdk/model/easy_command/target_log.proto\"\xd7\x03\n\nTaskDetail\x12\x0e\n\x06taskId\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0c\n\x04type\x18\x03 \x01(\t\x12\x11\n\toperation\x18\x04 \x01(\t\x12\x0f\n\x07groupId\x18\x05 \x01(\t\x12\r\n\x05\x61ppId\x18\x06 \x01(\t\x12\x11\n\tclusterId\x18\x07 \x01(\t\x12\x11\n\tpackageId\x18\x08 \x01(\t\x12\x11\n\tversionId\x18\t \x01(\t\x12\x12\n\nneedNotify\x18\n \x01(\x08\x12,\n\x08\x63\x61llback\x18\x0b \x01(\x0b\x32\x1a.easy_command.TaskCallback\x12\x10\n\x08\x62\x61tchNum\x18\x0c \x01(\x05\x12\x15\n\rbatchInterval\x18\r \x01(\x05\x12\x12\n\nfailedStop\x18\x0e \x01(\x08\x12\x0b\n\x03org\x18\x0f \x01(\x05\x12\x10\n\x08operator\x18\x10 \x01(\t\x12\x0e\n\x06status\x18\x11 \x01(\t\x12\x0c\n\x04\x63ode\x18\x12 \x01(\x05\x12\x10\n\x08usedTime\x18\x13 \x01(\x05\x12\x11\n\tstartTime\x18\x14 \x01(\t\x12\x12\n\nupdateTime\x18\x15 \x01(\t\x12\x0f\n\x07\x65ndTime\x18\x16 \x01(\t\x12+\n\ntargetsLog\x18\x17 \x03(\x0b\x32\x17.easy_command.TargetLogBHZFgo.easyops.local/contracts/protorepo-models/easyops/model/easy_commandb\x06proto3')
,
dependencies=[resource__manage__sdk_dot_model_dot_easy__command_dot_task__callback__pb2.DESCRIPTOR,resource__manage__sdk_dot_model_dot_easy__command_dot_target__log__pb2.DESCRIPTOR,])
_TASKDETAIL = _descriptor.Descriptor(
name='TaskDetail',
full_name='easy_command.TaskDetail',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='taskId', full_name='easy_command.TaskDetail.taskId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='easy_command.TaskDetail.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='easy_command.TaskDetail.type', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operation', full_name='easy_command.TaskDetail.operation', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='groupId', full_name='easy_command.TaskDetail.groupId', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='appId', full_name='easy_command.TaskDetail.appId', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='clusterId', full_name='easy_command.TaskDetail.clusterId', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packageId', full_name='easy_command.TaskDetail.packageId', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='versionId', full_name='easy_command.TaskDetail.versionId', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='needNotify', full_name='easy_command.TaskDetail.needNotify', index=9,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='callback', full_name='easy_command.TaskDetail.callback', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchNum', full_name='easy_command.TaskDetail.batchNum', index=11,
number=12, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchInterval', full_name='easy_command.TaskDetail.batchInterval', index=12,
number=13, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='failedStop', full_name='easy_command.TaskDetail.failedStop', index=13,
number=14, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='org', full_name='easy_command.TaskDetail.org', index=14,
number=15, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operator', full_name='easy_command.TaskDetail.operator', index=15,
number=16, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='easy_command.TaskDetail.status', index=16,
number=17, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='code', full_name='easy_command.TaskDetail.code', index=17,
number=18, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='usedTime', full_name='easy_command.TaskDetail.usedTime', index=18,
number=19, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='startTime', full_name='easy_command.TaskDetail.startTime', index=19,
number=20, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='updateTime', full_name='easy_command.TaskDetail.updateTime', index=20,
number=21, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='endTime', full_name='easy_command.TaskDetail.endTime', index=21,
number=22, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='targetsLog', full_name='easy_command.TaskDetail.targetsLog', index=22,
number=23, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=153,
serialized_end=624,
)
_TASKDETAIL.fields_by_name['callback'].message_type = resource__manage__sdk_dot_model_dot_easy__command_dot_task__callback__pb2._TASKCALLBACK
_TASKDETAIL.fields_by_name['targetsLog'].message_type = resource__manage__sdk_dot_model_dot_easy__command_dot_target__log__pb2._TARGETLOG
DESCRIPTOR.message_types_by_name['TaskDetail'] = _TASKDETAIL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TaskDetail = _reflection.GeneratedProtocolMessageType('TaskDetail', (_message.Message,), {
'DESCRIPTOR' : _TASKDETAIL,
'__module__' : 'task_detail_pb2'
# @@protoc_insertion_point(class_scope:easy_command.TaskDetail)
})
_sym_db.RegisterMessage(TaskDetail)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 52.965368 | 1,242 | 0.746955 |
7948c203b13283171734d7e581161f22b3632daa
| 6,041 |
py
|
Python
|
mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py
|
deepakksingh/mmdetection
|
b0d845f1fecf8064db30ef6b456b6ef5f36fa40f
|
[
"Apache-2.0"
] | 232 |
2021-05-25T12:55:24.000Z
|
2022-03-25T07:58:49.000Z
|
mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py
|
deepakksingh/mmdetection
|
b0d845f1fecf8064db30ef6b456b6ef5f36fa40f
|
[
"Apache-2.0"
] | 51 |
2021-05-29T06:36:54.000Z
|
2022-03-27T09:24:39.000Z
|
mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py
|
deepakksingh/mmdetection
|
b0d845f1fecf8064db30ef6b456b6ef5f36fa40f
|
[
"Apache-2.0"
] | 66 |
2021-06-01T03:40:08.000Z
|
2022-03-30T16:51:21.000Z
|
import numpy as np
import torch
from ..builder import BBOX_SAMPLERS
from .random_sampler import RandomSampler
@BBOX_SAMPLERS.register_module()
class IoUBalancedNegSampler(RandomSampler):
"""IoU Balanced Sampling
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
Sampling proposals according to their IoU. `floor_fraction` of needed RoIs
are sampled from proposals whose IoU are lower than `floor_thr` randomly.
The others are sampled from proposals whose IoU are higher than
`floor_thr`. These proposals are sampled from some bins evenly, which are
split by `num_bins` via IoU evenly.
Args:
num (int): number of proposals.
pos_fraction (float): fraction of positive proposals.
floor_thr (float): threshold (minimum) IoU for IoU balanced sampling,
set to -1 if all using IoU balanced sampling.
floor_fraction (float): sampling fraction of proposals under floor_thr.
num_bins (int): number of bins in IoU balanced sampling.
"""
def __init__(self,
num,
pos_fraction,
floor_thr=-1,
floor_fraction=0,
num_bins=3,
**kwargs):
super(IoUBalancedNegSampler, self).__init__(num, pos_fraction,
**kwargs)
assert floor_thr >= 0 or floor_thr == -1
assert 0 <= floor_fraction <= 1
assert num_bins >= 1
self.floor_thr = floor_thr
self.floor_fraction = floor_fraction
self.num_bins = num_bins
def sample_via_interval(self, max_overlaps, full_set, num_expected):
max_iou = max_overlaps.max()
iou_interval = (max_iou - self.floor_thr) / self.num_bins
per_num_expected = int(num_expected / self.num_bins)
sampled_inds = []
for i in range(self.num_bins):
start_iou = self.floor_thr + i * iou_interval
end_iou = self.floor_thr + (i + 1) * iou_interval
tmp_set = set(
np.where(
np.logical_and(max_overlaps >= start_iou,
max_overlaps < end_iou))[0])
tmp_inds = list(tmp_set & full_set)
if len(tmp_inds) > per_num_expected:
tmp_sampled_set = self.random_choice(tmp_inds,
per_num_expected)
else:
                tmp_sampled_set = np.array(tmp_inds, dtype=int)
sampled_inds.append(tmp_sampled_set)
sampled_inds = np.concatenate(sampled_inds)
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(list(full_set - set(sampled_inds)))
if len(extra_inds) > num_extra:
extra_inds = self.random_choice(extra_inds, num_extra)
sampled_inds = np.concatenate([sampled_inds, extra_inds])
return sampled_inds
def _sample_neg(self, assign_result, num_expected, **kwargs):
neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
if neg_inds.numel() != 0:
neg_inds = neg_inds.squeeze(1)
if len(neg_inds) <= num_expected:
return neg_inds
else:
max_overlaps = assign_result.max_overlaps.cpu().numpy()
# balance sampling for negative samples
neg_set = set(neg_inds.cpu().numpy())
if self.floor_thr > 0:
floor_set = set(
np.where(
np.logical_and(max_overlaps >= 0,
max_overlaps < self.floor_thr))[0])
iou_sampling_set = set(
np.where(max_overlaps >= self.floor_thr)[0])
elif self.floor_thr == 0:
floor_set = set(np.where(max_overlaps == 0)[0])
iou_sampling_set = set(
np.where(max_overlaps > self.floor_thr)[0])
else:
floor_set = set()
iou_sampling_set = set(
np.where(max_overlaps > self.floor_thr)[0])
# for sampling interval calculation
self.floor_thr = 0
floor_neg_inds = list(floor_set & neg_set)
iou_sampling_neg_inds = list(iou_sampling_set & neg_set)
num_expected_iou_sampling = int(num_expected *
(1 - self.floor_fraction))
if len(iou_sampling_neg_inds) > num_expected_iou_sampling:
if self.num_bins >= 2:
iou_sampled_inds = self.sample_via_interval(
max_overlaps, set(iou_sampling_neg_inds),
num_expected_iou_sampling)
else:
iou_sampled_inds = self.random_choice(
iou_sampling_neg_inds, num_expected_iou_sampling)
else:
iou_sampled_inds = np.array(
                    iou_sampling_neg_inds, dtype=int)
num_expected_floor = num_expected - len(iou_sampled_inds)
if len(floor_neg_inds) > num_expected_floor:
sampled_floor_inds = self.random_choice(
floor_neg_inds, num_expected_floor)
else:
                sampled_floor_inds = np.array(floor_neg_inds, dtype=int)
sampled_inds = np.concatenate(
(sampled_floor_inds, iou_sampled_inds))
if len(sampled_inds) < num_expected:
num_extra = num_expected - len(sampled_inds)
extra_inds = np.array(list(neg_set - set(sampled_inds)))
if len(extra_inds) > num_extra:
extra_inds = self.random_choice(extra_inds, num_extra)
sampled_inds = np.concatenate((sampled_inds, extra_inds))
sampled_inds = torch.from_numpy(sampled_inds).long().to(
assign_result.gt_inds.device)
return sampled_inds
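# Hypothetical mmdet-style config sketch (values are illustrative, not prescriptive),
# showing how this sampler is typically wired into an R-CNN training config:
# train_cfg = dict(
#     rcnn=dict(
#         sampler=dict(
#             type='IoUBalancedNegSampler',
#             num=512,
#             pos_fraction=0.25,
#             floor_thr=-1,
#             floor_fraction=0,
#             num_bins=3)))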
| 43.775362 | 79 | 0.578878 |
7948c2201ac2c9dd120543c19363ced7ca3f257c
| 6,994 |
py
|
Python
|
torchero/meters/confusion_matrix.py
|
juancruzsosa/torchero
|
d1440b7a9c3ab2c1d3abbb282abb9ee1ea240797
|
[
"MIT"
] | 10 |
2020-07-06T13:35:26.000Z
|
2021-08-10T09:46:53.000Z
|
torchero/meters/confusion_matrix.py
|
juancruzsosa/torchero
|
d1440b7a9c3ab2c1d3abbb282abb9ee1ea240797
|
[
"MIT"
] | 6 |
2020-07-07T20:52:16.000Z
|
2020-07-14T04:05:02.000Z
|
torchero/meters/confusion_matrix.py
|
juancruzsosa/torchero
|
d1440b7a9c3ab2c1d3abbb282abb9ee1ea240797
|
[
"MIT"
] | 1 |
2021-06-28T17:56:11.000Z
|
2021-06-28T17:56:11.000Z
|
from abc import ABCMeta, abstractmethod
import torch
from torchero.meters.base import BaseMeter
class ConfusionMatrixController(object, metaclass=ABCMeta):
def __init__(self, normalize=False):
self.normalize = normalize
self.reset()
@property
@abstractmethod
def matrix(self):
pass
@abstractmethod
def reset(self):
pass
def increment(self, a, b):
for i, j in zip(a, b):
self.matrix[i][j] += 1
@property
def num_classes(self):
return self.matrix.shape[0]
def plot(self, ax=None, fig=None, classes=None, xlabel="Predicted label", ylabel="True label", title="Confusion Matrix", cmap="Blues", colorbar=False):
try:
from matplotlib import pyplot as plt
except ImportError:
raise ImportError(
"Matplotlib is required in order to plot confusion matrix"
)
if (classes is not None) and (len(classes) != self.num_classes):
raise ValueError("number of classes is: {} but {} were passed!".format(self.num_classes, len(classes)))
if ax is None:
ax = plt.gca()
if fig is None:
fig = plt.gcf()
matrix = self.matrix
normalized_matrix = matrix / matrix.sum(dim=0)
cmap = plt.get_cmap(cmap)
im=ax.imshow(normalized_matrix, cmap=cmap, vmin=0, vmax=1)
ax.set_xticks(range(self.num_classes))
ax.set_yticks(range(self.num_classes))
ax.xaxis.set_ticks_position('top')
ax.xaxis.set_label_position('top')
for i in range(matrix.shape[0]):
for j in range(matrix.shape[1]):
value = matrix[i, j].item()
normalized_value = normalized_matrix[i, j].item()
if self.normalize:
value = '{:.2f}'.format(value)
else:
value = '{}'.format(int(value))
if i == j:
value += " " + "({:.0f}%)".format(normalized_value * 100)
r, g, b, _ = cmap(normalized_value)
text_color = 'white' if r * g * b < 0.5 else 'black'
text = ax.text(j, i, value,
ha="center", va="center", color=text_color)
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if title is not None:
ax.set_title(title)
if classes is not None:
ax.set_xticklabels(classes)
ax.set_yticklabels(classes)
if colorbar:
fig.colorbar(im, ax=ax)
class FixedConfusionMatrixController(ConfusionMatrixController):
def __init__(self, nr_classes, normalize=False):
if not isinstance(nr_classes, int) or nr_classes == 0:
raise Exception(ConfusionMatrix.INVALID_NR_OF_CLASSES_MESSAGE
.format(nr_classes=nr_classes))
self._nr_classes = nr_classes
super(FixedConfusionMatrixController, self).__init__(normalize=normalize)
@property
def matrix(self):
if self.normalize:
return self._matrix / self._matrix.sum(dim=0)
else:
return self._matrix
def reset(self):
self._matrix = torch.zeros(self._nr_classes, self._nr_classes)
def check_inputs(self, xs):
if not ((0 <= xs) & (xs < self._matrix.shape[0])).all():
raise Exception(ConfusionMatrix.INVALID_LABELS_MESSAGE)
def increment(self, a, b):
self.check_inputs(torch.cat([a, b]))
super(FixedConfusionMatrixController, self).increment(a, b)
class ResizableConfusionMatrixController(ConfusionMatrixController):
@property
def matrix(self):
if self.normalize:
return self._matrix / self._matrix.sum(dim=0)
else:
return self._matrix
def reset(self):
self._matrix = torch.zeros(1, 1)
def expand(self, n):
total_rows = n + self._matrix.shape[0]
total_cols = n + self._matrix.shape[1]
old_matrix, self._matrix = self._matrix, torch.zeros(total_rows,
total_cols)
self._matrix[:old_matrix.shape[0], :old_matrix.shape[1]] = old_matrix
def increment(self, a, b):
max_class_nr = max(torch.max(a), torch.max(b))
if max_class_nr >= self._matrix.shape[0]:
n = max_class_nr - self._matrix.shape[0] + 1
self.expand(n)
super(ResizableConfusionMatrixController, self).increment(a, b)
class ConfusionMatrix(BaseMeter):
INVALID_NR_OF_CLASSES_MESSAGE = (
'Expected number of classes to be greater than one. Got {nr_classes}'
)
INVALID_INPUT_TYPE_MESSAGE = (
'Expected input tensors of type LongTensor. Got {type_}'
)
INVALID_BATCH_DIMENSION_MESSAGE = (
        'Expected input tensors of 1-dimension. Got {dims}'
)
INVALID_LENGTHS_MESSAGE = (
'Expected input and targets of same lengths'
)
INVALID_LABELS_MESSAGE = (
'Expected labels between 0 and number of classes'
)
def __init__(self, nr_classes='auto', normalize=False):
""" Constructor
Arguments:
nr_classes (int or str): If 'auto' is passed the confusion matrix will readjust
to the observed ranges. If a number is passed this will reserve the confusion matrix
for that size. Default 'auto'
normalize (bool): IF passed the confusion matrix will hold percentages
"""
if isinstance(nr_classes, str) and nr_classes == 'auto':
self.matrix_controller = ResizableConfusionMatrixController(normalize=normalize)
elif isinstance(nr_classes, int) and nr_classes > 0:
self.matrix_controller = FixedConfusionMatrixController(nr_classes, normalize=normalize)
else:
raise ValueError(self.INVALID_NR_OF_CLASSES_MESSAGE
.format(nr_classes=nr_classes))
self.reset()
def reset(self):
self.matrix_controller.reset()
def check_tensor(self, a):
if (isinstance(a, torch.FloatTensor) or
isinstance(a, torch.cuda.FloatTensor)):
raise Exception(self.INVALID_INPUT_TYPE_MESSAGE
.format(type_=a.type()))
if a.dim() > 1:
raise Exception(self.INVALID_BATCH_DIMENSION_MESSAGE
.format(dims=a.dim()))
def measure(self, a, b):
if a.dim() == 2:
a = a.topk(k=1, dim=1)[1].squeeze(1)
if b.dim() == 2:
b = b.topk(k=1, dim=1)[1].squeeze(1)
self.check_tensor(a)
self.check_tensor(b)
if len(a) != len(b):
raise Exception(self.INVALID_LENGTHS_MESSAGE)
self.matrix_controller.increment(a, b)
def value(self):
return self.matrix_controller
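if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module); assumes only that
    # torch is importable, which the imports above already require.
    meter = ConfusionMatrix(nr_classes=3)
    predictions = torch.LongTensor([0, 1, 2, 1])
    targets = torch.LongTensor([0, 1, 1, 1])
    meter.measure(predictions, targets)
    # value() returns the matrix controller; its .matrix property holds the counts.
    print(meter.value().matrix)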
| 34.117073 | 155 | 0.591221 |
7948c2813024cd7d1a5a678af60c5d4f84b72bd9
| 227 |
py
|
Python
|
server/deploy.py
|
Autophagy/Regn
|
f3cae76f7662b45a062c9a8f20e0f085b5eba3cd
|
[
"MIT"
] | 3 |
2018-04-02T23:35:05.000Z
|
2018-04-03T18:51:56.000Z
|
server/deploy.py
|
Autophagy/Regn
|
f3cae76f7662b45a062c9a8f20e0f085b5eba3cd
|
[
"MIT"
] | null | null | null |
server/deploy.py
|
Autophagy/Regn
|
f3cae76f7662b45a062c9a8f20e0f085b5eba3cd
|
[
"MIT"
] | null | null | null |
from app import create_app, db
import os
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
db.create_all()
if __name__ == "__main__":
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
| 22.7 | 56 | 0.678414 |
7948c2c62855eef8c6e5b22a2d02f4ff62c046a5
| 496 |
py
|
Python
|
python_basico_(logica_de_programacao)/aula20.py
|
Alesxander36/Curso_de_Python3
|
438f25789921e5d61500e3483af62fc41bddc50f
|
[
"MIT"
] | null | null | null |
python_basico_(logica_de_programacao)/aula20.py
|
Alesxander36/Curso_de_Python3
|
438f25789921e5d61500e3483af62fc41bddc50f
|
[
"MIT"
] | null | null | null |
python_basico_(logica_de_programacao)/aula20.py
|
Alesxander36/Curso_de_Python3
|
438f25789921e5d61500e3483af62fc41bddc50f
|
[
"MIT"
] | null | null | null |
# Indices
# 0123456789.................33
frase = 'o rato roeu a roupa do rei de roma'  # Iterable
tamanho_frase = len(frase)
print(tamanho_frase)
contador = 0
nova_string = ''
input_do_usuario = input('Which letter do you want to make uppercase: ')
# Iteration <- iterate
while contador < tamanho_frase:
letra = frase[contador]
if letra == input_do_usuario:
nova_string += input_do_usuario.upper()
else:
nova_string += letra
contador += 1
print(nova_string)
| 21.565217 | 68 | 0.663306 |
7948c4c3b4efe69fbd1db2e3c76ecc667093d073
| 25,992 |
py
|
Python
|
pythonscript/x11-64-cpython/lib/python3.6/asyncio/sslproto.py
|
LeonardoJavalindo/GameJamMiner
|
f0aeff4e666cd0de439f2237e632b8ab03871476
|
[
"Apache-2.0",
"OpenSSL"
] | 7 |
2018-09-10T05:42:46.000Z
|
2021-05-01T12:48:30.000Z
|
pythonscript/x11-64-cpython/lib/python3.6/asyncio/sslproto.py
|
LeonardoJavalindo/GameJamMiner
|
f0aeff4e666cd0de439f2237e632b8ab03871476
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
pythonscript/x11-64-cpython/lib/python3.6/asyncio/sslproto.py
|
LeonardoJavalindo/GameJamMiner
|
f0aeff4e666cd0de439f2237e632b8ab03871476
|
[
"Apache-2.0",
"OpenSSL"
] | 1 |
2021-06-09T16:44:57.000Z
|
2021-06-09T16:44:57.000Z
|
import collections
import warnings
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from . import base_events
from . import compat
from . import protocols
from . import transports
from .log import logger
def _create_transport_context(server_side, server_hostname):
if server_side:
raise ValueError('Server side SSL needs a valid SSLContext')
# Client side may pass ssl=True to use a default
# context; in that case the sslcontext passed is None.
# The default is secure for client connections.
if hasattr(ssl, 'create_default_context'):
# Python 3.4+: use up-to-date strong settings.
sslcontext = ssl.create_default_context()
if not server_hostname:
sslcontext.check_hostname = False
else:
# Fallback for Python 3.3.
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.options |= ssl.OP_NO_SSLv3
sslcontext.set_default_verify_paths()
sslcontext.verify_mode = ssl.CERT_REQUIRED
return sslcontext
def _is_sslproto_available():
return hasattr(ssl, "MemoryBIO")
# States of an _SSLPipe.
_UNWRAPPED = "UNWRAPPED"
_DO_HANDSHAKE = "DO_HANDSHAKE"
_WRAPPED = "WRAPPED"
_SHUTDOWN = "SHUTDOWN"
class _SSLPipe(object):
"""An SSL "Pipe".
An SSL pipe allows you to communicate with an SSL/TLS protocol instance
through memory buffers. It can be used to implement a security layer for an
existing connection where you don't have access to the connection's file
descriptor, or for some reason you don't want to use it.
An SSL pipe can be in "wrapped" and "unwrapped" mode. In unwrapped mode,
data is passed through untransformed. In wrapped mode, application level
data is encrypted to SSL record level data and vice versa. The SSL record
level is the lowest level in the SSL protocol suite and is what travels
as-is over the wire.
An SslPipe initially is in "unwrapped" mode. To start SSL, call
do_handshake(). To shutdown SSL again, call unwrap().
"""
max_size = 256 * 1024 # Buffer size passed to read()
def __init__(self, context, server_side, server_hostname=None):
"""
The *context* argument specifies the ssl.SSLContext to use.
The *server_side* argument indicates whether this is a server side or
client side transport.
The optional *server_hostname* argument can be used to specify the
hostname you are connecting to. You may only specify this parameter if
the _ssl module supports Server Name Indication (SNI).
"""
self._context = context
self._server_side = server_side
self._server_hostname = server_hostname
self._state = _UNWRAPPED
self._incoming = ssl.MemoryBIO()
self._outgoing = ssl.MemoryBIO()
self._sslobj = None
self._need_ssldata = False
self._handshake_cb = None
self._shutdown_cb = None
@property
def context(self):
"""The SSL context passed to the constructor."""
return self._context
@property
def ssl_object(self):
"""The internal ssl.SSLObject instance.
Return None if the pipe is not wrapped.
"""
return self._sslobj
@property
def need_ssldata(self):
"""Whether more record level data is needed to complete a handshake
that is currently in progress."""
return self._need_ssldata
@property
def wrapped(self):
"""
Whether a security layer is currently in effect.
Return False during handshake.
"""
return self._state == _WRAPPED
def do_handshake(self, callback=None):
"""Start the SSL handshake.
Return a list of ssldata. A ssldata element is a list of buffers
The optional *callback* argument can be used to install a callback that
will be called when the handshake is complete. The callback will be
called with None if successful, else an exception instance.
"""
if self._state != _UNWRAPPED:
raise RuntimeError('handshake in progress or completed')
self._sslobj = self._context.wrap_bio(
self._incoming, self._outgoing,
server_side=self._server_side,
server_hostname=self._server_hostname)
self._state = _DO_HANDSHAKE
self._handshake_cb = callback
ssldata, appdata = self.feed_ssldata(b'', only_handshake=True)
assert len(appdata) == 0
return ssldata
def shutdown(self, callback=None):
"""Start the SSL shutdown sequence.
Return a list of ssldata. A ssldata element is a list of buffers
The optional *callback* argument can be used to install a callback that
will be called when the shutdown is complete. The callback will be
called without arguments.
"""
if self._state == _UNWRAPPED:
raise RuntimeError('no security layer present')
if self._state == _SHUTDOWN:
raise RuntimeError('shutdown in progress')
assert self._state in (_WRAPPED, _DO_HANDSHAKE)
self._state = _SHUTDOWN
self._shutdown_cb = callback
ssldata, appdata = self.feed_ssldata(b'')
assert appdata == [] or appdata == [b'']
return ssldata
def feed_eof(self):
"""Send a potentially "ragged" EOF.
This method will raise an SSL_ERROR_EOF exception if the EOF is
unexpected.
"""
self._incoming.write_eof()
ssldata, appdata = self.feed_ssldata(b'')
assert appdata == [] or appdata == [b'']
def feed_ssldata(self, data, only_handshake=False):
"""Feed SSL record level data into the pipe.
The data must be a bytes instance. It is OK to send an empty bytes
instance. This can be used to get ssldata for a handshake initiated by
this endpoint.
Return a (ssldata, appdata) tuple. The ssldata element is a list of
buffers containing SSL data that needs to be sent to the remote SSL.
The appdata element is a list of buffers containing plaintext data that
needs to be forwarded to the application. The appdata list may contain
an empty buffer indicating an SSL "close_notify" alert. This alert must
be acknowledged by calling shutdown().
"""
if self._state == _UNWRAPPED:
# If unwrapped, pass plaintext data straight through.
if data:
appdata = [data]
else:
appdata = []
return ([], appdata)
self._need_ssldata = False
if data:
self._incoming.write(data)
ssldata = []
appdata = []
try:
if self._state == _DO_HANDSHAKE:
# Call do_handshake() until it doesn't raise anymore.
self._sslobj.do_handshake()
self._state = _WRAPPED
if self._handshake_cb:
self._handshake_cb(None)
if only_handshake:
return (ssldata, appdata)
# Handshake done: execute the wrapped block
if self._state == _WRAPPED:
# Main state: read data from SSL until close_notify
while True:
chunk = self._sslobj.read(self.max_size)
appdata.append(chunk)
if not chunk: # close_notify
break
elif self._state == _SHUTDOWN:
# Call shutdown() until it doesn't raise anymore.
self._sslobj.unwrap()
self._sslobj = None
self._state = _UNWRAPPED
if self._shutdown_cb:
self._shutdown_cb()
elif self._state == _UNWRAPPED:
# Drain possible plaintext data after close_notify.
appdata.append(self._incoming.read())
except (ssl.SSLError, ssl.CertificateError) as exc:
if getattr(exc, 'errno', None) not in (
ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE,
ssl.SSL_ERROR_SYSCALL):
if self._state == _DO_HANDSHAKE and self._handshake_cb:
self._handshake_cb(exc)
raise
self._need_ssldata = (exc.errno == ssl.SSL_ERROR_WANT_READ)
# Check for record level data that needs to be sent back.
# Happens for the initial handshake and renegotiations.
if self._outgoing.pending:
ssldata.append(self._outgoing.read())
return (ssldata, appdata)
def feed_appdata(self, data, offset=0):
"""Feed plaintext data into the pipe.
Return an (ssldata, offset) tuple. The ssldata element is a list of
buffers containing record level data that needs to be sent to the
remote SSL instance. The offset is the number of plaintext bytes that
were processed, which may be less than the length of data.
NOTE: In case of short writes, this call MUST be retried with the SAME
buffer passed into the *data* argument (i.e. the id() must be the
same). This is an OpenSSL requirement. A further particularity is that
a short write will always have offset == 0, because the _ssl module
does not enable partial writes. And even though the offset is zero,
there will still be encrypted data in ssldata.
"""
assert 0 <= offset <= len(data)
if self._state == _UNWRAPPED:
# pass through data in unwrapped mode
if offset < len(data):
ssldata = [data[offset:]]
else:
ssldata = []
return (ssldata, len(data))
ssldata = []
view = memoryview(data)
while True:
self._need_ssldata = False
try:
if offset < len(view):
offset += self._sslobj.write(view[offset:])
except ssl.SSLError as exc:
# It is not allowed to call write() after unwrap() until the
# close_notify is acknowledged. We return the condition to the
# caller as a short write.
if exc.reason == 'PROTOCOL_IS_SHUTDOWN':
exc.errno = ssl.SSL_ERROR_WANT_READ
if exc.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE,
ssl.SSL_ERROR_SYSCALL):
raise
self._need_ssldata = (exc.errno == ssl.SSL_ERROR_WANT_READ)
# See if there's any record level data back for us.
if self._outgoing.pending:
ssldata.append(self._outgoing.read())
if offset == len(view) or self._need_ssldata:
break
return (ssldata, offset)
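# Hypothetical client-side flow sketch (names and data are illustrative only):
# pipe = _SSLPipe(ssl.create_default_context(), server_side=False,
#                 server_hostname='example.com')
# to_send = pipe.do_handshake()                    # record-level bytes to write to the socket
# ssldata, appdata = pipe.feed_ssldata(received)   # feed bytes read from the socket
# ssldata, consumed = pipe.feed_appdata(b'ping')   # encrypt application data for sending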
class _SSLProtocolTransport(transports._FlowControlMixin,
transports.Transport):
def __init__(self, loop, ssl_protocol, app_protocol):
self._loop = loop
# SSLProtocol instance
self._ssl_protocol = ssl_protocol
self._app_protocol = app_protocol
self._closed = False
def get_extra_info(self, name, default=None):
"""Get optional transport information."""
return self._ssl_protocol._get_extra_info(name, default)
def set_protocol(self, protocol):
self._app_protocol = protocol
def get_protocol(self):
return self._app_protocol
def is_closing(self):
return self._closed
def close(self):
"""Close the transport.
Buffered data will be flushed asynchronously. No more data
will be received. After all buffered data is flushed, the
        protocol's connection_lost() method will (eventually) be called
with None as its argument.
"""
self._closed = True
self._ssl_protocol._start_shutdown()
# On Python 3.3 and older, objects with a destructor part of a reference
# cycle are never destroyed. It's not more the case on Python 3.4 thanks
# to the PEP 442.
if compat.PY34:
def __del__(self):
if not self._closed:
warnings.warn("unclosed transport %r" % self, ResourceWarning,
source=self)
self.close()
def pause_reading(self):
"""Pause the receiving end.
No data will be passed to the protocol's data_received()
method until resume_reading() is called.
"""
self._ssl_protocol._transport.pause_reading()
def resume_reading(self):
"""Resume the receiving end.
Data received will once again be passed to the protocol's
data_received() method.
"""
self._ssl_protocol._transport.resume_reading()
def set_write_buffer_limits(self, high=None, low=None):
"""Set the high- and low-water limits for write flow control.
These two values control when to call the protocol's
pause_writing() and resume_writing() methods. If specified,
the low-water limit must be less than or equal to the
high-water limit. Neither value can be negative.
The defaults are implementation-specific. If only the
high-water limit is given, the low-water limit defaults to an
implementation-specific value less than or equal to the
high-water limit. Setting high to zero forces low to zero as
well, and causes pause_writing() to be called whenever the
buffer becomes non-empty. Setting low to zero causes
resume_writing() to be called only once the buffer is empty.
Use of zero for either limit is generally sub-optimal as it
reduces opportunities for doing I/O and computation
concurrently.
"""
self._ssl_protocol._transport.set_write_buffer_limits(high, low)
def get_write_buffer_size(self):
"""Return the current size of the write buffer."""
return self._ssl_protocol._transport.get_write_buffer_size()
def write(self, data):
"""Write some data bytes to the transport.
This does not block; it buffers the data and arranges for it
to be sent out asynchronously.
"""
if not isinstance(data, (bytes, bytearray, memoryview)):
raise TypeError("data: expecting a bytes-like instance, got {!r}"
.format(type(data).__name__))
if not data:
return
self._ssl_protocol._write_appdata(data)
def can_write_eof(self):
"""Return True if this transport supports write_eof(), False if not."""
return False
def abort(self):
"""Close the transport immediately.
Buffered data will be lost. No more data will be received.
The protocol's connection_lost() method will (eventually) be
called with None as its argument.
"""
self._ssl_protocol._abort()
class SSLProtocol(protocols.Protocol):
"""SSL protocol.
Implementation of SSL on top of a socket using incoming and outgoing
buffers which are ssl.MemoryBIO objects.
"""
def __init__(self, loop, app_protocol, sslcontext, waiter,
server_side=False, server_hostname=None,
call_connection_made=True):
if ssl is None:
raise RuntimeError('stdlib ssl module not available')
if not sslcontext:
sslcontext = _create_transport_context(server_side, server_hostname)
self._server_side = server_side
if server_hostname and not server_side:
self._server_hostname = server_hostname
else:
self._server_hostname = None
self._sslcontext = sslcontext
# SSL-specific extra info. More info are set when the handshake
# completes.
self._extra = dict(sslcontext=sslcontext)
# App data write buffering
self._write_backlog = collections.deque()
self._write_buffer_size = 0
self._waiter = waiter
self._loop = loop
self._app_protocol = app_protocol
self._app_transport = _SSLProtocolTransport(self._loop,
self, self._app_protocol)
# _SSLPipe instance (None until the connection is made)
self._sslpipe = None
self._session_established = False
self._in_handshake = False
self._in_shutdown = False
# transport, ex: SelectorSocketTransport
self._transport = None
self._call_connection_made = call_connection_made
def _wakeup_waiter(self, exc=None):
if self._waiter is None:
return
if not self._waiter.cancelled():
if exc is not None:
self._waiter.set_exception(exc)
else:
self._waiter.set_result(None)
self._waiter = None
def connection_made(self, transport):
"""Called when the low-level connection is made.
Start the SSL handshake.
"""
self._transport = transport
self._sslpipe = _SSLPipe(self._sslcontext,
self._server_side,
self._server_hostname)
self._start_handshake()
def connection_lost(self, exc):
"""Called when the low-level connection is lost or closed.
The argument is an exception object or None (the latter
meaning a regular EOF is received or the connection was
aborted or closed).
"""
if self._session_established:
self._session_established = False
self._loop.call_soon(self._app_protocol.connection_lost, exc)
self._transport = None
self._app_transport = None
self._wakeup_waiter(exc)
def pause_writing(self):
"""Called when the low-level transport's buffer goes over
the high-water mark.
"""
self._app_protocol.pause_writing()
def resume_writing(self):
"""Called when the low-level transport's buffer drains below
the low-water mark.
"""
self._app_protocol.resume_writing()
def data_received(self, data):
"""Called when some SSL data is received.
The argument is a bytes object.
"""
try:
ssldata, appdata = self._sslpipe.feed_ssldata(data)
except ssl.SSLError as e:
if self._loop.get_debug():
logger.warning('%r: SSL error %s (reason %s)',
self, e.errno, e.reason)
self._abort()
return
for chunk in ssldata:
self._transport.write(chunk)
for chunk in appdata:
if chunk:
self._app_protocol.data_received(chunk)
else:
self._start_shutdown()
break
def eof_received(self):
"""Called when the other end of the low-level stream
is half-closed.
If this returns a false value (including None), the transport
will close itself. If it returns a true value, closing the
transport is up to the protocol.
"""
try:
if self._loop.get_debug():
logger.debug("%r received EOF", self)
self._wakeup_waiter(ConnectionResetError)
if not self._in_handshake:
keep_open = self._app_protocol.eof_received()
if keep_open:
logger.warning('returning true from eof_received() '
'has no effect when using ssl')
finally:
self._transport.close()
def _get_extra_info(self, name, default=None):
if name in self._extra:
return self._extra[name]
elif self._transport is not None:
return self._transport.get_extra_info(name, default)
else:
return default
def _start_shutdown(self):
if self._in_shutdown:
return
if self._in_handshake:
self._abort()
else:
self._in_shutdown = True
self._write_appdata(b'')
def _write_appdata(self, data):
self._write_backlog.append((data, 0))
self._write_buffer_size += len(data)
self._process_write_backlog()
def _start_handshake(self):
if self._loop.get_debug():
logger.debug("%r starts SSL handshake", self)
self._handshake_start_time = self._loop.time()
else:
self._handshake_start_time = None
self._in_handshake = True
# (b'', 1) is a special value in _process_write_backlog() to do
# the SSL handshake
self._write_backlog.append((b'', 1))
self._loop.call_soon(self._process_write_backlog)
def _on_handshake_complete(self, handshake_exc):
self._in_handshake = False
sslobj = self._sslpipe.ssl_object
try:
if handshake_exc is not None:
raise handshake_exc
peercert = sslobj.getpeercert()
if not hasattr(self._sslcontext, 'check_hostname'):
# Verify hostname if requested, Python 3.4+ uses check_hostname
# and checks the hostname in do_handshake()
if (self._server_hostname
and self._sslcontext.verify_mode != ssl.CERT_NONE):
ssl.match_hostname(peercert, self._server_hostname)
except BaseException as exc:
if self._loop.get_debug():
if isinstance(exc, ssl.CertificateError):
logger.warning("%r: SSL handshake failed "
"on verifying the certificate",
self, exc_info=True)
else:
logger.warning("%r: SSL handshake failed",
self, exc_info=True)
self._transport.close()
if isinstance(exc, Exception):
self._wakeup_waiter(exc)
return
else:
raise
if self._loop.get_debug():
dt = self._loop.time() - self._handshake_start_time
logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3)
# Add extra info that becomes available after handshake.
self._extra.update(peercert=peercert,
cipher=sslobj.cipher(),
compression=sslobj.compression(),
ssl_object=sslobj,
)
if self._call_connection_made:
self._app_protocol.connection_made(self._app_transport)
self._wakeup_waiter()
self._session_established = True
# In case transport.write() was already called. Don't call
# immediately _process_write_backlog(), but schedule it:
# _on_handshake_complete() can be called indirectly from
# _process_write_backlog(), and _process_write_backlog() is not
# reentrant.
self._loop.call_soon(self._process_write_backlog)
def _process_write_backlog(self):
# Try to make progress on the write backlog.
if self._transport is None:
return
try:
for i in range(len(self._write_backlog)):
data, offset = self._write_backlog[0]
if data:
ssldata, offset = self._sslpipe.feed_appdata(data, offset)
elif offset:
ssldata = self._sslpipe.do_handshake(
self._on_handshake_complete)
offset = 1
else:
ssldata = self._sslpipe.shutdown(self._finalize)
offset = 1
for chunk in ssldata:
self._transport.write(chunk)
if offset < len(data):
self._write_backlog[0] = (data, offset)
# A short write means that a write is blocked on a read
# We need to enable reading if it is paused!
assert self._sslpipe.need_ssldata
if self._transport._paused:
self._transport.resume_reading()
break
# An entire chunk from the backlog was processed. We can
# delete it and reduce the outstanding buffer size.
del self._write_backlog[0]
self._write_buffer_size -= len(data)
except BaseException as exc:
if self._in_handshake:
# BaseExceptions will be re-raised in _on_handshake_complete.
self._on_handshake_complete(exc)
else:
self._fatal_error(exc, 'Fatal error on SSL transport')
if not isinstance(exc, Exception):
# BaseException
raise
def _fatal_error(self, exc, message='Fatal error on transport'):
# Should be called from exception handler only.
if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
self._loop.call_exception_handler({
'message': message,
'exception': exc,
'transport': self._transport,
'protocol': self,
})
if self._transport:
self._transport._force_close(exc)
def _finalize(self):
self._sslpipe = None
if self._transport is not None:
self._transport.close()
def _abort(self):
try:
if self._transport is not None:
self._transport.abort()
finally:
self._finalize()
| 37.131429 | 80 | 0.6008 |
7948c551307777621e02d1b7b73aec4b093a1b1d
| 1,058 |
py
|
Python
|
Discord-Bot/commands/Sync.py
|
akarealemil/WHMCS-Discord-Sync
|
72c9f16e9c3bfa92bf16d718f04b3fee51c8261c
|
[
"MIT"
] | null | null | null |
Discord-Bot/commands/Sync.py
|
akarealemil/WHMCS-Discord-Sync
|
72c9f16e9c3bfa92bf16d718f04b3fee51c8261c
|
[
"MIT"
] | 1 |
2022-03-14T08:49:20.000Z
|
2022-03-31T07:40:54.000Z
|
Discord-Bot/commands/Sync.py
|
akarealemil/WHMCS-Discord-Sync
|
72c9f16e9c3bfa92bf16d718f04b3fee51c8261c
|
[
"MIT"
] | null | null | null |
from discord.ext import commands
from util import other
# Manually syncs all members of the discord
# If they're connected and have one or more active products, they receive the client role
# Otherwise they lose it
class Sync(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True)
@commands.is_owner()
async def sync(self, ctx):
# Deletes original message
await ctx.message.delete()
# Sends information message
message = await ctx.send(embed=other.warning_embed("Manually syncing all members. This may take a while..."))
# Function to sync all members
await other.total_rank_sync(ctx.author.guild, message)
@sync.error
async def show_error(self, ctx, error):
        # If the user fails the 'is_owner' check
if isinstance(error, commands.CheckFailure):
await ctx.send(embed=other.error_embed("You do not have permission to use this command."))
def setup(bot):
bot.add_cog(Sync(bot))
| 31.117647 | 118 | 0.666352 |
7948c614a0f5cd66fc523acd84e6f564a708d470
| 411 |
py
|
Python
|
pygfa/graph_element/parser/__init__.py
|
Francesco2304/pygfa
|
9bf6fb5f0a959685300ab863a0e716a2268109f7
|
[
"MIT"
] | 3 |
2020-06-25T22:47:02.000Z
|
2022-02-27T15:16:02.000Z
|
pygfa/graph_element/parser/__init__.py
|
Francesco2304/pygfa
|
9bf6fb5f0a959685300ab863a0e716a2268109f7
|
[
"MIT"
] | 3 |
2017-08-08T12:24:23.000Z
|
2022-02-27T15:17:25.000Z
|
pygfa/graph_element/parser/__init__.py
|
Francesco2304/pygfa
|
9bf6fb5f0a959685300ab863a0e716a2268109f7
|
[
"MIT"
] | 4 |
2019-02-04T20:54:53.000Z
|
2020-05-14T19:52:24.000Z
|
from pygfa.graph_element.parser import header
from pygfa.graph_element.parser import segment
from pygfa.graph_element.parser import link
from pygfa.graph_element.parser import containment
from pygfa.graph_element.parser import path
from pygfa.graph_element.parser import edge
from pygfa.graph_element.parser import fragment
from pygfa.graph_element.parser import gap
from pygfa.graph_element.parser import group
| 45.666667 | 50 | 0.871046 |
7948c7224a121160f1c0eeb24d04e62b461d9950
| 3,637 |
py
|
Python
|
search_index.py
|
eduard0729/hocrviewer
|
83f7d5024d32019d529a1657cbf5b9081a2d6eff
|
[
"MIT"
] | null | null | null |
search_index.py
|
eduard0729/hocrviewer
|
83f7d5024d32019d529a1657cbf5b9081a2d6eff
|
[
"MIT"
] | null | null | null |
search_index.py
|
eduard0729/hocrviewer
|
83f7d5024d32019d529a1657cbf5b9081a2d6eff
|
[
"MIT"
] | null | null | null |
# encoding=utf-8
from __future__ import unicode_literals
import os
import re
from lxml import etree
from whoosh import highlight
from whoosh.analysis import CharsetFilter, RegexTokenizer
from whoosh.fields import Schema, ID, NUMERIC, TEXT
from whoosh.index import create_in, open_dir
from whoosh.query import And, Term
from whoosh.qparser import QueryParser
from whoosh.support.charset import accent_map
analyzer = RegexTokenizer() | CharsetFilter(accent_map)
schema = Schema(
bookname=ID(stored=True),
pagenum=NUMERIC(stored=True),
content=TEXT(analyzer=analyzer, stored=True)
)
BOOK_PATH = os.path.join(os.path.expanduser('books'), '')
INDEX_PATH = os.path.join(BOOK_PATH, '.index')
class StringFormatter(highlight.Formatter):
def __init__(self, begin_str, end_str):
self.begin_str = begin_str
self.end_str = end_str
def format_token(self, text, token, replace=False):
tokentext = highlight.get_text(text, token, replace)
return "{0}{1}{2}".format(self.begin_str, tokentext, self.end_str)
def _get_index():
if not os.path.exists(INDEX_PATH):
os.mkdir(INDEX_PATH)
ix = create_in(INDEX_PATH, schema)
else:
ix = open_dir(INDEX_PATH)
return ix
def index_book(bookname):
# TODO: Index by paragraph, not page
idx = _get_index()
writer = idx.writer()
writer.delete_by_term('bookname', unicode(bookname))
path = os.path.join(BOOK_PATH, bookname, "{0}.hocr".format(bookname))
bookname = unicode(os.path.splitext(os.path.basename(path))[0])
booktree = etree.parse(path)
for page in booktree.xpath('//div[@class="ocr_page"]'):
# Get cleaned up text for page
text = "\n".join("".join(x.itertext()).strip()
for x in page.xpath('.//span[@class="ocr_line"]'))
pagenum = int(page.get('id')[5:])
writer.add_document(bookname=bookname, pagenum=pagenum, content=text)
writer.commit()
def search(term, bookname=None, limit=None):
out_list = []
with _get_index().searcher() as searcher:
parser = QueryParser("content", schema=schema)
print searcher
query = parser.parse(term)
if bookname:
query = And([query, Term("bookname", unicode(bookname))])
results = searcher.search(query, limit=limit)
results.fragmenter.charlimit = None
results.fragmenter.maxchars = 300
results.fragmenter.surround = 50
results.formatter = StringFormatter('{{{', '}}}')
for hit in results:
out_list.append({
'bookname': hit['bookname'],
'pagenum': hit['pagenum'],
'snippet': hit.highlights("content"),
'highlights': _get_highlights(hit)
})
print out_list
return out_list
def _get_highlights(result):
# FIXME: This is f*****ing slow...
highlights = []
fname = os.path.join(BOOK_PATH, result['bookname'],
"{0}.hocr".format(result['bookname']))
tree = etree.parse(fname)
page = tree.xpath('//div[@id="page_{0}"]'.format(result['pagenum']))[0]
hl_tokens = set(re.findall(r'{{{([^{}]+)}}}',
result.highlights("content")))
for token in hl_tokens:
occurences = [x for x in page.xpath('.//span[@class="ocrx_word"]')
if "".join(x.itertext())
and token.lower() in "".join(x.itertext()).lower()]
for hit in occurences:
highlights.append(tuple(hit.get('title').replace('bbox ', '')
.split(' ')))
return tuple(highlights)
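# Hypothetical usage sketch (assumes books/<bookname>/<bookname>.hocr exists and has
# been generated by OCR beforehand):
# index_book('mybook')
# hits = search('liberty', bookname='mybook', limit=10)
# for hit in hits:
#     hit['pagenum'], hit['snippet'], hit['highlights']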
| 34.971154 | 77 | 0.624416 |
7948c8903b09835e90240f84bbcfc08d8fb622a6
| 634 |
py
|
Python
|
uncertainty_library/utils.py
|
Featurespace/uncertainty-attribution
|
06986c56600366e5516f5b39ec2bc61436ba4b35
|
[
"MIT"
] | 3 |
2021-07-26T13:10:27.000Z
|
2022-01-03T16:27:31.000Z
|
uncertainty_library/utils.py
|
Featurespace/uncertainty-attribution
|
06986c56600366e5516f5b39ec2bc61436ba4b35
|
[
"MIT"
] | null | null | null |
uncertainty_library/utils.py
|
Featurespace/uncertainty-attribution
|
06986c56600366e5516f5b39ec2bc61436ba4b35
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
def remove_test_time_dropout(model_drop):
config = model_drop.get_config()
for layer in config['layers']:
if 'Dropout' in layer['class_name']:
layer['inbound_nodes'][0][0][3]['training'] = False
model = tf.keras.Model().from_config(config)
model.set_weights(model_drop.get_weights())
return model
def ds2numpy(ds, max_num_batches):
x, y = [], []
for batch in ds.take(max_num_batches):
x.append(batch[0].numpy())
y.append(batch[1].numpy())
x = np.concatenate(x, axis=0)
y = np.concatenate(y, axis=0)
return x, y
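# Hypothetical usage sketch (``mc_dropout_model`` is an assumed Keras functional model
# whose Dropout layers were built with training=True, and ``test_ds`` an assumed tf.data
# dataset of (x, y) batches):
# deterministic_model = remove_test_time_dropout(mc_dropout_model)
# x_test, y_test = ds2numpy(test_ds, max_num_batches=10)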
| 27.565217 | 63 | 0.641956 |
7948c9b20366c15ff6b2ff665e5e6b77ab954b0c
| 13,010 |
py
|
Python
|
ParseSpectra.py
|
wjs018/DNASpectra
|
1d62955417651488e6289fcdb4cffa0911663d02
|
[
"MIT"
] | null | null | null |
ParseSpectra.py
|
wjs018/DNASpectra
|
1d62955417651488e6289fcdb4cffa0911663d02
|
[
"MIT"
] | null | null | null |
ParseSpectra.py
|
wjs018/DNASpectra
|
1d62955417651488e6289fcdb4cffa0911663d02
|
[
"MIT"
] | null | null | null |
"""Parses spectra text files from the Rogers spectrometer and calculates the
absorption spectra for DNA (260 nm).
A lot of the automation in this program is done through using filename
conventions. See README for complete documentation regarding filename
conventions.
"""
import os
import re
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
class Spectrum:
"""A class containing all the data from a single spectrum."""
def __init__(self, eth_gly, mM_NaCl, temperature, time=None):
"""The ethylene glycol, mM NaCl, and temperature need to be set in the
call initializing the spectrum. Time is an optional parameter to be
used in time series and ramp experiments."""
self.eth_gly = eth_gly
self.mM_NaCl = mM_NaCl
self.temperature = temperature
self.time = time
if temperature == 'blank':
self.blank = True
else:
self.blank = False
def add_data(self, lambdas, intensities):
"""Add the wavelength and intensity data to the spectrum. Should be in
the form of numpy arrays."""
self.lambdas = lambdas
self.intensities = intensities
class Experiment:
"""A class containing all the spectra from a single experiment."""
def __init__(self, exp_type, ramp=None):
"""Define the experiment type when initializing an Experiment. Can
either be 'time' for a time series measurement, 'temp' for a
temperature series experiment, or 'ramp' for a temperature ramp
experiment. If the exp_type is a ramp, then the parameter ramp must
be specified. It is a tuple containing three floats (start temperature,
stop temperature, temperature change per minute)."""
self.spectra_list = []
self.abs_list = []
self.blank_list = []
self.exp_type = exp_type
if ramp:
self.ramp_start = ramp[0]
self.ramp_stop = ramp[1]
self.ramp_grad = ramp[2]
def add_spectrum(self, spectrum):
"""Add one more spectrum to the experiment."""
if spectrum.blank == True:
self.blank_list.append(spectrum)
else:
self.spectra_list.append(spectrum)
def get_temps(self):
"""Return the list of temperatures for the spectra in the Experiment."""
temp_list = []
if self.exp_type == 'temp':
for spec in self.spectra_list:
temp_list.append(spec.temperature)
return temp_list
def calc_abs(self):
"""Calculate the absorbance spectra."""
if len(self.blank_list) == len(self.spectra_list):
# First, sort spectra and blanks by time to make sure they are in order
self.sort_spectra()
self.sort_spectra(type='blank')
for i in range(len(self.spectra_list)):
spec = self.spectra_list[i]
blank = self.blank_list[i]
trans = np.divide(
spec.intensities, blank.intensities)
trans = trans.clip(min=1e-10)
absorb = - np.log10(trans)
abs_spec = Spectrum(
spec.eth_gly, spec.mM_NaCl, spec.temperature)
abs_spec.add_data(spec.lambdas, absorb)
self.abs_list.append(abs_spec)
else:
print "Number of spectra and blanks do not match!"
def get_abs_maxes(self):
"""Get the 260 nm absorbance maximum from each spectrum."""
abs_max_list = []
if len(self.abs_list) == 0:
self.calc_abs()
if len(self.abs_list) == len(self.spectra_list):
lambdas = self.blank_list[0].lambdas
ix = np.where((258 < lambdas) & (lambdas < 262))
for spec in self.abs_list:
abs_max_list.append(np.max(spec.intensities[ix]))
return abs_max_list
def plot_melting(self):
"""Plots Absorbance vs. Temperature curve."""
if len(self.abs_list) == 0:
self.calc_abs()
if len(self.abs_list) == len(self.spectra_list):
temps = self.get_temps()
maxes = self.get_abs_maxes()
plt.plot(temps, maxes, 'o')
plt.title(str(self.spectra_list[0].eth_gly) + "% Ethylene Glycol")
plt.show()
def get_times(self, type=None):
"""Returns a list of times the spectra in the experiment were taken.
To get a list of times for the blank spectra, specify type='blank'."""
if self.exp_type == 'temp':
print "Experiment is wrong type for this function."
return None
time_list = []
if not type:
for spec in self.spectra_list:
time_list.append(spec.time)
elif type == 'blank':
for spec in self.blank_list:
time_list.append(spec.time)
return time_list
def sort_spectra(self, type=None):
"""Sorts the spectra according to timestamp. Specify type='blank'
to sort blank spectra."""
if self.exp_type == 'temp':
print "Experiment is wrong type for this function."
return None
time_list = self.get_times(type=type)
time_list.sort()
if not type:
spec_list = self.spectra_list
else:
spec_list = self.blank_list
sorted_spec_list = []
for timestamp in time_list:
for spec in spec_list:
if timestamp == spec.time:
sorted_spec_list.append(spec)
break
if not type:
self.spectra_list = sorted_spec_list
else:
self.blank_list = sorted_spec_list
def plot_time(self):
"""Plots absorption as a function of time."""
if len(self.abs_list) == 0:
self.calc_abs()
if len(self.abs_list) == len(self.spectra_list):
times = self.get_times()
maxes = self.get_abs_maxes()
plt.plot(times, maxes, 'o')
plt.title(str(self.spectra_list[0].eth_gly) + "% Ethylene Glycol")
plt.show()
def save(self, output_file):
"""Saves the results of the experiment. For time series and ramp
experiments, it will save a csv file with two columns, time and
absorbance. For temperature series, it will save a csv with two
columns, temperature and absorbance."""
import csv
outfile = open(output_file, 'wb')
writer = csv.writer(outfile)
if self.exp_type in ['time', 'ramp']:
col1 = self.get_times()
col2 = self.get_abs_maxes()
elif self.exp_type == 'temp':
col1 = self.get_temps()
col2 = self.get_abs_maxes()
for i in range(len(col1)):
writer.writerow([col1[i], col2[i]])
outfile.close()
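# A minimal usage sketch with synthetic numbers (not part of the original
# pipeline); it exercises the Spectrum/Experiment API defined above without
# going through parse_folder below.
def _example_manual_experiment():
    """Build an Experiment by hand and read back the 260 nm absorbance max."""
    lambdas = np.linspace(200, 400, 201)
    blank = Spectrum(20.0, 100.0, 'blank')
    blank.add_data(lambdas, np.full_like(lambdas, 40000.0))
    sample = Spectrum(20.0, 100.0, 25.0)
    sample.add_data(lambdas, np.full_like(lambdas, 10000.0))
    exp = Experiment('temp')
    exp.add_spectrum(blank)
    exp.add_spectrum(sample)
    return exp.get_abs_maxes()  # ~[0.602], since -log10(10000.0 / 40000.0) = 0.602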
def parse_folder(dir_path):
"""Parse the DNA spectra in the given directory. Returns a dictionary of
the Experiment objects discovered from the files in the directory."""
source_dir = dir_path
for root, dirs, files in os.walk(source_dir):
files = [os.path.join(root, f) for f in files if f.endswith('.txt')]
# Experiments distinguished according to ethylene glycol content
experiment_dict = {}
for spectrum_file in files:
# First, get the filename separate from the directory structure
os_test = os.path.join('test', 'path')
split_char = os_test[4]
filename = spectrum_file.split(split_char)[-1]
filename = filename[0:-4]
# Next, get the ethylene glycol content
filename_parts = filename.split('_')
chunks = len(filename_parts)
# Determine experiment type
if any('time' in s.lower() for s in filename_parts):
exp_type = 'time'
elif any('temp' in s.lower() for s in filename_parts):
exp_type = 'temp'
elif any('ramp' in s.lower() for s in filename_parts):
exp_type = 'ramp'
# Determine if this spectrum is a blank
blank_flag = False
if any('blank' in s.lower() for s in filename_parts):
temperature = 'blank'
blank_flag = True
for i in range(1, chunks - 1):
# Get the ethylene glycol content
if filename_parts[i] == 'Eth' and filename_parts[i + 1] == 'Gly' and filename_parts[i - 1].isdigit():
eth_gly = float(filename_parts[i - 1])
# Get the salt content
if filename_parts[i] == 'mM' and filename_parts[i + 1] == 'NaCl' and filename_parts[i - 1].isdigit():
mM_NaCl = float(filename_parts[i - 1])
# Extract the temperature if it is not a blank
if exp_type != 'ramp' or blank_flag == False:
temperature_inds = re.search("[0-9]C", filename)
temperature = float(filename[temperature_inds.start() - 1:temperature_inds.end() - 1])
# Actually read in the data from the text file (16 rows of header)
data = np.loadtxt(spectrum_file, delimiter="\t", skiprows=16)
lambdas = data[:, 0]
intensities = data[:, 1]
# Save to a Spectrum object
spectrum_obj = Spectrum(eth_gly, mM_NaCl, temperature)
spectrum_obj.add_data(lambdas, intensities)
# Check whether this is a temperature or time series experiment
if exp_type == 'temp':
# This is a temperature experiment
exp_time = None
elif exp_type in ['time', 'ramp']:
# This is a time series experiment, we need to extract the timestamp
time_str = filename_parts[-1]
time_parts = time_str.split('-')
# We need to convert strings into ints for the time object
for i in range(len(time_parts)):
time_parts[i] = int(time_parts[i])
exp_short_time = dt.time(
time_parts[0], time_parts[1], time_parts[2], time_parts[3])
today_date = dt.date.today()
exp_time = dt.datetime.combine(today_date, exp_short_time)
if exp_type == 'ramp':
# Need to get the ramp parameters
for i in range(chunks):
chunk = filename_parts[i]
temp_str = re.search("[0-9]C", chunk)
if temp_str:
if filename_parts[i+1].lower() == 'start':
ramp_start = float(chunk[temp_str.start() - 1:temp_str.end() - 1])
elif filename_parts[i+1].lower() == 'end':
ramp_stop = float(chunk[temp_str.start() - 1:temp_str.end() - 1])
elif filename_parts[i+1].lower() == 'min':
ramp_grad = float(chunk[temp_str.start() - 2:temp_str.end() - 1])
ramp_params = (ramp_start, ramp_stop, ramp_grad)
else:
ramp_params = None
exp_key = str(eth_gly) + '_' + str(mM_NaCl) + '_' + exp_type
# Save to a Spectrum object
spectrum_obj = Spectrum(eth_gly, mM_NaCl, temperature, time=exp_time)
spectrum_obj.add_data(lambdas, intensities)
# Add the spectrum to an existing Experiment or create a new one
if exp_key in experiment_dict:
experiment_dict[exp_key].add_spectrum(spectrum_obj)
else:
experiment_dict[exp_key] = Experiment(exp_type = exp_type, ramp = ramp_params)
experiment_dict[exp_key].add_spectrum(spectrum_obj)
# Return the dictionary of experiments
return experiment_dict
if __name__ == '__main__':
#=========================================================================
# Change the line below if you want to specify a different directory.
#=========================================================================
source_dir = '/media/sf_M_DRIVE/DNA Spectra/20160531'
experiment_dict = parse_folder(source_dir)
for key in experiment_dict:
print str(len(experiment_dict[key].spectra_list)) + ' data spectra in ' + key
print str(len(experiment_dict[key].blank_list)) + ' blank spectra in ' + key
# Plot results depending on type of experiment
for key in experiment_dict:
exp = experiment_dict[key]
if exp.exp_type == 'temp':
exp.plot_melting()
else:
exp.plot_time()
# Save results to a csv file
exp.save(os.path.join(source_dir, key + '.csv'))
| 30.97619 | 113 | 0.565642 |
7948ca70ffc9c2633c3ed57d337dd7a58652be38
| 393 |
py
|
Python
|
task_man/wsgi.py
|
vshumanov/django_task_mon
|
25ea1a4af986df5abb65fcaae8163abd8dc6cce9
|
[
"MIT"
] | null | null | null |
task_man/wsgi.py
|
vshumanov/django_task_mon
|
25ea1a4af986df5abb65fcaae8163abd8dc6cce9
|
[
"MIT"
] | null | null | null |
task_man/wsgi.py
|
vshumanov/django_task_mon
|
25ea1a4af986df5abb65fcaae8163abd8dc6cce9
|
[
"MIT"
] | null | null | null |
"""
WSGI config for task_man project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'task_man.settings')
application = get_wsgi_application()
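# Illustrative deployment command (assumed server choice): any WSGI server can
# serve this callable, e.g.  gunicorn task_man.wsgi:application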
| 23.117647 | 78 | 0.78626 |
7948ca7f37c2132ae50d7b0fb2c42336d60c9e03
| 2,868 |
py
|
Python
|
buildserver/pypi_utils.py
|
akshitdewan/cs61a-apps
|
155f2afe98b238fb4b1c4ca1c79610ec55e826e6
|
[
"MIT"
] | 5 |
2020-09-10T01:45:09.000Z
|
2022-01-10T23:24:03.000Z
|
buildserver/pypi_utils.py
|
akshitdewan/cs61a-apps
|
155f2afe98b238fb4b1c4ca1c79610ec55e826e6
|
[
"MIT"
] | 424 |
2020-08-24T06:22:59.000Z
|
2021-10-10T02:36:11.000Z
|
buildserver/pypi_utils.py
|
akshitdewan/cs61a-apps
|
155f2afe98b238fb4b1c4ca1c79610ec55e826e6
|
[
"MIT"
] | 7 |
2020-08-28T22:05:10.000Z
|
2022-03-04T12:47:05.000Z
|
import re
from typing import Optional, Tuple
from packaging.version import Version
import requests
from app_config import App
VERSION_REGEX = r"version=\"(\d*).(\d)*.\*\""
def get_latest_version(
app: App, pr_number: int
) -> Tuple[Optional[Version], Optional[Version]]:
"""
Finds the latest deployed version of the package, or `None` if no
such version can be found. If we are making a prod build, it just finds
the latest deployed prod version. If we are making a PR build, it returns
both the latest deployed prod version, as well as the latest pre-release for
that PR.
"""
package_name = app.config["package_name"]
data = requests.get(f"https://pypi.org/pypi/{package_name}/json")
if data.status_code == 404:
return None, None
versions = [Version(release) for release in data.json()["releases"]]
prod_versions = [version for version in versions if version.pre is None]
pr_versions = [
version
for version in versions
if version.pre is not None and version.pre[1] == pr_number
]
latest_prod = max(prod_versions) if prod_versions else None
latest_pr = max(pr_versions) if pr_versions else None
return latest_prod, latest_pr
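# Illustrative version strings: Version("1.2.3").pre is None, so it counts as a
# prod release above, while Version("1.2.4b57.dev2").pre == ("b", 57) marks it
# as part of the pre-release chain for PR #57 (and .dev gives its dev number).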
def update_setup_py(app: App, pr_number: int):
with open("setup.py", "a+") as f:
f.seek(0)
setup_contents = f.read()
match = re.search(VERSION_REGEX, setup_contents)
if match is None:
raise Exception("Could not find version in setup.py")
major, minor = int(match.group(1)), int(match.group(2))
latest_prod_version, latest_pr_version = get_latest_version(app, pr_number)
if latest_prod_version is None:
micro = 0
else:
if (
latest_prod_version.major > major
or latest_prod_version.major == major
and latest_prod_version.minor > minor
):
raise Exception(
f"Latest version {latest_prod_version} is greater than current build {major}.{minor}"
)
if (
latest_prod_version.major == major
and latest_prod_version.minor == minor
):
micro = latest_prod_version.micro + 1
else:
micro = 0
if latest_pr_version is None:
dev_number = 0
else:
dev_number = latest_pr_version.dev + 1
if pr_number == 0:
next_version = Version(f"{major}.{minor}.{micro}")
else:
next_version = Version(
f"{major}.{minor}.{micro}b{pr_number}.dev{dev_number}"
)
f.seek(0)
f.truncate()
f.write(re.sub(VERSION_REGEX, f'version="{next_version}"', setup_contents))
app.deployed_pypi_version = str(next_version)
| 33.741176 | 105 | 0.609484 |
7948ca820f8cb576363125ddbc7c1c445344afc0
| 373 |
py
|
Python
|
src/simulator/services/persistent_state/persistent_state_object.py
|
ed741/PathBench
|
50fe138eb1f824f49fe1a862705e435a1c3ec3ae
|
[
"BSD-3-Clause"
] | 46 |
2020-12-25T04:09:15.000Z
|
2022-03-25T12:32:42.000Z
|
src/simulator/services/persistent_state/persistent_state_object.py
|
ed741/PathBench
|
50fe138eb1f824f49fe1a862705e435a1c3ec3ae
|
[
"BSD-3-Clause"
] | 36 |
2020-12-21T16:10:02.000Z
|
2022-01-03T01:42:01.000Z
|
src/simulator/services/persistent_state/persistent_state_object.py
|
judicaelclair/PathBenchURO
|
101e67674efdfa8e27e1cf7787dac9fdf99552fe
|
[
"BSD-3-Clause"
] | 11 |
2021-01-06T23:34:12.000Z
|
2022-03-21T17:21:47.000Z
|
from abc import ABC, abstractmethod
from typing import Dict, Any
class PersistentStateObject(ABC):
_state: 'PersistentState'
def __init__(self, state: 'PersistentState'):
self._state = state
@abstractmethod
def _from_json(self, data: Dict[str, Any]) -> None:
...
@abstractmethod
def _to_json(self) -> Dict[str, Any]:
...
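# A minimal illustrative subclass (hypothetical "window geometry" state); the
# PersistentState service is assumed to call _to_json/_from_json when saving
# and restoring its registered objects.
class _ExampleWindowState(PersistentStateObject):
    def __init__(self, state: 'PersistentState'):
        super().__init__(state)
        self.width, self.height = 800, 600
    def _from_json(self, data: Dict[str, Any]) -> None:
        self.width = data.get("width", self.width)
        self.height = data.get("height", self.height)
    def _to_json(self) -> Dict[str, Any]:
        return {"width": self.width, "height": self.height}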
| 21.941176 | 55 | 0.646113 |
7948cae67a8c512feed0fe22f6b69a61310b2dd0
| 21,486 |
py
|
Python
|
examples/lm1b/train.py
|
davisyoshida/flax
|
d03a262249671db92f9924f990437907cac36b21
|
[
"Apache-2.0"
] | 4 |
2020-05-28T11:25:47.000Z
|
2021-04-30T13:08:48.000Z
|
examples/lm1b/train.py
|
n2cholas/flax
|
0848d9cf203cc5dfe63d4a5dece3c6aa8e135107
|
[
"Apache-2.0"
] | null | null | null |
examples/lm1b/train.py
|
n2cholas/flax
|
0848d9cf203cc5dfe63d4a5dece3c6aa8e135107
|
[
"Apache-2.0"
] | 2 |
2021-04-30T13:08:58.000Z
|
2021-12-08T00:50:25.000Z
|
# Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Language Modeling example.
This script trains a Transformer on a LM1B dataset.
"""
# pytype: disable=wrong-arg-count
# pytype: disable=attribute-error
import collections
import functools
import os
from absl import logging
from clu import metric_writers
from clu import periodic_actions
from flax import jax_utils
from flax import linen as nn
from flax import optim
import input_pipeline
import models
import temperature_sampler
from flax.training import checkpoints
from flax.training import common_utils
import jax
from jax import random
import jax.numpy as jnp
import ml_collections
import numpy as np
import tensorflow as tf
def create_learning_rate_scheduler(
factors="constant * linear_warmup * rsqrt_decay",
base_learning_rate=0.5,
warmup_steps=1000,
decay_factor=0.5,
steps_per_decay=20000,
steps_per_cycle=100000):
"""Creates learning rate schedule.
Interprets factors in the factors string which can consist of:
* constant: interpreted as the constant value,
* linear_warmup: interpreted as linear warmup until warmup_steps,
* rsqrt_decay: divide by square root of max(step, warmup_steps)
* rsqrt_normalized_decay: divide by square root of max(step/warmup_steps, 1)
* decay_every: Every k steps decay the learning rate by decay_factor.
* cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.
Args:
factors: string, factors separated by "*" that defines the schedule.
base_learning_rate: float, the starting constant for the lr schedule.
warmup_steps: int, how many steps to warm up for in the warmup schedule.
decay_factor: float, the amount to decay the learning rate by.
steps_per_decay: int, how often to decay the learning rate.
steps_per_cycle: int, steps per cycle when using cosine decay.
Returns:
a function learning_rate(step): float -> {"learning_rate": float}, the
step-dependent lr.
"""
factors = [n.strip() for n in factors.split("*")]
def step_fn(step):
"""Step to learning rate function."""
ret = 1.0
for name in factors:
if name == "constant":
ret *= base_learning_rate
elif name == "linear_warmup":
ret *= jnp.minimum(1.0, step / warmup_steps)
elif name == "rsqrt_decay":
ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
elif name == "rsqrt_normalized_decay":
ret *= jnp.sqrt(warmup_steps)
ret /= jnp.sqrt(jnp.maximum(step, warmup_steps))
elif name == "decay_every":
ret *= (decay_factor**(step // steps_per_decay))
elif name == "cosine_decay":
progress = jnp.maximum(0.0,
(step - warmup_steps) / float(steps_per_cycle))
ret *= jnp.maximum(0.0,
0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0))))
else:
raise ValueError("Unknown factor %s." % name)
return jnp.asarray(ret, dtype=jnp.float32)
return step_fn
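# Illustrative values under the defaults above
# (factors="constant * linear_warmup * rsqrt_decay", base_learning_rate=0.5,
# warmup_steps=1000):
#   step 100  -> 0.5 * (100 / 1000) / sqrt(1000) ~= 1.58e-3   (still warming up)
#   step 4000 -> 0.5 * 1.0 / sqrt(4000)          ~= 7.91e-3   (rsqrt decay)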
def compute_weighted_cross_entropy(logits,
targets,
weights=None,
label_smoothing=0.0):
"""Compute weighted cross entropy and entropy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
weights: None or array of shape [batch, length].
label_smoothing: label smoothing constant, used to determine the on and off
values.
Returns:
Tuple of scalar loss and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError("Incorrect shapes. Got shape %s logits and %s targets" %
(str(logits.shape), str(targets.shape)))
vocab_size = logits.shape[-1]
confidence = 1.0 - label_smoothing
low_confidence = (1.0 - confidence) / (vocab_size - 1)
normalizing_constant = -(
confidence * jnp.log(confidence) +
(vocab_size - 1) * low_confidence * jnp.log(low_confidence + 1e-20))
soft_targets = common_utils.onehot(
targets, vocab_size, on_value=confidence, off_value=low_confidence)
loss = -jnp.sum(soft_targets * nn.log_softmax(logits), axis=-1)
loss = loss - normalizing_constant
normalizing_factor = np.prod(targets.shape)
if weights is not None:
loss = loss * weights
normalizing_factor = weights.sum()
return loss.sum(), normalizing_factor
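# Illustrative numbers: with vocab_size=4 and label_smoothing=0.1, the soft
# targets use confidence=0.9 on the true class and low_confidence=0.1/3 on the
# rest; the normalizing constant then shifts the loss so a prediction that
# exactly matches the smoothed target contributes ~0.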
def compute_weighted_accuracy(logits, targets, weights=None):
"""Compute weighted accuracy for log probs and targets.
Args:
logits: [batch, length, num_classes] float array.
targets: categorical targets [batch, length] int array.
weights: None or array of shape [batch, length]
Returns:
Tuple of scalar loss and batch normalizing factor.
"""
if logits.ndim != targets.ndim + 1:
raise ValueError("Incorrect shapes. Got shape %s logits and %s targets" %
(str(logits.shape), str(targets.shape)))
loss = jnp.equal(jnp.argmax(logits, axis=-1), targets)
normalizing_factor = np.prod(logits.shape[:-1])
if weights is not None:
loss = loss * weights
normalizing_factor = weights.sum()
return loss.sum(), normalizing_factor
def compute_metrics(logits, labels, weights, label_smoothing=0.0):
"""Compute summary metrics."""
loss, weight_sum = compute_weighted_cross_entropy(logits, labels, weights,
label_smoothing)
acc, _ = compute_weighted_accuracy(logits, labels, weights)
metrics = {
"loss": loss,
"accuracy": acc,
"denominator": weight_sum,
}
metrics = jax.lax.psum(metrics, axis_name="batch")
return metrics
# Primary training / eval / decode step functions.
# -----------------------------------------------------------------------------
def train_step(optimizer,
batch,
config,
learning_rate_fn,
label_smoothing=0.0,
dropout_rng=None):
"""Perform a single training step."""
# X_position and X_segmentation are needed only when using "packed examples"
# where multiple sequences are packed into the same example with this
# metadata.
# if such features are not present they are ignored and the example is treated
# like a normal, unpacked sequence example.
train_keys = ["inputs", "inputs_position", "inputs_segmentation"]
(inputs, inputs_positions, inputs_segmentation
) = [batch.get(k, None) for k in train_keys]
weights = jnp.where(inputs > 0, 1, 0).astype(jnp.float32)
dropout_rng = jax.random.fold_in(dropout_rng, optimizer.state.step)
def loss_fn(params):
"""loss function used for training."""
logits = models.TransformerLM(config).apply(
{"params": params},
inputs,
inputs_positions=inputs_positions,
inputs_segmentation=inputs_segmentation,
rngs={"dropout": dropout_rng})
loss, weight_sum = compute_weighted_cross_entropy(logits, inputs, weights,
label_smoothing)
mean_loss = loss / weight_sum
return mean_loss, logits
step = optimizer.state.step
lr = learning_rate_fn(step)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(_, logits), grad = grad_fn(optimizer.target)
grad = jax.lax.pmean(grad, "batch")
new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)
metrics = compute_metrics(logits, inputs, weights)
metrics["learning_rate"] = lr
return new_optimizer, metrics
def eval_step(params, batch, config, label_smoothing=0.0):
"""Calculate evaluation metrics on a batch."""
inputs = batch["inputs"]
weights = jnp.where(inputs > 0, 1.0, 0.0)
logits = models.TransformerLM(config).apply({"params": params}, inputs)
return compute_metrics(logits, inputs, weights, label_smoothing)
def predict_step(inputs,
params,
rngkey,
eos_id,
max_decode_len,
config,
temperature,
top_k):
"""Predict language model on a batch."""
target_shape = (inputs.shape[0], max_decode_len) + inputs.shape[2:]
initial_variables = models.TransformerLM(config).init(
jax.random.PRNGKey(0),
jnp.ones(target_shape, config.dtype))
cache = initial_variables["cache"]
def tokens_ids_to_logits(flat_ids, flat_cache):
"""Token slice to logits from decoder model."""
# --> [batch * beam, 1, vocab]
flat_logits, new_vars = models.TransformerLM(config).apply(
{
"params": params,
"cache": flat_cache
},
flat_ids,
mutable=["cache"])
new_flat_cache = new_vars["cache"]
# Remove singleton sequence-length dimension:
# [batch, 1, vocab] --> [batch, vocab]
flat_logits = flat_logits.squeeze(axis=1)
return flat_logits, new_flat_cache
# Using the above-defined single-step decoder function, run a
# beam search over possible sequences given input encoding.
seqs = temperature_sampler.temperature_sample(
inputs,
cache,
tokens_ids_to_logits,
rngkey,
temperature=temperature,
topk=top_k,
eos_token=eos_id)
return seqs
# Utils for prediction
# -----------------------------------------------------------------------------
def pad_examples(x, desired_batch_size):
"""Expand batch to desired size by repeating last slice."""
batch_pad = desired_batch_size - x.shape[0]
return np.concatenate([x, np.tile(x[-1], (batch_pad, 1))], axis=0)
def per_host_sum_pmap(in_tree):
"""Execute psum on in_tree"s leaves over one device per host."""
host2devices = collections.defaultdict(list)
for d in jax.devices():
host2devices[d.host_id].append(d)
devices = [host2devices[k][0] for k in host2devices]
host_psum = jax.pmap(lambda x: jax.lax.psum(x, "i"), "i", devices=devices)
def pre_pmap(xs):
return jax.tree_map(lambda x: jnp.broadcast_to(x, (1,) + x.shape), xs)
def post_pmap(xs):
return jax.tree_map(lambda x: x[0], xs)
return post_pmap(host_psum(pre_pmap(in_tree)))
def tohost(x):
"""Collect batches from all devices to host and flatten batch dimensions."""
n_device, n_batch, *remaining_dims = x.shape
return np.array(x).reshape((n_device * n_batch,) + tuple(remaining_dims))
def evaluate(*, p_eval_step, target, eval_ds: tf.data.Dataset,
num_eval_steps: int):
"""Evaluate the target an return a dictionary with the metrics."""
logging.info("Gathering evaluation metrics.")
eval_metrics = []
eval_iter = iter(eval_ds) # pytype: disable=wrong-arg-types
for _, eval_batch in zip(range(num_eval_steps), eval_iter):
eval_batch = jax.tree_map(lambda x: x._numpy(), eval_batch) # pylint: disable=protected-access
eval_batch = common_utils.shard(eval_batch)
metrics = p_eval_step(target, eval_batch)
eval_metrics.append(metrics)
eval_metrics = common_utils.get_metrics(eval_metrics)
eval_metrics_sums = jax.tree_map(jnp.sum, eval_metrics)
eval_denominator = eval_metrics_sums.pop("denominator")
eval_summary = jax.tree_map(
lambda x: x / eval_denominator, # pylint: disable=cell-var-from-loop
eval_metrics_sums)
return eval_summary
def generate_prediction(*, p_pred_step, target,
tokenized_prompts,
eos_id,
inference_rng,
decode_tokens,
max_predict_length: int):
"""Generate text from the prompt."""
n_devices = jax.local_device_count()
logging.info("Generating text.")
predictions = []
# Use batch of prompts provided by user.
for pred_batch in jnp.array_split(
tokenized_prompts, int(np.ceil(len(tokenized_prompts) / n_devices))):
cur_pred_batch_size = pred_batch.shape[0]
if cur_pred_batch_size % n_devices:
padded_size = int(
np.ceil(cur_pred_batch_size / n_devices) * n_devices)
pred_batch = jax.tree_map(
lambda x: pad_examples(x, padded_size), pred_batch) # pylint: disable=cell-var-from-loop
pred_batch = common_utils.shard(pred_batch)
inference_rng, sub_rng = random.split(inference_rng)
inference_rngs = random.split(sub_rng, n_devices)
predicted = p_pred_step(pred_batch, target, inference_rngs,
eos_id, max_predict_length)
predicted = tohost(predicted)
# Iterate through non-padding examples of batch.
for s in predicted[:cur_pred_batch_size]:
prediction = decode_tokens(s)
logging.info("Sample: %s", str(prediction))
predictions.append(prediction)
# Save generated texts for tensorboard.
exemplars = ""
for prediction in predictions:
exemplars += f"{prediction}\n\n"
return exemplars
def train_and_evaluate(config: ml_collections.ConfigDict, workdir: str):
"""Runs a training and evaluation loop.
Args:
config: Configuration to use.
workdir: Working directory for checkpoints and TF summaries. If this
contains checkpoint training will be resumed from the latest checkpoint.
"""
tf.io.gfile.makedirs(workdir)
vocab_path = config.vocab_path
if vocab_path is None:
vocab_path = os.path.join(workdir, "sentencepiece_model")
config.vocab_path = vocab_path
tf.io.gfile.makedirs(os.path.split(vocab_path)[0])
# Load Dataset
# ---------------------------------------------------------------------------
logging.info("Initializing dataset.")
train_ds, eval_ds, _, encoder = input_pipeline.get_datasets(
n_devices=jax.local_device_count(),
config=config,
vocab_path=vocab_path)
train_iter = iter(train_ds)
vocab_size = int(encoder.vocab_size())
eos_id = temperature_sampler.EOS_ID # Default Sentencepiece EOS token.
def decode_tokens(toks):
valid_toks = toks[:np.argmax(toks == eos_id) + 1].astype(np.int32)
return encoder.detokenize(valid_toks).numpy().decode("utf-8")
def encode_strings(strs, max_len):
tokenized_batch = np.zeros((len(strs), max_len), np.int32)
for i, s in enumerate(strs):
toks = encoder.tokenize(s).numpy()
# Remove EOS token in prompt.
tokenized_batch[i, :toks.shape[0]-1] = toks[:-1]
return tokenized_batch
tokenized_prompts = encode_strings(
[config.prompts], config.max_predict_length)
logging.info("Initializing model, optimizer, and step functions.")
# Build Model and Optimizer
# ---------------------------------------------------------------------------
train_config = models.TransformerConfig(
vocab_size=vocab_size,
output_vocab_size=vocab_size,
logits_via_embedding=config.logits_via_embedding,
dtype=jnp.bfloat16 if config.use_bfloat16 else jnp.float32,
emb_dim=config.emb_dim,
num_heads=config.num_heads,
num_layers=config.num_layers,
qkv_dim=config.qkv_dim,
mlp_dim=config.mlp_dim,
max_len=max(config.max_target_length, config.max_eval_target_length),
dropout_rate=config.dropout_rate,
attention_dropout_rate=config.attention_dropout_rate,
deterministic=False,
decode=False,
kernel_init=nn.initializers.xavier_uniform(),
bias_init=nn.initializers.normal(stddev=1e-6))
eval_config = train_config.replace(deterministic=True)
predict_config = train_config.replace(deterministic=True, decode=True)
start_step = 0
rng = jax.random.PRNGKey(config.seed)
rng, init_rng = jax.random.split(rng)
rng, inference_rng = random.split(rng)
input_shape = (config.per_device_batch_size, config.max_target_length)
m = models.TransformerLM(eval_config)
initial_variables = jax.jit(m.init)(init_rng,
jnp.ones(input_shape, jnp.float32))
# apply an optimizer to this tree
optimizer_def = optim.Adam(
config.learning_rate,
beta1=0.9,
beta2=0.98,
eps=1e-9,
weight_decay=config.weight_decay)
optimizer = optimizer_def.create(initial_variables["params"])
# We access model params only from optimizer below via optimizer.target.
del initial_variables
if config.restore_checkpoints:
# Restore unreplicated optimizer + model state from last checkpoint.
optimizer = checkpoints.restore_checkpoint(workdir, optimizer)
# Grab last step.
start_step = int(optimizer.state.step)
writer = metric_writers.create_default_writer(
workdir, just_logging=jax.host_id() > 0)
if start_step == 0:
writer.write_hparams(dict(config))
# Replicate optimizer.
optimizer = jax_utils.replicate(optimizer)
learning_rate_fn = create_learning_rate_scheduler(
base_learning_rate=config.learning_rate, warmup_steps=config.warmup_steps)
# compile multidevice versions of train/eval/predict step fn.
p_train_step = jax.pmap(
functools.partial(
train_step,
config=train_config,
learning_rate_fn=learning_rate_fn),
axis_name="batch",
donate_argnums=(0,)) # pytype: disable=wrong-arg-types
p_eval_step = jax.pmap(
functools.partial(
eval_step, config=eval_config),
axis_name="batch")
p_pred_step = jax.pmap(
functools.partial(
predict_step, config=predict_config,
temperature=config.sampling_temperature,
top_k=config.sampling_top_k),
axis_name="batch",
static_broadcasted_argnums=(3, 4)) # eos token, max_length are constant
# Main Train Loop
# ---------------------------------------------------------------------------
# We init the first set of dropout PRNG keys, but update it afterwards inside
  # the main pmap'd training update for performance.
dropout_rngs = jax.random.split(rng, jax.local_device_count())
del rng
logging.info("Starting training loop.")
hooks = []
report_progress = periodic_actions.ReportProgress(
num_train_steps=config.num_train_steps, writer=writer)
if jax.host_id() == 0:
hooks += [
report_progress,
periodic_actions.Profile(logdir=workdir, num_profile_steps=5)
]
train_metrics = []
with metric_writers.ensure_flushes(writer):
for step in range(start_step, config.num_train_steps):
is_last_step = step == config.num_train_steps - 1
# Shard data to devices and do a training step.
with jax.profiler.StepTraceAnnotation("train", step_num=step):
batch = common_utils.shard(jax.tree_map(np.asarray, next(train_iter)))
optimizer, metrics = p_train_step(
optimizer, batch, dropout_rng=dropout_rngs)
train_metrics.append(metrics)
# Quick indication that training is happening.
logging.log_first_n(logging.INFO, "Finished training step %d.", 5, step)
for h in hooks:
h(step)
# Periodic metric handling.
if step % config.eval_every_steps == 0 or is_last_step:
with report_progress.timed("training_metrics"):
logging.info("Gathering training metrics.")
train_metrics = common_utils.get_metrics(train_metrics)
lr = train_metrics.pop("learning_rate").mean()
metrics_sums = jax.tree_map(jnp.sum, train_metrics)
denominator = metrics_sums.pop("denominator")
summary = jax.tree_map(lambda x: x / denominator, metrics_sums) # pylint: disable=cell-var-from-loop
summary["learning_rate"] = lr
summary["perplexity"] = jnp.clip(
jnp.exp(summary["loss"]), a_max=1.0e4)
summary = {"train_" + k: v for k, v in summary.items()}
writer.write_scalars(step, summary)
train_metrics = []
with report_progress.timed("eval"):
eval_results = evaluate(
p_eval_step=p_eval_step,
target=optimizer.target,
eval_ds=eval_ds,
num_eval_steps=config.num_eval_steps)
          # (clipped) perplexity after averaging log-perplexities
eval_results["perplexity"] = jnp.clip(
jnp.exp(eval_results["loss"]), a_max=1.0e4)
writer.write_scalars(
step, {"eval_" + k: v for k, v in eval_results.items()})
with report_progress.timed("generate_text"):
exemplars = generate_prediction(
p_pred_step=p_pred_step,
target=optimizer.target,
tokenized_prompts=tokenized_prompts,
eos_id=eos_id,
inference_rng=inference_rng,
decode_tokens=decode_tokens,
max_predict_length=config.max_predict_length)
writer.write_texts(step, {"samples": exemplars})
# Save a checkpoint on one host after every checkpoint_freq steps.
save_checkpoint = (step % config.checkpoint_every_steps == 0 or
is_last_step)
if config.save_checkpoints and save_checkpoint and jax.host_id() == 0:
with report_progress.timed("checkpoint"):
checkpoints.save_checkpoint(workdir, jax_utils.unreplicate(optimizer),
step)
| 36.854202 | 111 | 0.670343 |
7948caff8f35d998bedb3815629c611a69beee47
| 10,370 |
py
|
Python
|
banana/interfaces/mrtrix/fibre_est.py
|
tclose/nianalysis
|
314b7ade1d36d52817aa1de14ce1be22c5996a8e
|
[
"Apache-2.0"
] | 1 |
2019-07-30T03:45:57.000Z
|
2019-07-30T03:45:57.000Z
|
banana/interfaces/mrtrix/fibre_est.py
|
tclose/nianalysis
|
314b7ade1d36d52817aa1de14ce1be22c5996a8e
|
[
"Apache-2.0"
] | null | null | null |
banana/interfaces/mrtrix/fibre_est.py
|
tclose/nianalysis
|
314b7ade1d36d52817aa1de14ce1be22c5996a8e
|
[
"Apache-2.0"
] | null | null | null |
import os.path
from nipype.interfaces.base import (
traits, InputMultiPath, File, TraitedSpec, isdefined)
from nipype.interfaces.mrtrix3.reconst import (
MRTrix3Base, MRTrix3BaseInputSpec)
from nipype.interfaces.mrtrix3.preprocess import (
ResponseSD as NipypeResponseSD,
ResponseSDInputSpec as NipypeResponseSDInputSpec)
from arcana.utils import split_extension
class Fod2FixelInputSpec(MRTrix3BaseInputSpec):
in_file = File(exists=True, argstr='%s', mandatory=True, position=-3,
desc='input files')
out_file = File(genfile=True, argstr='%s', desc=(""), position=-1)
class Fod2FixelOutputSpec(TraitedSpec):
out_file = File(exists=True, desc=(""))
class Fod2Fixel(MRTrix3Base):
"""Fod2Fixel"""
_cmd = "fod2fixel"
input_spec = Fod2FixelInputSpec
output_spec = Fod2FixelOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
return outputs
class Fixel2VoxelInputSpec(MRTrix3BaseInputSpec):
in_file = File(exists=True, argstr='%s', mandatory=True, position=-3,
desc='input files')
out_file = File(genfile=True, argstr='%s', desc=(""), position=-1)
class Fixel2VoxelOutputSpec(TraitedSpec):
out_file = File(exists=True, desc=(""))
class Fixel2Voxel(MRTrix3Base):
"""Fixel2Voxel"""
_cmd = "fixel2voxel"
input_spec = Fixel2VoxelInputSpec
output_spec = Fixel2VoxelOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
return outputs
class FixelCorrespondenceInputSpec(MRTrix3BaseInputSpec):
in_file = File(exists=True, argstr='%s', mandatory=True, position=-3,
desc='input files')
out_file = File(genfile=True, argstr='%s', desc=(""), position=-1)
class FixelCorrespondenceOutputSpec(TraitedSpec):
out_file = File(exists=True, desc=(""))
class FixelCorrespondence(MRTrix3Base):
"""FixelCorrespondence"""
_cmd = "fixelcorrespondence"
input_spec = FixelCorrespondenceInputSpec
output_spec = FixelCorrespondenceOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
return outputs
class FixelCFEStatsInputSpec(MRTrix3BaseInputSpec):
in_file = File(exists=True, argstr='%s', mandatory=True, position=-3,
desc='input files')
out_file = File(genfile=True, argstr='%s', desc=(""), position=-1)
class FixelCFEStatsOutputSpec(TraitedSpec):
out_file = File(exists=True, desc=(""))
class FixelCFEStats(MRTrix3Base):
"""FixelCFEStats"""
_cmd = "fixelcfestats"
input_spec = FixelCFEStatsInputSpec
output_spec = FixelCFEStatsOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
return outputs
class TckSiftInputSpec(MRTrix3BaseInputSpec):
in_file = File(exists=True, argstr='%s', mandatory=True, position=-3,
desc='input files')
out_file = File(genfile=True, argstr='%s', desc=(""), position=-1)
class TckSiftOutputSpec(TraitedSpec):
out_file = File(exists=True, desc=(""))
class TckSift(MRTrix3Base):
"""TckSift"""
_cmd = "tcksift"
input_spec = TckSiftInputSpec
output_spec = TckSiftOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
return outputs
class Warp2MetricInputSpec(MRTrix3BaseInputSpec):
in_file = File(exists=True, argstr='%s', mandatory=True, position=-3,
desc='input files')
out_file = File(genfile=True, argstr='%s', desc=(""), position=-1)
class Warp2MetricOutputSpec(TraitedSpec):
out_file = File(exists=True, desc=(""))
class Warp2Metric(MRTrix3Base):
"""Warp2Metric"""
_cmd = "warp2metric"
input_spec = Warp2MetricInputSpec
output_spec = Warp2MetricOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
return outputs
class AverageReponseInputSpec(MRTrix3BaseInputSpec):
in_files = InputMultiPath(
File(exists=True), argstr='%s', mandatory=True,
position=0, desc="Average response")
out_file = File(
genfile=True, argstr='%s', position=-1,
desc=("the output spherical harmonics coefficients image"))
class AverageReponseOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='the output response file')
class AverageResponse(MRTrix3Base):
_cmd = 'average_response'
input_spec = AverageReponseInputSpec
output_spec = AverageReponseOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = os.path.abspath(self._gen_outfilename())
return outputs
def _gen_filename(self, name):
if name == 'out_file':
fname = self._gen_outfilename()
else:
assert False
return fname
def _gen_outfilename(self):
if isdefined(self.inputs.out_file):
filename = self.inputs.out_file
else:
base, ext = split_extension(
os.path.basename(self.inputs.in_files[0]))
filename = os.path.join(os.getcwd(),
"{}_avg{}".format(base, ext))
return filename
class EstimateFODInputSpec(MRTrix3BaseInputSpec):
algorithm = traits.Enum('csd', 'msmt_csd', argstr='%s', mandatory=True,
position=0,
desc="Algorithm used for CSD estimation")
in_file = File(exists=True, argstr='%s', mandatory=True, position=-3,
desc='input diffusion weighted images')
response = File(
exists=True, argstr='%s', mandatory=True, position=-2,
desc=('a text file containing the diffusion-weighted signal response '
'function coefficients for a single fibre population'))
out_file = File(
'fods.mif', argstr='%s', mandatory=True, position=-1,
usedefault=True, desc=('the output spherical harmonics coefficients'
' image'))
# DW Shell selection parameters
shell = traits.List(traits.Float, sep=',', argstr='-shell %s',
desc='specify one or more dw gradient shells')
# Spherical deconvolution parameters
max_sh = traits.Int(8, argstr='-lmax %d',
desc='maximum harmonic degree of response function')
in_mask = File(exists=True, argstr='-mask %s',
desc='provide initial mask image')
in_dirs = File(
exists=True, argstr='-directions %s',
desc=('specify the directions over which to apply the non-negativity '
'constraint (by default, the built-in 300 direction set is '
'used). These should be supplied as a text file containing the '
'[ az el ] pairs for the directions.'))
sh_filter = File(
exists=True, argstr='-filter %s',
desc=('the linear frequency filtering parameters used for the initial '
'linear spherical deconvolution step (default = [ 1 1 1 0 0 ]). '
'These should be supplied as a text file containing the '
'filtering coefficients for each even harmonic order.'))
neg_lambda = traits.Float(
1.0, argstr='-neg_lambda %f',
desc=('the regularisation parameter lambda that controls the strength'
' of the non-negativity constraint'))
thres = traits.Float(
0.0, argstr='-threshold %f',
desc=('the threshold below which the amplitude of the FOD is assumed '
'to be zero, expressed as an absolute amplitude'))
n_iter = traits.Int(
50, argstr='-niter %d', desc=('the maximum number of iterations '
'to perform for each voxel'))
class EstimateFODOutputSpec(TraitedSpec):
out_file = File(exists=True, desc='the output response file')
class EstimateFOD(MRTrix3Base):
"""
Convert diffusion-weighted images to tensor images
Note that this program makes use of implied symmetries in the diffusion
    profile. First, the fact that the signal attenuation profile is real implies
that it has conjugate symmetry, i.e. Y(l,-m) = Y(l,m)* (where * denotes
the complex conjugate). Second, the diffusion profile should be
antipodally symmetric (i.e. S(x) = S(-x)), implying that all odd l
components should be zero. Therefore, this program only computes the even
elements.
Note that the spherical harmonics equations used here differ slightly from
those conventionally used, in that the (-1)^m factor has been omitted.
This should be taken into account in all subsequent calculations.
The spherical harmonic coefficients are stored as follows. First, since
the signal attenuation profile is real, it has conjugate symmetry, i.e.
Y(l,-m) = Y(l,m)* (where * denotes the complex conjugate). Second, the
diffusion profile should be antipodally symmetric (i.e. S(x) = S(-x)),
implying that all odd l components should be zero. Therefore, only the
even elements are computed.
Note that the spherical harmonics equations used here differ slightly from
those conventionally used, in that the (-1)^m factor has been omitted.
This should be taken into account in all subsequent calculations.
Each volume in the output image corresponds to a different spherical
harmonic component. Each volume will correspond to the following:
volume 0: l = 0, m = 0
volume 1: l = 2, m = -2 (imaginary part of m=2 SH)
volume 2: l = 2, m = -1 (imaginary part of m=1 SH)
volume 3: l = 2, m = 0
volume 4: l = 2, m = 1 (real part of m=1 SH)
volume 5: l = 2, m = 2 (real part of m=2 SH)
etc...
Example
-------
>>> import nipype.interfaces.mrtrix3 as mrt
>>> fod = mrt.EstimateFOD()
>>> fod.inputs.in_file = 'dwi.mif'
>>> fod.inputs.response = 'response.txt'
>>> fod.inputs.in_mask = 'mask.nii.gz'
>>> fod.inputs.grad_fsl = ('bvecs', 'bvals')
>>> fod.cmdline
'dwi2fod -fslgrad bvecs bvals -mask mask.nii.gz dwi.mif response.txt\
fods.mif'
>>> fod.run()
"""
_cmd = 'dwi2fod'
input_spec = EstimateFODInputSpec
output_spec = EstimateFODOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
return outputs
| 33.025478 | 79 | 0.65487 |
7948cb1a7d5966fb7bc26109390b9e366d37fe9e
| 2,159 |
py
|
Python
|
src/collaborative_recom.py
|
Sinba7/Movie-Recommender
|
802c9c66739ef1c49287038aca06cb6943a79bd8
|
[
"MIT"
] | 1 |
2021-01-28T06:29:31.000Z
|
2021-01-28T06:29:31.000Z
|
src/collaborative_recom.py
|
Sinba7/Movie-Recommender
|
802c9c66739ef1c49287038aca06cb6943a79bd8
|
[
"MIT"
] | 1 |
2020-12-18T03:58:04.000Z
|
2020-12-18T03:58:04.000Z
|
src/collaborative_recom.py
|
Sinba7/Movie-Recommender
|
802c9c66739ef1c49287038aca06cb6943a79bd8
|
[
"MIT"
] | null | null | null |
import pandas as pd
from surprise import Dataset, Reader, KNNWithMeans, KNNWithZScore
import logbook
import sys
# logging
function_log = logbook.Logger('RECOMMENDER')
level = logbook.TRACE
logbook.StreamHandler(sys.stdout, level=level).push_application()
def collab_recommender(train_data, test_data, user_based=True, normalization=False, k=100, sim='cosine'):
"""
Input:
- train_data: dataframe, n*3, columns are ['userid','movieid','rating']
- test_data: dataframe, n*2, columns are ['userid', 'movieid']
- user_base: boolean, use user-based knn algorithm if True, use item-based knn algorithm if False
- normalization: boolean, conduct z-score normalization on user/item matrix if True
- k: int, number of nearest neighbors
- sim: string, define the similarity matrix from ['cosine', 'pearson', 'msd', 'pearson_baseline']
Output:
- pred_rating: dataframe, n*2, columns are ['movieid', 'rating']
"""
try:
function_log.trace('Start collaborative recommendation function')
reader = Reader(rating_scale=(1,5))
data = Dataset.load_from_df(train_data, reader)
sim_options = {
'name':sim,
'user_based': user_based
}
if normalization:
algo = KNNWithZScore(k=k, sim_options=sim_options, verbose=False)
else:
algo = KNNWithMeans(k=k, sim_options=sim_options, verbose=False)
train_set = data.build_full_trainset()
algo.fit(train_set)
pred_rating = {'movieid':[], 'rating':[]}
for idx in test_data.index:
pred_rating['movieid'].append(test_data.loc[idx, 'movieid'])
pred = algo.predict(test_data.loc[idx, 'userid'], test_data.loc[idx, 'movieid'])
pred_rating['rating'].append(pred.est)
function_log.trace('Finish collaborative recommendation function')
return pd.DataFrame(pred_rating)
except ValueError:
function_log.warn("Training and test data cannot be none.")
raise ValueError
except Exception as x:
function_log.exception(f'collaborative recommendation function failed {x}')
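def _example_usage():
    """Minimal sketch with synthetic ratings (hypothetical users/movies); the
    column names follow the contract documented in collab_recommender above."""
    train = pd.DataFrame({'userid': [1, 1, 2, 2, 3, 3],
                          'movieid': [10, 20, 10, 30, 20, 30],
                          'rating': [5, 4, 4, 2, 5, 1]})
    test = pd.DataFrame({'userid': [1, 1], 'movieid': [30, 40]})
    return collab_recommender(train, test, user_based=True, k=2)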
| 36.59322 | 106 | 0.670218 |
7948cbba9a58424c8618696775d4a232f0d17be4
| 12,259 |
py
|
Python
|
openpype/hosts/nuke/plugins/load/load_effects_ip.py
|
icyvapor/OpenPype
|
bf39fc72a550ce8ac6fc8d4c07bc2359ec9cdfa8
|
[
"MIT"
] | null | null | null |
openpype/hosts/nuke/plugins/load/load_effects_ip.py
|
icyvapor/OpenPype
|
bf39fc72a550ce8ac6fc8d4c07bc2359ec9cdfa8
|
[
"MIT"
] | null | null | null |
openpype/hosts/nuke/plugins/load/load_effects_ip.py
|
icyvapor/OpenPype
|
bf39fc72a550ce8ac6fc8d4c07bc2359ec9cdfa8
|
[
"MIT"
] | null | null | null |
import json
from collections import OrderedDict
import nuke
from avalon import api, style, io
from openpype.hosts.nuke.api import lib
from openpype.hosts.nuke.api import (
containerise,
update_container,
viewer_update_and_undo_stop
)
class LoadEffectsInputProcess(api.Loader):
"""Loading colorspace soft effect exported from nukestudio"""
representations = ["effectJson"]
families = ["effect"]
label = "Load Effects - Input Process"
order = 0
icon = "eye"
color = style.colors.alert
ignore_attr = ["useLifetime"]
def load(self, context, name, namespace, data):
"""
Loading function to get the soft effects to particular read node
Arguments:
context (dict): context of version
name (str): name of the version
namespace (str): asset name
data (dict): compulsory attribute > not used
Returns:
nuke node: containerised nuke node object
"""
# get main variables
version = context['version']
version_data = version.get("data", {})
vname = version.get("name", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
workfile_first_frame = int(nuke.root()["first_frame"].getValue())
namespace = namespace or context['asset']['name']
colorspace = version_data.get("colorspace", None)
object_name = "{}_{}".format(name, namespace)
# prepare data for imprinting
# add additional metadata from the version to imprint to Avalon knob
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
for k in add_keys:
data_imprint.update({k: version_data[k]})
# getting file path
file = self.fname.replace("\\", "/")
# getting data from json file with unicode conversion
with open(file, "r") as f:
json_f = {self.byteify(key): self.byteify(value)
for key, value in json.load(f).iteritems()}
# get correct order of nodes by positions on track and subtrack
nodes_order = self.reorder_nodes(json_f)
# adding nodes to node graph
# just in case we are in group lets jump out of it
nuke.endGroup()
GN = nuke.createNode(
"Group",
"name {}_1".format(object_name))
# adding content to the group node
with GN:
pre_node = nuke.createNode("Input")
pre_node["name"].setValue("rgb")
for ef_name, ef_val in nodes_order.items():
node = nuke.createNode(ef_val["class"])
for k, v in ef_val["node"].items():
if k in self.ignore_attr:
continue
try:
node[k].value()
except NameError as e:
self.log.warning(e)
continue
if isinstance(v, list) and len(v) > 4:
node[k].setAnimated()
for i, value in enumerate(v):
if isinstance(value, list):
for ci, cv in enumerate(value):
node[k].setValueAt(
cv,
(workfile_first_frame + i),
ci)
else:
node[k].setValueAt(
value,
(workfile_first_frame + i))
else:
node[k].setValue(v)
node.setInput(0, pre_node)
pre_node = node
output = nuke.createNode("Output")
output.setInput(0, pre_node)
# try to place it under Viewer1
if not self.connect_active_viewer(GN):
nuke.delete(GN)
return
GN["tile_color"].setValue(int("0x3469ffff", 16))
self.log.info("Loaded lut setup: `{}`".format(GN["name"].value()))
return containerise(
node=GN,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__,
data=data_imprint)
def update(self, container, representation):
"""Update the Loader's path
Nuke automatically tries to reset some variables when changing
the loader's path to a new file. These automatic changes are to its
inputs:
"""
# get main variables
# Get version from io
version = io.find_one({
"type": "version",
"_id": representation["parent"]
})
# get corresponding node
GN = nuke.toNode(container['objectName'])
file = api.get_representation_path(representation).replace("\\", "/")
name = container['name']
version_data = version.get("data", {})
vname = version.get("name", None)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
workfile_first_frame = int(nuke.root()["first_frame"].getValue())
namespace = container['namespace']
colorspace = version_data.get("colorspace", None)
object_name = "{}_{}".format(name, namespace)
add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"]
data_imprint = {"representation": str(representation["_id"]),
"frameStart": first,
"frameEnd": last,
"version": vname,
"colorspaceInput": colorspace,
"objectName": object_name}
for k in add_keys:
data_imprint.update({k: version_data[k]})
# Update the imprinted representation
update_container(
GN,
data_imprint
)
# getting data from json file with unicode conversion
with open(file, "r") as f:
json_f = {self.byteify(key): self.byteify(value)
for key, value in json.load(f).iteritems()}
# get correct order of nodes by positions on track and subtrack
nodes_order = self.reorder_nodes(json_f)
# adding nodes to node graph
# just in case we are in group lets jump out of it
nuke.endGroup()
# adding content to the group node
with GN:
# first remove all nodes
[nuke.delete(n) for n in nuke.allNodes()]
# create input node
pre_node = nuke.createNode("Input")
pre_node["name"].setValue("rgb")
for ef_name, ef_val in nodes_order.items():
node = nuke.createNode(ef_val["class"])
for k, v in ef_val["node"].items():
if k in self.ignore_attr:
continue
try:
node[k].value()
except NameError as e:
self.log.warning(e)
continue
if isinstance(v, list) and len(v) > 4:
node[k].setAnimated()
for i, value in enumerate(v):
if isinstance(value, list):
for ci, cv in enumerate(value):
node[k].setValueAt(
cv,
(workfile_first_frame + i),
ci)
else:
node[k].setValueAt(
value,
(workfile_first_frame + i))
else:
node[k].setValue(v)
node.setInput(0, pre_node)
pre_node = node
# create output node
output = nuke.createNode("Output")
output.setInput(0, pre_node)
# # try to place it under Viewer1
# if not self.connect_active_viewer(GN):
# nuke.delete(GN)
# return
# get all versions in list
versions = io.find({
"type": "version",
"parent": version["parent"]
}).distinct('name')
max_version = max(versions)
# change color of node
if version.get("name") not in [max_version]:
GN["tile_color"].setValue(int("0xd84f20ff", 16))
else:
GN["tile_color"].setValue(int("0x3469ffff", 16))
self.log.info("udated to version: {}".format(version.get("name")))
def connect_active_viewer(self, group_node):
"""
Finds Active viewer and
place the node under it, also adds
name of group into Input Process of the viewer
Arguments:
group_node (nuke node): nuke group node object
"""
group_node_name = group_node["name"].value()
viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()]
if len(viewer) > 0:
viewer = viewer[0]
else:
msg = str("Please create Viewer node before you "
"run this action again")
self.log.error(msg)
nuke.message(msg)
return None
# get coordinates of Viewer1
xpos = viewer["xpos"].value()
ypos = viewer["ypos"].value()
ypos += 150
viewer["ypos"].setValue(ypos)
# set coordinates to group node
group_node["xpos"].setValue(xpos)
group_node["ypos"].setValue(ypos + 50)
# add group node name to Viewer Input Process
viewer["input_process_node"].setValue(group_node_name)
# put backdrop under
lib.create_backdrop(
label="Input Process",
layer=2,
nodes=[viewer, group_node],
color="0x7c7faaff")
return True
def reorder_nodes(self, data):
new_order = OrderedDict()
trackNums = [v["trackIndex"] for k, v in data.items()
if isinstance(v, dict)]
subTrackNums = [v["subTrackIndex"] for k, v in data.items()
if isinstance(v, dict)]
for trackIndex in range(
min(trackNums), max(trackNums) + 1):
for subTrackIndex in range(
min(subTrackNums), max(subTrackNums) + 1):
item = self.get_item(data, trackIndex, subTrackIndex)
if item is not {}:
new_order.update(item)
return new_order
def get_item(self, data, trackIndex, subTrackIndex):
return {key: val for key, val in data.items()
if isinstance(val, dict)
if subTrackIndex == val["subTrackIndex"]
if trackIndex == val["trackIndex"]}
def byteify(self, input):
"""
        Converts unicode strings to byte strings,
        walking through the whole input dictionary recursively.
Arguments:
input (dict/str): input
Returns:
dict: with fixed values and keys
"""
if isinstance(input, dict):
return {self.byteify(key): self.byteify(value)
for key, value in input.iteritems()}
elif isinstance(input, list):
return [self.byteify(element) for element in input]
elif isinstance(input, unicode):
return input.encode('utf-8')
else:
return input
def switch(self, container, representation):
self.update(container, representation)
def remove(self, container):
node = nuke.toNode(container['objectName'])
with viewer_update_and_undo_stop():
nuke.delete(node)
| 33.77135 | 79 | 0.514805 |
7948cbcd5b329fa8148010494ec9d74c1d6a3e50
| 2,039 |
py
|
Python
|
api/models.py
|
thevetdoctor/quickcheck
|
7990752fa3f118b8308df4a2ae58223d7c394329
|
[
"MIT"
] | 1 |
2021-12-18T07:08:09.000Z
|
2021-12-18T07:08:09.000Z
|
api/models.py
|
thevetdoctor/quickcheck
|
7990752fa3f118b8308df4a2ae58223d7c394329
|
[
"MIT"
] | null | null | null |
api/models.py
|
thevetdoctor/quickcheck
|
7990752fa3f118b8308df4a2ae58223d7c394329
|
[
"MIT"
] | null | null | null |
from . import db
class News(db.Model):
id = db.Column(db.Integer, primary_key=True)
item_id = db.Column(db.Integer, unique=True, nullable=False)
title = db.Column(db.String, nullable=True)
type = db.Column(db.String(20), nullable=False)
text = db.Column(db.String, nullable=True)
time = db.Column(db.Integer, nullable=False)
url = db.Column(db.String(250), nullable=True)
by = db.Column(db.String(50), nullable=True)
source = db.Column(db.String(50), nullable=False)
# kids = db.relationship('Comments', backref='news', lazy='joined')
def __init__(self, item_id, title, type, text, time, url, by, source):
self.item_id = item_id
self.title = title
self.type = type
self.text = text
self.time = time
self.url = url
self.by = by
self.source = source
class Comments(db.Model):
id = db.Column(db.Integer, primary_key=True)
item_id = db.Column(db.Integer, unique=True, nullable=False)
title = db.Column(db.String, nullable=True)
type = db.Column(db.String(20), nullable=False)
text = db.Column(db.String, nullable=True)
time = db.Column(db.Integer, nullable=False)
url = db.Column(db.String(250), nullable=True)
by = db.Column(db.String(50), nullable=True)
source = db.Column(db.String(50), nullable=False)
parent = db.Column(db.Integer, db.ForeignKey(
'news.item_id'), nullable=False)
parents = db.relationship('News', lazy='joined', innerjoin=True)
parentz = db.Column(db.Integer, db.ForeignKey(
'comments.item_id'), nullable=True)
kids = db.relationship(
'Comments', remote_side=[item_id])
def __init__(self, item_id, title, type, text, time, url, by, source, parent, parentz):
self.item_id = item_id
self.title = title
self.type = type
self.text = text
self.time = time
self.url = url
self.by = by
self.source = source
self.parent = parent
self.parentz = parentz
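# Illustrative usage sketch (assumes an application context and that `db` is
# the Flask-SQLAlchemy handle imported above): a comment row references its
# parent story through `parent` (News.item_id) and, optionally, another
# comment through `parentz`.
# story = News(1, "Example story", "story", None, 1600000000,
#              "http://example.com", "alice", "hn")
# reply = Comments(2, None, "comment", "Nice!", 1600000100, None, "bob", "hn",
#                  parent=1, parentz=None)
# db.session.add_all([story, reply]); db.session.commit()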
| 34.559322 | 91 | 0.635606 |
7948cd1ef747912261890453712d0d4551fc1019
| 4,733 |
py
|
Python
|
EncoderDecoder/prep_inchi.py
|
Fpiotro/MOLECULAR-TRANSLATION
|
050dd0c093ee4e68326c2404c5b4dbf53ca6c8a0
|
[
"MIT"
] | 5 |
2021-12-09T11:22:04.000Z
|
2022-03-28T06:54:29.000Z
|
EncoderDecoder/prep_inchi.py
|
Fpiotro/MOLECULAR-TRANSLATION
|
050dd0c093ee4e68326c2404c5b4dbf53ca6c8a0
|
[
"MIT"
] | null | null | null |
EncoderDecoder/prep_inchi.py
|
Fpiotro/MOLECULAR-TRANSLATION
|
050dd0c093ee4e68326c2404c5b4dbf53ca6c8a0
|
[
"MIT"
] | null | null | null |
# ====================================================
# Library
# ====================================================
import os
import re
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
tqdm.pandas()  # registers Series.progress_apply, used in main() below
# ====================================================
# Data Loading
# ====================================================
train = pd.read_csv('../input/bms-molecular-translation/train_labels.csv')
print(f'train.shape: {train.shape}')
# ====================================================
# Preprocess functions
# ====================================================
def split_form(form):
string = ''
for i in re.findall(r"[A-Z][^A-Z]*", form):
elem = re.match(r"\D+", i).group()
num = i.replace(elem, "")
if num == "":
string += f"{elem} "
else:
string += f"{elem} {str(num)} "
return string.rstrip(' ')
def split_form2(form):
string = ''
for i in re.findall(r"[a-z][^a-z]*", form):
elem = i[0]
num = i.replace(elem, "").replace('/', "")
num_string = ''
for j in re.findall(r"[0-9]+[^0-9]*", num):
num_list = list(re.findall(r'\d+', j))
assert len(num_list) == 1, f"len(num_list) != 1"
_num = num_list[0]
if j == _num:
num_string += f"{_num} "
else:
extra = j.replace(_num, "")
num_string += f"{_num} {' '.join(list(extra))} "
string += f"/{elem} {num_string}"
return string.rstrip(' ')
def get_train_file_path(image_id):
return "../input/bms-molecular-translation/train/{}/{}/{}/{}.png".format(
image_id[0], image_id[1], image_id[2], image_id
)
# ====================================================
# Tokenizer
# ====================================================
class Tokenizer(object):
def __init__(self):
self.stoi = {}
self.itos = {}
def __len__(self):
return len(self.stoi)
def fit_on_texts(self, texts):
vocab = set()
for text in texts:
vocab.update(text.split(' '))
vocab = sorted(vocab)
vocab.append('<sos>')
vocab.append('<eos>')
vocab.append('<pad>')
for i, s in enumerate(vocab):
self.stoi[s] = i
self.itos = {item[1]: item[0] for item in self.stoi.items()}
def text_to_sequence(self, text):
sequence = []
sequence.append(self.stoi['<sos>'])
for s in text.split(' '):
sequence.append(self.stoi[s])
sequence.append(self.stoi['<eos>'])
return sequence
def texts_to_sequences(self, texts):
sequences = []
for text in texts:
sequence = self.text_to_sequence(text)
sequences.append(sequence)
return sequences
def sequence_to_text(self, sequence):
return ''.join(list(map(lambda i: self.itos[i], sequence)))
def sequences_to_texts(self, sequences):
texts = []
for sequence in sequences:
text = self.sequence_to_text(sequence)
texts.append(text)
return texts
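    def predict_caption(self, sequence):
        # Assumed helper (not present in the original file) required by
        # predict_captions below: decode token ids until the first <eos>/<pad>.
        caption = ''
        for i in sequence:
            if i == self.stoi['<eos>'] or i == self.stoi['<pad>']:
                break
            caption += self.itos[i]
        return caption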
def predict_captions(self, sequences):
captions = []
for sequence in sequences:
caption = self.predict_caption(sequence)
captions.append(caption)
return captions
# ====================================================
# Main
# ====================================================
def main():
# ====================================================
# preprocess train.csv
# ====================================================
train['InChI_text'] = train['InChI'].apply(lambda x: '/'.join(x.split('/')[2:])).progress_apply(split_form2).values
# ====================================================
# create tokenizer
# ====================================================
tokenizer = Tokenizer()
tokenizer.fit_on_texts(train['InChI_text'].values)
torch.save(tokenizer, 'tokenizer2.pth')
print('Saved tokenizer')
# ====================================================
# preprocess train.csv
# ====================================================
lengths = []
tk0 = tqdm(train['InChI_text'].values, total=len(train))
for text in tk0:
seq = tokenizer.text_to_sequence(text)
length = len(seq) - 2
lengths.append(length)
train['InChI_length'] = lengths
train['file_path'] = train['image_id'].apply(get_train_file_path)
train.to_pickle('train2.pkl')
print('Saved preprocessed train.pkl')
if __name__ == '__main__':
main()
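# Illustrative round trip for the Tokenizer above (example tokens, not taken from the dataset):
#   tok = Tokenizer()
#   tok.fit_on_texts(['C 13 H 20 O S'])
#   seq = tok.text_to_sequence('C 13 H 20 O S')   # [<sos>, ..., <eos>] ids
#   tok.sequence_to_text(seq[1:-1])               # -> 'C13H20OS'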
| 34.05036 | 120 | 0.445383 |
7948cdcbab4a100747e3e047695882c6ccc4ac90
| 4,132 |
py
|
Python
|
ditto/pinboard/migrations/0011_auto_20151002_1525.py
|
garrettc/django-ditto
|
fcf15beb8f9b4d61634efd4a88064df12ee16a6f
|
[
"MIT"
] | 54 |
2016-08-15T17:32:41.000Z
|
2022-02-27T03:32:05.000Z
|
ditto/pinboard/migrations/0011_auto_20151002_1525.py
|
garrettc/django-ditto
|
fcf15beb8f9b4d61634efd4a88064df12ee16a6f
|
[
"MIT"
] | 229 |
2015-07-23T12:50:47.000Z
|
2022-03-24T10:33:20.000Z
|
ditto/pinboard/migrations/0011_auto_20151002_1525.py
|
garrettc/django-ditto
|
fcf15beb8f9b4d61634efd4a88064df12ee16a6f
|
[
"MIT"
] | 8 |
2015-09-10T17:10:35.000Z
|
2022-03-25T13:05:01.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
("pinboard", "0010_auto_20150730_1112"),
]
operations = [
migrations.AlterField(
model_name="account",
name="api_token",
field=models.CharField(
help_text='From https://pinboard.in/settings/password eg, "philgyford:1234567890ABCDEFGHIJ"', # noqa: E501
max_length=51,
),
),
migrations.AlterField(
model_name="account",
name="time_created",
field=models.DateTimeField(
help_text="The time this item was created in the database.",
auto_now_add=True,
),
),
migrations.AlterField(
model_name="account",
name="time_modified",
field=models.DateTimeField(
help_text="The time this item was last saved to the database.",
auto_now=True,
),
),
migrations.AlterField(
model_name="account",
name="url",
field=models.URLField(
unique=True,
help_text="eg, 'https://pinboard.in/u:philgyford'",
max_length=255,
),
),
migrations.AlterField(
model_name="account",
name="username",
field=models.CharField(
unique=True, help_text="eg, 'philgyford'", max_length=30
),
),
migrations.AlterField(
model_name="bookmark",
name="description",
field=models.TextField(
help_text="The 'extended' text description.", blank=True
),
),
migrations.AlterField(
model_name="bookmark",
name="fetch_time",
field=models.DateTimeField(
help_text="The time the item's data was last fetched, and was new or changed.", # noqa: E501
null=True,
blank=True,
),
),
migrations.AlterField(
model_name="bookmark",
name="is_private",
field=models.BooleanField(
help_text="If set, this item will not be shown on public-facing pages.",
default=False,
),
),
migrations.AlterField(
model_name="bookmark",
name="permalink",
field=models.URLField(
help_text="URL of the item on the service's website.", blank=True
),
),
migrations.AlterField(
model_name="bookmark",
name="post_time",
field=models.DateTimeField(
help_text="The time this was created on Pinboard.",
null=True,
blank=True,
),
),
migrations.AlterField(
model_name="bookmark",
name="raw",
field=models.TextField(
help_text="eg, the raw JSON from the API.", blank=True
),
),
migrations.AlterField(
model_name="bookmark",
name="summary",
field=models.CharField(
help_text="eg, Initial text of a blog post, start of the description of a photo, all of a Tweet's text, etc. No HTML.", # noqa: E501
blank=True,
max_length=255,
),
),
migrations.AlterField(
model_name="bookmark",
name="time_created",
field=models.DateTimeField(
help_text="The time this item was created in the database.",
auto_now_add=True,
),
),
migrations.AlterField(
model_name="bookmark",
name="time_modified",
field=models.DateTimeField(
help_text="The time this item was last saved to the database.",
auto_now=True,
),
),
]
| 32.535433 | 149 | 0.505808 |
7948cdd8b582b2fcd8fce30038647cb2a78a07a2
| 46 |
py
|
Python
|
miranda/eccc/__init__.py
|
Ouranosinc/miranda
|
5c54767a4e6e6c3c1f638ca0fe22673ea98e2746
|
[
"Apache-2.0"
] | 4 |
2019-11-07T17:45:26.000Z
|
2021-09-22T18:22:01.000Z
|
miranda/eccc/__init__.py
|
Ouranosinc/miranda
|
5c54767a4e6e6c3c1f638ca0fe22673ea98e2746
|
[
"Apache-2.0"
] | 12 |
2019-09-19T17:05:39.000Z
|
2022-03-31T20:26:16.000Z
|
miranda/eccc/__init__.py
|
Ouranosinc/miranda
|
5c54767a4e6e6c3c1f638ca0fe22673ea98e2746
|
[
"Apache-2.0"
] | 1 |
2020-02-01T01:01:22.000Z
|
2020-02-01T01:01:22.000Z
|
from ._raw import *
from ._summaries import *
| 15.333333 | 25 | 0.73913 |
7948cde580135c8041432461eca31cd064cb5736
| 55,496 |
py
|
Python
|
dask-gateway-server/dask_gateway_server/backends/db_base.py
|
ncalandra/dask-gateway
|
17cd3a923b15fdd98a40475d480f80a2e1b463e1
|
[
"BSD-3-Clause"
] | null | null | null |
dask-gateway-server/dask_gateway_server/backends/db_base.py
|
ncalandra/dask-gateway
|
17cd3a923b15fdd98a40475d480f80a2e1b463e1
|
[
"BSD-3-Clause"
] | null | null | null |
dask-gateway-server/dask_gateway_server/backends/db_base.py
|
ncalandra/dask-gateway
|
17cd3a923b15fdd98a40475d480f80a2e1b463e1
|
[
"BSD-3-Clause"
] | null | null | null |
import asyncio
import base64
import json
import os
import uuid
from collections import defaultdict
from itertools import chain, islice
import sqlalchemy as sa
from async_timeout import timeout
from traitlets import Unicode, Bool, List, Integer, Float, validate, default
from cryptography.fernet import MultiFernet, Fernet
from .base import Backend
from .. import models
from ..proxy import Proxy
from ..tls import new_keypair
from ..workqueue import WorkQueue, Backoff, WorkQueueClosed
from ..utils import FrozenAttrDict, TaskPool, Flag, normalize_address, timestamp
__all__ = ("DBBackendBase", "Cluster", "Worker")
def _normalize_encrypt_key(key):
if isinstance(key, str):
key = key.encode("ascii")
if len(key) == 44:
try:
key = base64.urlsafe_b64decode(key)
except ValueError:
pass
if len(key) == 32:
return base64.urlsafe_b64encode(key)
raise ValueError(
"All keys in `db_encrypt_keys`/`DASK_GATEWAY_ENCRYPT_KEYS` must be 32 "
"bytes, base64-encoded"
)
def _is_in_memory_db(url):
return url in ("sqlite://", "sqlite:///:memory:")
class _IntEnum(sa.TypeDecorator):
impl = sa.Integer
def __init__(self, enumclass, *args, **kwargs):
super().__init__(*args, **kwargs)
self._enumclass = enumclass
def process_bind_param(self, value, dialect):
return value.value
def process_result_value(self, value, dialect):
return self._enumclass(value)
class _JSON(sa.TypeDecorator):
"Represents an immutable structure as a json-encoded string."
impl = sa.LargeBinary
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value).encode("utf-8")
return value
def process_result_value(self, value, dialect):
if value is not None:
value = json.loads(value)
return value
class JobStatus(models.IntEnum):
CREATED = 1
SUBMITTED = 2
RUNNING = 3
CLOSING = 4
STOPPED = 5
FAILED = 6
class Cluster(object):
"""Information on a cluster.
    Not all attributes on this object are publicly accessible. When writing a
backend, you may access the following attributes:
Attributes
----------
name : str
The cluster name.
username : str
The user associated with this cluster.
token : str
The API token associated with this cluster. Used to authenticate the
cluster with the gateway.
config : FrozenAttrDict
The serialized ``ClusterConfig`` associated with this cluster.
state : dict
Any cluster state, as yielded from ``do_start_cluster``.
scheduler_address : str
The scheduler address. The empty string if the cluster is not running.
dashboard_address : str
The dashboard address. The empty string if the cluster is not running,
or no dashboard is running on the cluster.
api_address : str
The cluster's api address. The empty string if the cluster is not running.
tls_cert : bytes
The TLS cert credentials associated with the cluster.
tls_key : bytes
The TLS key credentials associated with the cluster.
"""
def __init__(
self,
id=None,
name=None,
username=None,
token=None,
options=None,
config=None,
status=None,
target=None,
count=0,
state=None,
scheduler_address="",
dashboard_address="",
api_address="",
tls_cert=b"",
tls_key=b"",
start_time=None,
stop_time=None,
):
self.id = id
self.name = name
self.username = username
self.token = token
self.options = options
self.config = config
self.status = status
self.target = target
self.count = count
self.state = state
self.scheduler_address = scheduler_address
self.dashboard_address = dashboard_address
self.api_address = api_address
self.tls_cert = tls_cert
self.tls_key = tls_key
self.start_time = start_time
self.stop_time = stop_time
if self.status == JobStatus.RUNNING:
self.last_heartbeat = timestamp()
else:
self.last_heartbeat = None
self.worker_start_failure_count = 0
self.added_to_proxies = False
self.workers = {}
self.ready = Flag()
if self.status >= JobStatus.RUNNING:
self.ready.set()
self.shutdown = Flag()
if self.status >= JobStatus.STOPPED:
self.shutdown.set()
_status_map = {
(JobStatus.CREATED, JobStatus.RUNNING): models.ClusterStatus.PENDING,
(JobStatus.CREATED, JobStatus.CLOSING): models.ClusterStatus.STOPPING,
(JobStatus.CREATED, JobStatus.STOPPED): models.ClusterStatus.STOPPING,
(JobStatus.CREATED, JobStatus.FAILED): models.ClusterStatus.STOPPING,
(JobStatus.SUBMITTED, JobStatus.RUNNING): models.ClusterStatus.PENDING,
(JobStatus.SUBMITTED, JobStatus.CLOSING): models.ClusterStatus.STOPPING,
(JobStatus.SUBMITTED, JobStatus.STOPPED): models.ClusterStatus.STOPPING,
(JobStatus.SUBMITTED, JobStatus.FAILED): models.ClusterStatus.STOPPING,
(JobStatus.RUNNING, JobStatus.RUNNING): models.ClusterStatus.RUNNING,
(JobStatus.RUNNING, JobStatus.CLOSING): models.ClusterStatus.STOPPING,
(JobStatus.RUNNING, JobStatus.STOPPED): models.ClusterStatus.STOPPING,
(JobStatus.RUNNING, JobStatus.FAILED): models.ClusterStatus.STOPPING,
(JobStatus.CLOSING, JobStatus.STOPPED): models.ClusterStatus.STOPPING,
(JobStatus.CLOSING, JobStatus.FAILED): models.ClusterStatus.STOPPING,
(JobStatus.STOPPED, JobStatus.STOPPED): models.ClusterStatus.STOPPED,
(JobStatus.FAILED, JobStatus.FAILED): models.ClusterStatus.FAILED,
}
def active_workers(self):
return [w for w in self.workers.values() if w.is_active()]
def is_active(self):
return self.target < JobStatus.STOPPED
def all_workers_at_least(self, status):
return all(w.status >= status for w in self.workers.values())
@property
def model_status(self):
return self._status_map[self.status, self.target]
def to_model(self):
return models.Cluster(
name=self.name,
username=self.username,
token=self.token,
options=self.options,
config=self.config,
status=self.model_status,
scheduler_address=self.scheduler_address,
dashboard_address=self.dashboard_address,
api_address=self.api_address,
tls_cert=self.tls_cert,
tls_key=self.tls_key,
start_time=self.start_time,
stop_time=self.stop_time,
)
class Worker(object):
"""Information on a worker.
    Not all attributes on this object are publicly accessible. When writing a
backend, you may access the following attributes:
Attributes
----------
name : str
The worker name.
cluster : Cluster
The cluster associated with this worker.
state : dict
Any worker state, as yielded from ``do_start_worker``.
"""
def __init__(
self,
id=None,
name=None,
cluster=None,
status=None,
target=None,
state=None,
start_time=None,
stop_time=None,
close_expected=False,
):
self.id = id
self.name = name
self.cluster = cluster
self.status = status
self.target = target
self.state = state
self.start_time = start_time
self.stop_time = stop_time
self.close_expected = close_expected
def is_active(self):
return self.target < JobStatus.STOPPED
metadata = sa.MetaData()
clusters = sa.Table(
"clusters",
metadata,
sa.Column("id", sa.Integer, primary_key=True, autoincrement=True),
sa.Column("name", sa.Unicode(255), nullable=False, unique=True),
sa.Column("username", sa.Unicode(255), nullable=False),
sa.Column("status", _IntEnum(JobStatus), nullable=False),
sa.Column("target", _IntEnum(JobStatus), nullable=False),
sa.Column("count", sa.Integer, nullable=False),
sa.Column("options", _JSON, nullable=False),
sa.Column("config", _JSON, nullable=False),
sa.Column("state", _JSON, nullable=False),
sa.Column("token", sa.BINARY(140), nullable=False, unique=True),
sa.Column("scheduler_address", sa.Unicode(255), nullable=False),
sa.Column("dashboard_address", sa.Unicode(255), nullable=False),
sa.Column("api_address", sa.Unicode(255), nullable=False),
sa.Column("tls_credentials", sa.LargeBinary, nullable=False),
sa.Column("start_time", sa.Integer, nullable=False),
sa.Column("stop_time", sa.Integer, nullable=True),
)
workers = sa.Table(
"workers",
metadata,
sa.Column("id", sa.Integer, primary_key=True, autoincrement=True),
sa.Column("name", sa.Unicode(255), nullable=False),
sa.Column(
"cluster_id", sa.ForeignKey("clusters.id", ondelete="CASCADE"), nullable=False
),
sa.Column("status", _IntEnum(JobStatus), nullable=False),
sa.Column("target", _IntEnum(JobStatus), nullable=False),
sa.Column("state", _JSON, nullable=False),
sa.Column("start_time", sa.Integer, nullable=False),
sa.Column("stop_time", sa.Integer, nullable=True),
sa.Column("close_expected", sa.Integer, nullable=False),
)
class DataManager(object):
"""Holds the internal state for a single Dask Gateway.
Keeps the memory representation in-sync with the database.
"""
def __init__(self, url="sqlite:///:memory:", encrypt_keys=(), **kwargs):
if url.startswith("sqlite"):
kwargs["connect_args"] = {"check_same_thread": False}
if _is_in_memory_db(url):
kwargs["poolclass"] = sa.pool.StaticPool
self.fernet = None
else:
self.fernet = MultiFernet([Fernet(key) for key in encrypt_keys])
engine = sa.create_engine(url, **kwargs)
if url.startswith("sqlite"):
            # Register PRAGMA foreign_keys=ON for sqlite
@sa.event.listens_for(engine, "connect")
def connect(dbapi_con, con_record):
cursor = dbapi_con.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
metadata.create_all(engine)
self.db = engine
self.username_to_clusters = defaultdict(dict)
self.name_to_cluster = {}
self.id_to_cluster = {}
# Load all existing clusters into memory
for c in self.db.execute(clusters.select()):
tls_cert, tls_key = self.decode_tls_credentials(c.tls_credentials)
token = self.decode_token(c.token)
cluster = Cluster(
id=c.id,
name=c.name,
username=c.username,
token=token,
options=c.options,
config=FrozenAttrDict(c.config),
status=c.status,
target=c.target,
count=c.count,
state=c.state,
scheduler_address=c.scheduler_address,
dashboard_address=c.dashboard_address,
api_address=c.api_address,
tls_cert=tls_cert,
tls_key=tls_key,
start_time=c.start_time,
stop_time=c.stop_time,
)
self.username_to_clusters[cluster.username][cluster.name] = cluster
self.id_to_cluster[cluster.id] = cluster
self.name_to_cluster[cluster.name] = cluster
# Next load all existing workers into memory
for w in self.db.execute(workers.select()):
cluster = self.id_to_cluster[w.cluster_id]
worker = Worker(
id=w.id,
name=w.name,
status=w.status,
target=w.target,
cluster=cluster,
state=w.state,
start_time=w.start_time,
stop_time=w.stop_time,
close_expected=w.close_expected,
)
cluster.workers[worker.name] = worker
def cleanup_expired(self, max_age_in_seconds):
cutoff = timestamp() - max_age_in_seconds * 1000
with self.db.begin() as conn:
to_delete = conn.execute(
sa.select([clusters.c.id]).where(clusters.c.stop_time < cutoff)
).fetchall()
if to_delete:
to_delete = [i for i, in to_delete]
conn.execute(
clusters.delete().where(clusters.c.id == sa.bindparam("id")),
[{"id": i} for i in to_delete],
)
for i in to_delete:
cluster = self.id_to_cluster.pop(i)
self.name_to_cluster.pop(cluster.name, None)
user_clusters = self.username_to_clusters[cluster.username]
user_clusters.pop(cluster.name)
if not user_clusters:
self.username_to_clusters.pop(cluster.username)
return len(to_delete)
def encrypt(self, b):
"""Encrypt bytes ``b``. If encryption is disabled this is a no-op"""
return b if self.fernet is None else self.fernet.encrypt(b)
def decrypt(self, b):
"""Decrypt bytes ``b``. If encryption is disabled this is a no-op"""
return b if self.fernet is None else self.fernet.decrypt(b)
def encode_tls_credentials(self, tls_cert, tls_key):
return self.encrypt(b";".join((tls_cert, tls_key)))
def decode_tls_credentials(self, data):
return self.decrypt(data).split(b";")
def encode_token(self, token):
return self.encrypt(token.encode("utf8"))
def decode_token(self, data):
return self.decrypt(data).decode()
def get_cluster(self, cluster_name):
return self.name_to_cluster.get(cluster_name)
def list_clusters(self, username=None, statuses=None):
if statuses is None:
select = lambda x: x.is_active()
else:
statuses = set(statuses)
select = lambda x: x.model_status in statuses
if username is None:
return [
cluster for cluster in self.name_to_cluster.values() if select(cluster)
]
else:
clusters = self.username_to_clusters.get(username)
if clusters is None:
return []
return [cluster for cluster in clusters.values() if select(cluster)]
def active_clusters(self):
for cluster in self.name_to_cluster.values():
if cluster.is_active():
yield cluster
def create_cluster(self, username, options, config):
"""Create a new cluster for a user"""
cluster_name = uuid.uuid4().hex
token = uuid.uuid4().hex
tls_cert, tls_key = new_keypair(cluster_name)
# Encode the tls credentials for storing in the database
tls_credentials = self.encode_tls_credentials(tls_cert, tls_key)
enc_token = self.encode_token(token)
common = {
"name": cluster_name,
"username": username,
"options": options,
"status": JobStatus.CREATED,
"target": JobStatus.RUNNING,
"count": 0,
"state": {},
"scheduler_address": "",
"dashboard_address": "",
"api_address": "",
"start_time": timestamp(),
}
with self.db.begin() as conn:
res = conn.execute(
clusters.insert().values(
tls_credentials=tls_credentials,
token=enc_token,
config=config,
**common,
)
)
cluster = Cluster(
id=res.inserted_primary_key[0],
token=token,
tls_cert=tls_cert,
tls_key=tls_key,
config=FrozenAttrDict(config),
**common,
)
self.id_to_cluster[cluster.id] = cluster
self.name_to_cluster[cluster_name] = cluster
self.username_to_clusters[username][cluster_name] = cluster
return cluster
def create_worker(self, cluster):
"""Create a new worker for a cluster"""
worker_name = uuid.uuid4().hex
common = {
"name": worker_name,
"status": JobStatus.CREATED,
"target": JobStatus.RUNNING,
"state": {},
"start_time": timestamp(),
"close_expected": False,
}
with self.db.begin() as conn:
res = conn.execute(workers.insert().values(cluster_id=cluster.id, **common))
worker = Worker(id=res.inserted_primary_key[0], cluster=cluster, **common)
cluster.workers[worker.name] = worker
return worker
def update_cluster(self, cluster, **kwargs):
"""Update a cluster's state"""
with self.db.begin() as conn:
conn.execute(
clusters.update().where(clusters.c.id == cluster.id).values(**kwargs)
)
for k, v in kwargs.items():
setattr(cluster, k, v)
def update_clusters(self, updates):
"""Update multiple clusters' states"""
if not updates:
return
with self.db.begin() as conn:
conn.execute(
clusters.update().where(clusters.c.id == sa.bindparam("_id")),
[{"_id": c.id, **u} for c, u in updates],
)
for c, u in updates:
for k, v in u.items():
setattr(c, k, v)
def update_worker(self, worker, **kwargs):
"""Update a worker's state"""
with self.db.begin() as conn:
conn.execute(
workers.update().where(workers.c.id == worker.id).values(**kwargs)
)
for k, v in kwargs.items():
setattr(worker, k, v)
def update_workers(self, updates):
"""Update multiple workers' states"""
if not updates:
return
with self.db.begin() as conn:
conn.execute(
workers.update().where(workers.c.id == sa.bindparam("_id")),
[{"_id": w.id, **u} for w, u in updates],
)
for w, u in updates:
for k, v in u.items():
setattr(w, k, v)
class DBBackendBase(Backend):
"""A base class for defining backends that rely on a database for managing state.
Subclasses should define the following methods:
- ``do_setup``
- ``do_cleanup``
- ``do_start_cluster``
- ``do_stop_cluster``
- ``do_check_clusters``
- ``do_start_worker``
- ``do_stop_worker``
- ``do_check_workers``
"""
db_url = Unicode(
"sqlite:///:memory:",
help="""
The URL for the database. Default is in-memory only.
If not in-memory, ``db_encrypt_keys`` must also be set.
""",
config=True,
)
db_encrypt_keys = List(
help="""
A list of keys to use to encrypt private data in the database. Can also
be set by the environment variable ``DASK_GATEWAY_ENCRYPT_KEYS``, where
the value is a ``;`` delimited string of encryption keys.
Each key should be a base64-encoded 32 byte value, and should be
cryptographically random. Lacking other options, openssl can be used to
generate a single key via:
.. code-block:: shell
$ openssl rand -base64 32
A single key is valid, multiple keys can be used to support key rotation.
""",
config=True,
)
@default("db_encrypt_keys")
def _db_encrypt_keys_default(self):
keys = os.environb.get(b"DASK_GATEWAY_ENCRYPT_KEYS", b"").strip()
if not keys:
return []
return [_normalize_encrypt_key(k) for k in keys.split(b";") if k.strip()]
@validate("db_encrypt_keys")
def _db_encrypt_keys_validate(self, proposal):
if not proposal.value and not _is_in_memory_db(self.db_url):
raise ValueError(
"Must configure `db_encrypt_keys`/`DASK_GATEWAY_ENCRYPT_KEYS` "
"when not using an in-memory database"
)
return [_normalize_encrypt_key(k) for k in proposal.value]
db_debug = Bool(
False, help="If True, all database operations will be logged", config=True
)
db_cleanup_period = Float(
600,
help="""
Time (in seconds) between database cleanup tasks.
This sets how frequently old records are removed from the database.
This shouldn't be too small (to keep the overhead low), but should be
smaller than ``db_record_max_age`` (probably by an order of magnitude).
""",
config=True,
)
db_cluster_max_age = Float(
3600 * 24,
help="""
Max time (in seconds) to keep around records of completed clusters.
Every ``db_cleanup_period``, completed clusters older than
``db_cluster_max_age`` are removed from the database.
""",
config=True,
)
stop_clusters_on_shutdown = Bool(
True,
help="""
Whether to stop active clusters on gateway shutdown.
If true, all active clusters will be stopped before shutting down the
gateway. Set to False to leave active clusters running.
""",
config=True,
)
@validate("stop_clusters_on_shutdown")
def _stop_clusters_on_shutdown_validate(self, proposal):
if not proposal.value and _is_in_memory_db(self.db_url):
raise ValueError(
"When using an in-memory database, `stop_clusters_on_shutdown` "
"must be True"
)
return proposal.value
cluster_status_period = Float(
30,
help="""
Time (in seconds) between cluster status checks.
A smaller period will detect failed clusters sooner, but will use more
resources. A larger period will provide slower feedback in the presence
of failures.
""",
config=True,
)
worker_status_period = Float(
30,
help="""
Time (in seconds) between worker status checks.
A smaller period will detect failed workers sooner, but will use more
resources. A larger period will provide slower feedback in the presence
of failures.
""",
config=True,
)
cluster_heartbeat_period = Integer(
15,
help="""
Time (in seconds) between cluster heartbeats to the gateway.
A smaller period will detect failed workers sooner, but will use more
resources. A larger period will provide slower feedback in the presence
of failures.
""",
config=True,
)
cluster_heartbeat_timeout = Float(
help="""
Timeout (in seconds) before killing a dask cluster that's failed to heartbeat.
This should be greater than ``cluster_heartbeat_period``. Defaults to
``2 * cluster_heartbeat_period``.
""",
config=True,
)
@default("cluster_heartbeat_timeout")
def _default_cluster_heartbeat_timeout(self):
return self.cluster_heartbeat_period * 2
cluster_start_timeout = Float(
60,
help="""
Timeout (in seconds) before giving up on a starting dask cluster.
""",
config=True,
)
worker_start_timeout = Float(
60,
help="""
Timeout (in seconds) before giving up on a starting dask worker.
""",
config=True,
)
check_timeouts_period = Float(
help="""
Time (in seconds) between timeout checks.
This shouldn't be too small (to keep the overhead low), but should be
smaller than ``cluster_heartbeat_timeout``, ``cluster_start_timeout``,
and ``worker_start_timeout``.
""",
config=True,
)
@default("check_timeouts_period")
def _default_check_timeouts_period(self):
min_timeout = min(
self.cluster_heartbeat_timeout,
self.cluster_start_timeout,
self.worker_start_timeout,
)
return min(20, min_timeout / 2)
worker_start_failure_limit = Integer(
3,
help="""
A limit on the number of failed attempts to start a worker before the
cluster is marked as failed.
Every worker that fails to start (timeouts exempt) increments a
counter. The counter is reset if a worker successfully starts. If the
counter ever exceeds this limit, the cluster is marked as failed and is
shutdown.
""",
config=True,
)
parallelism = Integer(
20,
help="""
Number of handlers to use for starting/stopping clusters.
""",
config=True,
)
backoff_base_delay = Float(
0.1,
help="""
Base delay (in seconds) for backoff when retrying after failures.
If an operation fails, it is retried after a backoff computed as:
```
min(backoff_max_delay, backoff_base_delay * 2 ** num_failures)
```
""",
config=True,
)
backoff_max_delay = Float(
300,
help="""
Max delay (in seconds) for backoff policy when retrying after failures.
""",
config=True,
)
api_url = Unicode(
help="""
The address that internal components (e.g. dask clusters)
will use when contacting the gateway.
Defaults to `{proxy_address}/{prefix}/api`, set manually if a different
address should be used.
""",
config=True,
)
@default("api_url")
def _api_url_default(self):
proxy = self.proxy
scheme = "https" if proxy.tls_cert else "http"
address = normalize_address(proxy.address, resolve_host=True)
return f"{scheme}://{address}{proxy.prefix}/api"
async def setup(self, app):
await super().setup(app)
        # Set up reconciliation queues
self.queue = WorkQueue(
backoff=Backoff(
base_delay=self.backoff_base_delay, max_delay=self.backoff_max_delay
)
)
self.reconcilers = [
asyncio.ensure_future(self.reconciler_loop())
for _ in range(self.parallelism)
]
# Start the proxy
self.proxy = Proxy(parent=self, log=self.log)
await self.proxy.setup(app)
# Load the database
self.db = DataManager(
url=self.db_url, echo=self.db_debug, encrypt_keys=self.db_encrypt_keys
)
# Start background tasks
self.task_pool = TaskPool()
self.task_pool.spawn(self.check_timeouts_loop())
self.task_pool.spawn(self.check_clusters_loop())
self.task_pool.spawn(self.check_workers_loop())
self.task_pool.spawn(self.cleanup_db_loop())
        # Load all active clusters/workers into reconciliation queues
for cluster in self.db.name_to_cluster.values():
if cluster.status < JobStatus.STOPPED:
self.queue.put(cluster)
for worker in cluster.workers.values():
if worker.status < JobStatus.STOPPED:
self.queue.put(worker)
# Further backend-specific setup
await self.do_setup()
self.log.info(
"Backend started, clusters will contact api server at %s", self.api_url
)
async def cleanup(self):
if hasattr(self, "task_pool"):
# Stop background tasks
await self.task_pool.close()
if hasattr(self, "db"):
if self.stop_clusters_on_shutdown:
# Request all active clusters be stopped
active = list(self.db.active_clusters())
if active:
self.log.info("Stopping %d active clusters...", len(active))
self.db.update_clusters(
[(c, {"target": JobStatus.FAILED}) for c in active]
)
for c in active:
self.queue.put(c)
# Wait until all clusters are shutdown
pending_shutdown = [
c
for c in self.db.name_to_cluster.values()
if c.status < JobStatus.STOPPED
]
if pending_shutdown:
await asyncio.wait([c.shutdown for c in pending_shutdown])
        # Stop reconciliation queues
if hasattr(self, "reconcilers"):
self.queue.close()
await asyncio.gather(*self.reconcilers, return_exceptions=True)
await self.do_cleanup()
if hasattr(self, "proxy"):
await self.proxy.cleanup()
await super().cleanup()
async def list_clusters(self, username=None, statuses=None):
clusters = self.db.list_clusters(username=username, statuses=statuses)
return [c.to_model() for c in clusters]
async def get_cluster(self, cluster_name, wait=False):
cluster = self.db.get_cluster(cluster_name)
if cluster is None:
return None
if wait:
try:
await asyncio.wait_for(cluster.ready, 20)
except asyncio.TimeoutError:
pass
return cluster.to_model()
async def start_cluster(self, user, cluster_options):
options, config = await self.process_cluster_options(user, cluster_options)
cluster = self.db.create_cluster(user.name, options, config.to_dict())
self.log.info("Created cluster %s for user %s", cluster.name, user.name)
self.queue.put(cluster)
return cluster.name
async def stop_cluster(self, cluster_name, failed=False):
cluster = self.db.get_cluster(cluster_name)
if cluster is None:
return
if cluster.target <= JobStatus.RUNNING:
self.log.info("Stopping cluster %s", cluster.name)
target = JobStatus.FAILED if failed else JobStatus.STOPPED
self.db.update_cluster(cluster, target=target)
self.queue.put(cluster)
async def on_cluster_heartbeat(self, cluster_name, msg):
cluster = self.db.get_cluster(cluster_name)
if cluster is None or cluster.target > JobStatus.RUNNING:
return
cluster.last_heartbeat = timestamp()
if cluster.status == JobStatus.RUNNING:
cluster_update = {}
else:
cluster_update = {
"api_address": msg["api_address"],
"scheduler_address": msg["scheduler_address"],
"dashboard_address": msg["dashboard_address"],
}
count = msg["count"]
active_workers = set(msg["active_workers"])
closing_workers = set(msg["closing_workers"])
closed_workers = set(msg["closed_workers"])
self.log.info(
"Cluster %s heartbeat [count: %d, n_active: %d, n_closing: %d, n_closed: %d]",
cluster_name,
count,
len(active_workers),
len(closing_workers),
len(closed_workers),
)
max_workers = cluster.config.get("cluster_max_workers")
if max_workers is not None and count > max_workers:
# This shouldn't happen under normal operation, but could if the
# user does something malicious (or there's a bug).
self.log.info(
"Cluster %s heartbeat requested %d workers, exceeding limit of %s.",
cluster_name,
count,
max_workers,
)
count = max_workers
if count != cluster.count:
cluster_update["count"] = count
created_workers = []
submitted_workers = []
target_updates = []
newly_running = []
close_expected = []
for worker in cluster.workers.values():
if worker.status >= JobStatus.STOPPED:
continue
elif worker.name in closing_workers:
if worker.status < JobStatus.RUNNING:
newly_running.append(worker)
close_expected.append(worker)
elif worker.name in active_workers:
if worker.status < JobStatus.RUNNING:
newly_running.append(worker)
elif worker.name in closed_workers:
target = (
JobStatus.STOPPED if worker.close_expected else JobStatus.FAILED
)
target_updates.append((worker, {"target": target}))
else:
if worker.status == JobStatus.SUBMITTED:
submitted_workers.append(worker)
else:
assert worker.status == JobStatus.CREATED
created_workers.append(worker)
n_pending = len(created_workers) + len(submitted_workers)
n_to_stop = len(active_workers) + n_pending - count
if n_to_stop > 0:
for w in islice(chain(created_workers, submitted_workers), n_to_stop):
target_updates.append((w, {"target": JobStatus.STOPPED}))
if cluster_update:
self.db.update_cluster(cluster, **cluster_update)
self.queue.put(cluster)
self.db.update_workers(target_updates)
for w, u in target_updates:
self.queue.put(w)
if newly_running:
# At least one worker successfully started, reset failure count
cluster.worker_start_failure_count = 0
self.db.update_workers(
[(w, {"status": JobStatus.RUNNING}) for w in newly_running]
)
for w in newly_running:
self.log.info("Worker %s is running", w.name)
self.db.update_workers([(w, {"close_expected": True}) for w in close_expected])
async def check_timeouts_loop(self):
while True:
await asyncio.sleep(self.check_timeouts_period)
try:
await self._check_timeouts()
except asyncio.CancelledError:
raise
except Exception as exc:
self.log.warning(
"Exception while checking for timed out clusters/workers",
exc_info=exc,
)
async def _check_timeouts(self):
self.log.debug("Checking for timed out clusters/workers")
now = timestamp()
cluster_heartbeat_cutoff = now - self.cluster_heartbeat_timeout * 1000
cluster_start_cutoff = now - self.cluster_start_timeout * 1000
worker_start_cutoff = now - self.worker_start_timeout * 1000
cluster_updates = []
worker_updates = []
for cluster in self.db.active_clusters():
if cluster.status == JobStatus.SUBMITTED:
# Check if submitted clusters have timed out
if cluster.start_time < cluster_start_cutoff:
self.log.info("Cluster %s startup timed out", cluster.name)
cluster_updates.append((cluster, {"target": JobStatus.FAILED}))
elif cluster.status == JobStatus.RUNNING:
# Check if running clusters have missed a heartbeat
if cluster.last_heartbeat < cluster_heartbeat_cutoff:
self.log.info("Cluster %s heartbeat timed out", cluster.name)
cluster_updates.append((cluster, {"target": JobStatus.FAILED}))
else:
for w in cluster.workers.values():
# Check if submitted workers have timed out
if (
w.status == JobStatus.SUBMITTED
and w.target == JobStatus.RUNNING
and w.start_time < worker_start_cutoff
):
self.log.info("Worker %s startup timed out", w.name)
worker_updates.append((w, {"target": JobStatus.FAILED}))
self.db.update_clusters(cluster_updates)
for c, _ in cluster_updates:
self.queue.put(c)
self.db.update_workers(worker_updates)
for w, _ in worker_updates:
self.queue.put(w)
async def check_clusters_loop(self):
while True:
await asyncio.sleep(self.cluster_status_period)
self.log.debug("Checking pending cluster statuses")
try:
clusters = [
c
for c in self.db.active_clusters()
if c.status == JobStatus.SUBMITTED
]
statuses = await self.do_check_clusters(clusters)
updates = [
(c, {"target": JobStatus.FAILED})
for c, ok in zip(clusters, statuses)
if not ok
]
self.db.update_clusters(updates)
for c, _ in updates:
self.log.info("Cluster %s failed during startup", c.name)
self.queue.put(c)
except asyncio.CancelledError:
raise
except Exception as exc:
self.log.warning(
"Exception while checking cluster statuses", exc_info=exc
)
async def check_workers_loop(self):
while True:
await asyncio.sleep(self.worker_status_period)
self.log.debug("Checking pending worker statuses")
try:
clusters = (
c
for c in self.db.active_clusters()
if c.status == JobStatus.RUNNING
)
workers = [
w
for c in clusters
for w in c.active_workers()
if w.status == JobStatus.SUBMITTED
]
statuses = await self.do_check_workers(workers)
updates = [
(w, {"target": JobStatus.FAILED})
for w, ok in zip(workers, statuses)
if not ok
]
self.db.update_workers(updates)
for w, _ in updates:
self.log.info("Worker %s failed during startup", w.name)
w.cluster.worker_start_failure_count += 1
self.queue.put(w)
except asyncio.CancelledError:
raise
except Exception as exc:
self.log.warning(
"Exception while checking worker statuses", exc_info=exc
)
async def cleanup_db_loop(self):
while True:
try:
n = self.db.cleanup_expired(self.db_cluster_max_age)
except Exception as exc:
self.log.error(
"Error while cleaning expired database records", exc_info=exc
)
else:
self.log.debug("Removed %d expired clusters from the database", n)
await asyncio.sleep(self.db_cleanup_period)
async def reconciler_loop(self):
while True:
try:
obj = await self.queue.get()
except WorkQueueClosed:
return
if isinstance(obj, Cluster):
method = self.reconcile_cluster
kind = "cluster"
else:
method = self.reconcile_worker
kind = "worker"
self.log.debug(
"Reconciling %s %s, %s -> %s",
kind,
obj.name,
obj.status.name,
obj.target.name,
)
try:
await method(obj)
except Exception:
self.log.warning(
"Error while reconciling %s %s", kind, obj.name, exc_info=True
)
self.queue.put_backoff(obj)
else:
self.queue.reset_backoff(obj)
finally:
self.queue.task_done(obj)
async def reconcile_cluster(self, cluster):
if cluster.status >= JobStatus.STOPPED:
return
if cluster.target in (JobStatus.STOPPED, JobStatus.FAILED):
if cluster.status == JobStatus.CLOSING:
if self.is_cluster_ready_to_close(cluster):
await self._cluster_to_stopped(cluster)
else:
await self._cluster_to_closing(cluster)
return
if cluster.target == JobStatus.RUNNING:
if cluster.status == JobStatus.CREATED:
await self._cluster_to_submitted(cluster)
return
if cluster.status == JobStatus.SUBMITTED and cluster.scheduler_address:
await self._cluster_to_running(cluster)
if cluster.status == JobStatus.RUNNING:
await self._check_cluster_proxied(cluster)
await self._check_cluster_scale(cluster)
async def reconcile_worker(self, worker):
if worker.status >= JobStatus.STOPPED:
return
if worker.target == JobStatus.CLOSING:
if worker.status != JobStatus.CLOSING:
self.db.update_worker(worker, status=JobStatus.CLOSING)
if self.is_cluster_ready_to_close(worker.cluster):
self.queue.put(worker.cluster)
return
if worker.target in (JobStatus.STOPPED, JobStatus.FAILED):
await self._worker_to_stopped(worker)
if self.is_cluster_ready_to_close(worker.cluster):
self.queue.put(worker.cluster)
elif (
worker.cluster.target == JobStatus.RUNNING and not worker.close_expected
):
self.queue.put(worker.cluster)
return
if worker.status == JobStatus.CREATED and worker.target == JobStatus.RUNNING:
await self._worker_to_submitted(worker)
return
def is_cluster_ready_to_close(self, cluster):
return (
cluster.status == JobStatus.CLOSING
and (
self.supports_bulk_shutdown
and cluster.all_workers_at_least(JobStatus.CLOSING)
)
or cluster.all_workers_at_least(JobStatus.STOPPED)
)
async def _cluster_to_submitted(self, cluster):
self.log.info("Submitting cluster %s...", cluster.name)
try:
async with timeout(self.cluster_start_timeout):
async for state in self.do_start_cluster(cluster):
self.log.debug("State update for cluster %s", cluster.name)
self.db.update_cluster(cluster, state=state)
self.db.update_cluster(cluster, status=JobStatus.SUBMITTED)
self.log.info("Cluster %s submitted", cluster.name)
except asyncio.CancelledError:
raise
except Exception as exc:
if isinstance(exc, asyncio.TimeoutError):
self.log.info("Cluster %s startup timed out", cluster.name)
else:
self.log.warning(
"Failed to submit cluster %s", cluster.name, exc_info=exc
)
self.db.update_cluster(
cluster, status=JobStatus.SUBMITTED, target=JobStatus.FAILED
)
self.queue.put(cluster)
async def _cluster_to_closing(self, cluster):
self.log.debug("Preparing to stop cluster %s", cluster.name)
target = JobStatus.CLOSING if self.supports_bulk_shutdown else JobStatus.STOPPED
workers = [w for w in cluster.workers.values() if w.target < target]
self.db.update_workers([(w, {"target": target}) for w in workers])
for w in workers:
self.queue.put(w)
self.db.update_cluster(cluster, status=JobStatus.CLOSING)
if not workers:
# If there are workers, the cluster will be enqueued after the last one closed
# If there are no workers, requeue now
self.queue.put(cluster)
cluster.ready.set()
async def _cluster_to_stopped(self, cluster):
self.log.info("Stopping cluster %s...", cluster.name)
if cluster.status > JobStatus.CREATED:
try:
await self.do_stop_cluster(cluster)
except Exception as exc:
self.log.warning(
"Exception while stopping cluster %s", cluster.name, exc_info=exc
)
await self.proxy.remove_route(kind="PATH", path=f"/clusters/{cluster.name}")
await self.proxy.remove_route(kind="SNI", sni=cluster.name)
self.log.info("Cluster %s stopped", cluster.name)
self.db.update_workers(
[
(w, {"status": JobStatus.STOPPED, "target": JobStatus.STOPPED})
for w in cluster.workers.values()
if w.status < JobStatus.STOPPED
]
)
self.db.update_cluster(cluster, status=cluster.target, stop_time=timestamp())
cluster.ready.set()
cluster.shutdown.set()
async def _cluster_to_running(self, cluster):
self.log.info("Cluster %s is running", cluster.name)
self.db.update_cluster(cluster, status=JobStatus.RUNNING)
cluster.ready.set()
async def _check_cluster_proxied(self, cluster):
if not cluster.added_to_proxies:
self.log.info("Adding cluster %s routes to proxies", cluster.name)
if cluster.dashboard_address:
await self.proxy.add_route(
kind="PATH",
path=f"/clusters/{cluster.name}",
target=cluster.dashboard_address,
)
await self.proxy.add_route(
kind="SNI", sni=cluster.name, target=cluster.scheduler_address
)
cluster.added_to_proxies = True
async def _check_cluster_scale(self, cluster):
if cluster.worker_start_failure_count >= self.worker_start_failure_limit:
self.log.info(
"Cluster %s had %d consecutive workers fail to start, failing the cluster",
cluster.name,
cluster.worker_start_failure_count,
)
self.db.update_cluster(cluster, target=JobStatus.FAILED)
self.queue.put(cluster)
return
active = cluster.active_workers()
if cluster.count > len(active):
for _ in range(cluster.count - len(active)):
worker = self.db.create_worker(cluster)
self.log.info(
"Created worker %s for cluster %s", worker.name, cluster.name
)
self.queue.put(worker)
async def _worker_to_submitted(self, worker):
self.log.info("Submitting worker %s...", worker.name)
try:
async with timeout(self.worker_start_timeout):
async for state in self.do_start_worker(worker):
self.log.debug("State update for worker %s", worker.name)
self.db.update_worker(worker, state=state)
self.db.update_worker(worker, status=JobStatus.SUBMITTED)
self.log.info("Worker %s submitted", worker.name)
except asyncio.CancelledError:
raise
except Exception as exc:
if isinstance(exc, asyncio.TimeoutError):
self.log.info("Worker %s startup timed out", worker.name)
else:
self.log.warning(
"Failed to submit worker %s", worker.name, exc_info=exc
)
self.db.update_worker(
worker, status=JobStatus.SUBMITTED, target=JobStatus.FAILED
)
worker.cluster.worker_start_failure_count += 1
self.queue.put(worker)
async def _worker_to_stopped(self, worker):
self.log.info("Stopping worker %s...", worker.name)
if worker.status > JobStatus.CREATED:
try:
await self.do_stop_worker(worker)
except Exception as exc:
self.log.warning(
"Exception while stopping worker %s", worker.name, exc_info=exc
)
self.log.info("Worker %s stopped", worker.name)
self.db.update_worker(worker, status=worker.target, stop_time=timestamp())
def get_tls_paths(self, cluster):
"""Return the paths to the cert and key files for this cluster"""
return "dask.crt", "dask.pem"
def get_env(self, cluster):
"""Get a dict of environment variables to set for the process"""
out = dict(cluster.config.environment)
# Set values that dask-gateway needs to run
out.update(
{
"DASK_GATEWAY_API_URL": self.api_url,
"DASK_GATEWAY_API_TOKEN": cluster.token,
"DASK_GATEWAY_CLUSTER_NAME": cluster.name,
"DASK_DISTRIBUTED__COMM__REQUIRE_ENCRYPTION": "True",
}
)
return out
def get_scheduler_env(self, cluster):
env = self.get_env(cluster)
tls_cert_path, tls_key_path = self.get_tls_paths(cluster)
env.update(
{
"DASK_DISTRIBUTED__COMM__TLS__CA_FILE": tls_cert_path,
"DASK_DISTRIBUTED__COMM__TLS__SCHEDULER__KEY": tls_key_path,
"DASK_DISTRIBUTED__COMM__TLS__SCHEDULER__CERT": tls_cert_path,
}
)
return env
def get_worker_env(self, cluster):
env = self.get_env(cluster)
tls_cert_path, tls_key_path = self.get_tls_paths(cluster)
env.update(
{
"DASK_DISTRIBUTED__COMM__TLS__CA_FILE": tls_cert_path,
"DASK_DISTRIBUTED__COMM__TLS__WORKER__KEY": tls_key_path,
"DASK_DISTRIBUTED__COMM__TLS__WORKER__CERT": tls_cert_path,
}
)
return env
default_host = "0.0.0.0"
def get_scheduler_command(self, cluster):
return cluster.config.scheduler_cmd + [
"--protocol",
"tls",
"--port",
"0",
"--host",
self.default_host,
"--dashboard-address",
f"{self.default_host}:0",
"--preload",
"dask_gateway.scheduler_preload",
"--dg-api-address",
f"{self.default_host}:0",
"--dg-heartbeat-period",
str(self.cluster_heartbeat_period),
"--dg-adaptive-period",
str(cluster.config.adaptive_period),
"--dg-idle-timeout",
str(cluster.config.idle_timeout),
]
def worker_nthreads_memory_limit_args(self, cluster):
return str(cluster.config.worker_cores), str(cluster.config.worker_memory)
def get_worker_command(self, cluster, worker_name, scheduler_address=None):
nthreads, memory_limit = self.worker_nthreads_memory_limit_args(cluster)
if scheduler_address is None:
scheduler_address = cluster.scheduler_address
return cluster.config.worker_cmd + [
scheduler_address,
"--dashboard-address",
f"{self.default_host}:0",
"--name",
worker_name,
"--nthreads",
nthreads,
"--memory-limit",
memory_limit,
]
# Subclasses should implement these methods
supports_bulk_shutdown = False
async def do_setup(self):
"""Called when the server is starting up.
Do any initialization here.
"""
pass
async def do_cleanup(self):
"""Called when the server is shutting down.
Do any cleanup here."""
pass
async def do_start_cluster(self, cluster):
"""Start a cluster.
This should do any initialization for the whole dask cluster
application, and then start the scheduler.
Parameters
----------
cluster : Cluster
Information on the cluster to be started.
Yields
------
cluster_state : dict
Any state needed for further interactions with this cluster. This
should be serializable using ``json.dumps``. If startup occurs in
multiple stages, can iteratively yield state updates to be
checkpointed. If an error occurs at any time, the last yielded
state will be used when calling ``do_stop_cluster``.
"""
raise NotImplementedError
async def do_stop_cluster(self, cluster):
"""Stop a cluster.
Parameters
----------
cluster : Cluster
Information on the cluster to be stopped.
"""
raise NotImplementedError
async def do_check_clusters(self, clusters):
"""Check the status of multiple clusters.
This is periodically called to check the status of pending clusters.
Once a cluster is running this will no longer be called.
Parameters
----------
clusters : List[Cluster]
The clusters to be checked.
Returns
-------
statuses : List[bool]
The status for each cluster. Return False if the cluster has
stopped or failed, True if the cluster is pending start or running.
"""
raise NotImplementedError
async def do_start_worker(self, worker):
"""Start a worker.
Parameters
----------
worker : Worker
Information on the worker to be started.
Yields
------
worker_state : dict
Any state needed for further interactions with this worker. This
should be serializable using ``json.dumps``. If startup occurs in
multiple stages, can iteratively yield state updates to be
checkpointed. If an error occurs at any time, the last yielded
state will be used when calling ``do_stop_worker``.
"""
raise NotImplementedError
async def do_stop_worker(self, worker):
"""Stop a worker.
Parameters
----------
worker : Worker
Information on the worker to be stopped.
"""
raise NotImplementedError
async def do_check_workers(self, workers):
"""Check the status of multiple workers.
This is periodically called to check the status of pending workers.
Once a worker is running this will no longer be called.
Parameters
----------
workers : List[Worker]
The workers to be checked.
Returns
-------
statuses : List[bool]
The status for each worker. Return False if the worker has
stopped or failed, True if the worker is pending start or running.
"""
raise NotImplementedError
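# Illustrative sketch (not part of dask-gateway): a minimal backend showing the
# contract documented above. The class name and the trivial bodies are assumptions;
# a real backend would launch actual scheduler/worker processes.
class EchoBackendExample(DBBackendBase):
    """Toy backend that records state but starts nothing real."""
    async def do_setup(self):
        self.log.info("Echo backend ready")
    async def do_start_cluster(self, cluster):
        # Yield JSON-serializable state; each yield is checkpointed to the database.
        yield {"cluster_name": cluster.name}
    async def do_stop_cluster(self, cluster):
        self.log.info("Stopping cluster state %r", cluster.state)
    async def do_check_clusters(self, clusters):
        # Report every pending cluster as still alive.
        return [True for _ in clusters]
    async def do_start_worker(self, worker):
        yield {"worker_name": worker.name}
    async def do_stop_worker(self, worker):
        self.log.info("Stopping worker state %r", worker.state)
    async def do_check_workers(self, workers):
        return [True for _ in workers]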
| 35.213198 | 91 | 0.582583 |
7948ce14ef5b28c9cb3b5eb7ddfbb911bfb8a611
| 4,774 |
py
|
Python
|
src/wavelet-FE/datasets/custom_dataset.py
|
Omekaago101/Intracranial-Hemorrhage-Classification
|
4f53da3a3869be7b451edc558ef06c5c41083b4b
|
[
"MIT"
] | null | null | null |
src/wavelet-FE/datasets/custom_dataset.py
|
Omekaago101/Intracranial-Hemorrhage-Classification
|
4f53da3a3869be7b451edc558ef06c5c41083b4b
|
[
"MIT"
] | null | null | null |
src/wavelet-FE/datasets/custom_dataset.py
|
Omekaago101/Intracranial-Hemorrhage-Classification
|
4f53da3a3869be7b451edc558ef06c5c41083b4b
|
[
"MIT"
] | null | null | null |
import cv2
import os
import torch
import numpy as np
from torch.utils.data import Dataset
from albumentations.pytorch.transforms import ToTensorV2
from albumentations import (MedianBlur, Compose, Normalize, OpticalDistortion, HorizontalFlip,
VerticalFlip, ShiftScaleRotate, Transpose, OneOf, IAAAdditiveGaussianNoise,
GaussNoise, RandomGamma, Blur, RandomBrightness, HueSaturationValue,
RandomBrightnessContrast, GridDistortion,Lambda, NoOp, CenterCrop, Resize,RandomResizedCrop
)
class IntracranialDataset(Dataset):
def __init__(self, cfg, df, path, labels,AUTOCROP,HFLIP,TRANSPOSE,mode='train'):
self.path = path
self.data = df
self.labels = labels
self.crop = AUTOCROP
self.cfg = cfg
self.mode = mode
self.transpose = TRANSPOSE
self.hflip = HFLIP
self.lbls = cfg.CONST.LABELS
if self.mode == "train":
self.transform = Compose([
RandomResizedCrop(cfg.DATA.IMG_SIZE, cfg.DATA.IMG_SIZE,
interpolation=cv2.INTER_LINEAR, scale=(0.8, 1)),
OneOf([
HorizontalFlip(p=1.),
VerticalFlip(p=1.),
]),
OneOf([
ShiftScaleRotate(
shift_limit=0.0625,
scale_limit=0.1,
rotate_limit=30,
border_mode=cv2.BORDER_CONSTANT,
value=0,
p=1.),
GridDistortion(
distort_limit=0.2,
border_mode=cv2.BORDER_CONSTANT,
value=0,
p=1.),
OpticalDistortion(
distort_limit=0.2,
shift_limit=0.15,
border_mode=cv2.BORDER_CONSTANT,
value=0,
p=1.),
NoOp()
]),
OneOf([
IAAAdditiveGaussianNoise(p=1.),
GaussNoise(p=1.),
NoOp()
]),
OneOf([
MedianBlur(blur_limit=3, p=1.),
Blur(blur_limit=3, p=1.),
NoOp()
])
])
elif self.mode == 'test' or self.mode == 'valid':
HFLIPVAL = 1.0 if self.hflip == 'T' else 0.0
TRANSPOSEVAL = 1.0 if self.transpose == 'P' else 0.0
self.transform = Compose([
HorizontalFlip(p=HFLIPVAL),
Transpose(p=TRANSPOSEVAL),
Normalize(mean=[0.22363983, 0.18190407, 0.2523437 ],
std=[0.32451536, 0.2956294, 0.31335256], max_pixel_value=255.0, p=1.0),
])
self.totensor = ToTensorV2()
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
img_name = os.path.join(self.path, self.data.loc[idx, 'Image'] + '.jpg')
#img = cv2.imread(img_name, cv2.IMREAD_GRAYSCALE)
img = cv2.imread(img_name)
        if self.crop:
            try:
                # autocrop drops blank borders; fall back to the full image on failure
                img = self.autocrop(img, threshold=0)
            except Exception:
                pass
img = cv2.resize(img,(self.cfg.DATA.IMG_SIZE,self.cfg.DATA.IMG_SIZE))
        # self.transform is defined for every mode (train/valid/test), so apply it here
        augmented = self.transform(image=img)
        img = augmented['image']
if self.labels:
labels = torch.tensor(
self.data.loc[idx, self.cfg.CONST.LABELS])
return {'image': img, 'labels': labels}
else:
return {'image': img}
    def autocrop(self, image, threshold=0):
"""Crops any edges below or equal to threshold
Crops blank image to 1x1.
Returns cropped image.
https://stackoverflow.com/questions/13538748/crop-black-edges-with-opencv
"""
if len(image.shape) == 3:
flatImage = np.max(image, 2)
else:
flatImage = image
rows = np.where(np.max(flatImage, 0) > threshold)[0]
cols = np.where(np.max(flatImage, 1) > threshold)[0]
image = image[cols[0]: cols[-1] + 1, rows[0]: rows[-1] + 1]
#logger.info(image.shape)
sqside = max(image.shape)
imageout = np.zeros((sqside, sqside, 3), dtype = 'uint8')
imageout[:image.shape[0], :image.shape[1],:] = image.copy()
return imageout
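# Usage sketch (illustrative; cfg, df and path come from the project configuration,
# and cfg.TRAIN.BATCH_SIZE is an assumed setting):
#   ds = IntracranialDataset(cfg, df, path, labels=True, AUTOCROP=True,
#                            HFLIP='T', TRANSPOSE='P', mode='train')
#   loader = torch.utils.data.DataLoader(ds, batch_size=cfg.TRAIN.BATCH_SIZE, shuffle=True)
#   batch = next(iter(loader))   # {'image': ..., 'labels': ...}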
| 40.117647 | 118 | 0.492459 |
7948ce6dbb774668f7bb627c6948581403c45b02
| 587 |
py
|
Python
|
download_videos.py
|
ferreirafabio/FlyingShapesDataset
|
ba4aba153daac4f5131f16b0467c52c2eb094850
|
[
"MIT"
] | null | null | null |
download_videos.py
|
ferreirafabio/FlyingShapesDataset
|
ba4aba153daac4f5131f16b0467c52c2eb094850
|
[
"MIT"
] | null | null | null |
download_videos.py
|
ferreirafabio/FlyingShapesDataset
|
ba4aba153daac4f5131f16b0467c52c2eb094850
|
[
"MIT"
] | 1 |
2019-10-06T21:27:45.000Z
|
2019-10-06T21:27:45.000Z
|
import requests
tfrecords_shared_links = "flyingshapes_videos.txt"
with open(tfrecords_shared_links, 'r') as link_file:
url_list = link_file.read().splitlines()
for i, link in enumerate(url_list):
file_name = "videos" + ".tar.gz." + str(i+1).zfill(2)
r = requests.get(link, stream=True)
print("Downloading", file_name)
with open(file_name, 'wb') as f:
dl = 0
for chunk in r.iter_content(chunk_size=1024):
if chunk:
dl += len(chunk)
f.write(chunk)
f.flush()
print("Downloading", file_name, "completed")
| 20.964286 | 57 | 0.630324 |
7948cf101b8c81118b3add4fa66e096192dd0915
| 4,078 |
py
|
Python
|
neutronclient/common/clientmanager.py
|
BobzhouCH/python-neutronclient-acc
|
ff58c60cc96c2e2a4e25088ae3ce5ff75ea42241
|
[
"Apache-2.0"
] | null | null | null |
neutronclient/common/clientmanager.py
|
BobzhouCH/python-neutronclient-acc
|
ff58c60cc96c2e2a4e25088ae3ce5ff75ea42241
|
[
"Apache-2.0"
] | null | null | null |
neutronclient/common/clientmanager.py
|
BobzhouCH/python-neutronclient-acc
|
ff58c60cc96c2e2a4e25088ae3ce5ff75ea42241
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Manage access to the clients, including authenticating when needed.
"""
import logging
from neutronclient import client
from neutronclient.neutron import client as neutron_client
LOG = logging.getLogger(__name__)
class ClientCache(object):
"""Descriptor class for caching created client handles."""
def __init__(self, factory):
self.factory = factory
self._handle = None
def __get__(self, instance, owner):
# Tell the ClientManager to login to keystone
if self._handle is None:
self._handle = self.factory(instance)
return self._handle
class ClientManager(object):
"""Manages access to API clients, including authentication."""
neutron = ClientCache(neutron_client.make_client)
# Provide support for old quantum commands (for example
# in stable versions)
quantum = neutron
def __init__(self, token=None, url=None,
auth_url=None,
endpoint_type=None,
tenant_name=None,
tenant_id=None,
username=None,
user_id=None,
password=None,
region_name=None,
api_version=None,
auth_strategy=None,
insecure=False,
ca_cert=None,
log_credentials=False,
service_type=None,
service_name=None,
timeout=None,
retries=0,
raise_errors=True,
session=None,
auth=None,
):
self._token = token
self._url = url
self._auth_url = auth_url
self._service_type = service_type
self._service_name = service_name
self._endpoint_type = endpoint_type
self._tenant_name = tenant_name
self._tenant_id = tenant_id
self._username = username
self._user_id = user_id
self._password = password
self._region_name = region_name
self._api_version = api_version
self._service_catalog = None
self._auth_strategy = auth_strategy
self._insecure = insecure
self._ca_cert = ca_cert
self._log_credentials = log_credentials
self._timeout = timeout
self._retries = retries
self._raise_errors = raise_errors
self._session = session
self._auth = auth
return
def initialize(self):
if not self._url:
httpclient = client.construct_http_client(
username=self._username,
user_id=self._user_id,
tenant_name=self._tenant_name,
tenant_id=self._tenant_id,
password=self._password,
region_name=self._region_name,
auth_url=self._auth_url,
service_type=self._service_type,
service_name=self._service_name,
endpoint_type=self._endpoint_type,
insecure=self._insecure,
ca_cert=self._ca_cert,
timeout=self._timeout,
session=self._session,
auth=self._auth,
log_credentials=self._log_credentials)
httpclient.authenticate()
# Populate other password flow attributes
self._token = httpclient.auth_token
self._url = httpclient.endpoint_url
| 33.983333 | 78 | 0.607896 |
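ClientCache in the record above is a descriptor that builds the neutron client on first attribute access and then reuses the handle. A stripped-down sketch of the same pattern follows; the ExpensiveClient and Manager classes are invented for the example and are not part of the python-neutronclient API.

# Illustration of the lazy-caching descriptor pattern; these classes are invented.
class CachedFactory(object):
    """Build a handle via `factory` on first access, then reuse it."""
    def __init__(self, factory):
        self.factory = factory
        self._handle = None

    def __get__(self, instance, owner):
        if self._handle is None:
            self._handle = self.factory(instance)
        return self._handle


class ExpensiveClient(object):
    def __init__(self, manager):
        print('building client for', manager.name)   # runs only once


class Manager(object):
    client = CachedFactory(ExpensiveClient)

    def __init__(self, name):
        self.name = name


m = Manager('neutron')
m.client   # triggers the factory
m.client   # returns the cached handle

Note that, as in the original, the cached handle lives on the descriptor itself (class level), so every Manager instance shares the same handle.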
7948cfbb581efbfe41001fec640fb020aeb368d2
| 684 |
py
|
Python
|
polls/admin.py
|
intelivix/django-tutorial
|
38f4eccb96d78ccd73522c0a30c1fc8d3d8c4fdd
|
[
"MIT"
] | null | null | null |
polls/admin.py
|
intelivix/django-tutorial
|
38f4eccb96d78ccd73522c0a30c1fc8d3d8c4fdd
|
[
"MIT"
] | null | null | null |
polls/admin.py
|
intelivix/django-tutorial
|
38f4eccb96d78ccd73522c0a30c1fc8d3d8c4fdd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.contrib import admin
from polls.models import Choice
from polls.models import Question
class ChoiceInline(admin.TabularInline):
model = Choice
extra = 3
class QuestionAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['question_text']}),
('Date information', {'fields': ['pub_date'],
'classes': ['collapse']}),
]
list_display = ('question_text', 'pub_date', 'was_published_recently')
list_filter = ['pub_date']
search_fields = ['question_text']
inlines = [ChoiceInline]
admin.site.register(Choice)
admin.site.register(Question, QuestionAdmin)
| 26.307692 | 74 | 0.635965 |
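The admin module above registers Question and Choice from polls.models. A sketch of models compatible with that configuration is shown below; the field definitions are inferred from list_display, the fieldsets, and the standard Django tutorial, not taken from the record itself.

# Hypothetical polls/models.py matching the admin configuration above.
import datetime

from django.db import models
from django.utils import timezone


class Question(models.Model):
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def was_published_recently(self):
        # Referenced by list_display in QuestionAdmin.
        return self.pub_date >= timezone.now() - datetime.timedelta(days=1)


class Choice(models.Model):
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)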
7948d0d3929f51dace45f89d3a0824a2d7fa0766
| 326 |
py
|
Python
|
YP/binary_num.py
|
HelloAgni/Algorithm
|
cd804e7535137be26c751125fd6572db57ce1866
|
[
"MIT"
] | null | null | null |
YP/binary_num.py
|
HelloAgni/Algorithm
|
cd804e7535137be26c751125fd6572db57ce1866
|
[
"MIT"
] | null | null | null |
YP/binary_num.py
|
HelloAgni/Algorithm
|
cd804e7535137be26c751125fd6572db57ce1866
|
[
"MIT"
] | null | null | null |
def binary_num(n):
"""
    Print the binary representation of a number.
    Do not use the language's built-in facilities for converting
    numbers to binary.
"""
r = ''
while n > 0:
r = str(n % 2) + r
n = n // 2
print(r)
if __name__ == '__main__':
n = int(input())
binary_num(n)
| 19.176471 | 62 | 0.56135 |
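As a quick check of the conversion loop in the record above, the sketch below compares it against Python's built-in bin(); the helper name and test values are arbitrary, and unlike the original it returns '0' for n = 0 instead of producing an empty string.

# Sanity-check sketch comparing the manual loop with bin(); values are arbitrary.
def to_binary(n):
    r = ''
    while n > 0:
        r = str(n % 2) + r
        n //= 2
    return r or '0'   # the original prints an empty string for 0

for n in (0, 1, 2, 5, 10, 255, 1024):
    assert to_binary(n) == bin(n)[2:], n
print('all conversions match bin()')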
7948d27feb7e047144b7a3fd7120df83976fdb0f
| 667 |
py
|
Python
|
api/exporterhub/settings/local.py
|
yangahh/exporterhub-dev
|
c41783f4b82812358fa05ec55588c5bb6a02134e
|
[
"MIT"
] | 2 |
2020-11-17T11:13:53.000Z
|
2021-01-13T00:38:17.000Z
|
api/exporterhub/settings/local.py
|
yangahh/exporterhub-dev
|
c41783f4b82812358fa05ec55588c5bb6a02134e
|
[
"MIT"
] | null | null | null |
api/exporterhub/settings/local.py
|
yangahh/exporterhub-dev
|
c41783f4b82812358fa05ec55588c5bb6a02134e
|
[
"MIT"
] | 1 |
2020-11-17T11:13:58.000Z
|
2020-11-17T11:13:58.000Z
|
from .base import *
import my_settings
DATABASES = my_settings.DATABASES
SECRET_KEY = my_settings.SECRET_KEY
ALGORITHM = my_settings.ALGORITHM
DEBUG = True
# LOGGING = {
# 'version': 1,
# 'filters': {
# 'require_debug_true': {
# '()': 'django.utils.log.RequireDebugTrue',
# }
# },
# 'handlers': {
# 'console': {
# 'level': 'DEBUG',
# 'filters': ['require_debug_true'],
# 'class': 'logging.StreamHandler',
# }
# },
# 'loggers': {
# 'django.db.backends': {
# 'level': 'DEBUG',
# 'handlers': ['console'],
# }
# }
# }
| 20.84375 | 56 | 0.482759 |
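The local settings module above pulls DATABASES, SECRET_KEY and ALGORITHM from an untracked my_settings module. A placeholder sketch of that module follows; every value is invented, and only the three names imported above are grounded in the record.

# Placeholder my_settings.py; all values are invented examples, not real settings.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',   # backend choice is an assumption
        'NAME': 'exporterhub',
        'USER': 'db_user',
        'PASSWORD': 'change-me',
        'HOST': 'localhost',
        'PORT': '3306',
    }
}

SECRET_KEY = 'replace-with-a-long-random-string'
ALGORITHM = 'HS256'   # JWT signing algorithm; the actual value is unknown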
7948d2b02146b48a644ddbcd2f0bf4f1b148380b
| 12,544 |
py
|
Python
|
azure-batch/azure/batch/models/cloud_task.py
|
jmalobicky/azure-sdk-for-python
|
61234a3d83f8fb481d1dd2386e54e888864878fd
|
[
"MIT"
] | 1 |
2018-07-23T08:59:24.000Z
|
2018-07-23T08:59:24.000Z
|
azure-batch/azure/batch/models/cloud_task.py
|
jmalobicky/azure-sdk-for-python
|
61234a3d83f8fb481d1dd2386e54e888864878fd
|
[
"MIT"
] | null | null | null |
azure-batch/azure/batch/models/cloud_task.py
|
jmalobicky/azure-sdk-for-python
|
61234a3d83f8fb481d1dd2386e54e888864878fd
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CloudTask(Model):
"""An Azure Batch task.
Batch will retry tasks when a recovery operation is triggered on a compute
node. Examples of recovery operations include (but are not limited to) when
an unhealthy compute node is rebooted or a compute node disappeared due to
host failure. Retries due to recovery operations are independent of and are
not counted against the maxTaskRetryCount. Even if the maxTaskRetryCount is
0, an internal retry due to a recovery operation may occur. Because of
this, all tasks should be idempotent. This means tasks need to tolerate
being interrupted and restarted without causing any corruption or duplicate
data. The best practice for long running tasks is to use some form of
checkpointing.
:param id: A string that uniquely identifies the task within the job. The
ID can contain any combination of alphanumeric characters including
hyphens and underscores, and cannot contain more than 64 characters.
:type id: str
:param display_name: A display name for the task. The display name need
not be unique and can contain any Unicode characters up to a maximum
length of 1024.
:type display_name: str
:param url: The URL of the task.
:type url: str
:param e_tag: The ETag of the task. This is an opaque string. You can use
it to detect whether the task has changed between requests. In particular,
    you can pass the ETag when updating a task to specify that your changes
should take effect only if nobody else has modified the task in the
meantime.
:type e_tag: str
:param last_modified: The last modified time of the task.
:type last_modified: datetime
:param creation_time: The creation time of the task.
:type creation_time: datetime
:param exit_conditions: How the Batch service should respond when the task
completes.
:type exit_conditions: ~azure.batch.models.ExitConditions
:param state: The current state of the task. Possible values include:
'active', 'preparing', 'running', 'completed'
:type state: str or ~azure.batch.models.TaskState
:param state_transition_time: The time at which the task entered its
current state.
:type state_transition_time: datetime
:param previous_state: The previous state of the task. This property is
not set if the task is in its initial Active state. Possible values
include: 'active', 'preparing', 'running', 'completed'
:type previous_state: str or ~azure.batch.models.TaskState
:param previous_state_transition_time: The time at which the task entered
its previous state. This property is not set if the task is in its initial
Active state.
:type previous_state_transition_time: datetime
:param command_line: The command line of the task. For multi-instance
tasks, the command line is executed as the primary task, after the primary
task and all subtasks have finished executing the coordination command
line. The command line does not run under a shell, and therefore cannot
take advantage of shell features such as environment variable expansion.
If you want to take advantage of such features, you should invoke the
shell in the command line, for example using "cmd /c MyCommand" in Windows
or "/bin/sh -c MyCommand" in Linux. If the command line refers to file
paths, it should use a relative path (relative to the task working
directory), or use the Batch provided environment variable
(https://docs.microsoft.com/en-us/azure/batch/batch-compute-node-environment-variables).
:type command_line: str
:param container_settings: The settings for the container under which the
task runs. If the pool that will run this task has containerConfiguration
set, this must be set as well. If the pool that will run this task doesn't
have containerConfiguration set, this must not be set. When this is
specified, all directories recursively below the AZ_BATCH_NODE_ROOT_DIR
(the root of Azure Batch directories on the node) are mapped into the
container, all task environment variables are mapped into the container,
and the task command line is executed in the container.
:type container_settings: ~azure.batch.models.TaskContainerSettings
:param resource_files: A list of files that the Batch service will
download to the compute node before running the command line. For
multi-instance tasks, the resource files will only be downloaded to the
compute node on which the primary task is executed.
:type resource_files: list[~azure.batch.models.ResourceFile]
:param output_files: A list of files that the Batch service will upload
from the compute node after running the command line. For multi-instance
tasks, the files will only be uploaded from the compute node on which the
primary task is executed.
:type output_files: list[~azure.batch.models.OutputFile]
:param environment_settings: A list of environment variable settings for
the task.
:type environment_settings: list[~azure.batch.models.EnvironmentSetting]
:param affinity_info: A locality hint that can be used by the Batch
service to select a compute node on which to start the new task.
:type affinity_info: ~azure.batch.models.AffinityInformation
:param constraints: The execution constraints that apply to this task.
:type constraints: ~azure.batch.models.TaskConstraints
:param user_identity: The user identity under which the task runs. If
omitted, the task runs as a non-administrative user unique to the task.
:type user_identity: ~azure.batch.models.UserIdentity
:param execution_info: Information about the execution of the task.
:type execution_info: ~azure.batch.models.TaskExecutionInformation
:param node_info: Information about the compute node on which the task
ran.
:type node_info: ~azure.batch.models.ComputeNodeInformation
:param multi_instance_settings: An object that indicates that the task is
a multi-instance task, and contains information about how to run the
multi-instance task.
:type multi_instance_settings: ~azure.batch.models.MultiInstanceSettings
:param stats: Resource usage statistics for the task.
:type stats: ~azure.batch.models.TaskStatistics
:param depends_on: The tasks that this task depends on. This task will not
be scheduled until all tasks that it depends on have completed
successfully. If any of those tasks fail and exhaust their retry counts,
this task will never be scheduled.
:type depends_on: ~azure.batch.models.TaskDependencies
:param application_package_references: A list of application packages that
the Batch service will deploy to the compute node before running the
command line. Application packages are downloaded and deployed to a shared
directory, not the task working directory. Therefore, if a referenced
package is already on the compute node, and is up to date, then it is not
re-downloaded; the existing copy on the compute node is used. If a
referenced application package cannot be installed, for example because
the package has been deleted or because download failed, the task fails.
:type application_package_references:
list[~azure.batch.models.ApplicationPackageReference]
:param authentication_token_settings: The settings for an authentication
token that the task can use to perform Batch service operations. If this
property is set, the Batch service provides the task with an
authentication token which can be used to authenticate Batch service
operations without requiring an account access key. The token is provided
via the AZ_BATCH_AUTHENTICATION_TOKEN environment variable. The operations
that the task can carry out using the token depend on the settings. For
example, a task can request job permissions in order to add other tasks to
the job, or check the status of the job or of other tasks under the job.
:type authentication_token_settings:
~azure.batch.models.AuthenticationTokenSettings
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'e_tag': {'key': 'eTag', 'type': 'str'},
'last_modified': {'key': 'lastModified', 'type': 'iso-8601'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'exit_conditions': {'key': 'exitConditions', 'type': 'ExitConditions'},
'state': {'key': 'state', 'type': 'TaskState'},
'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
'previous_state': {'key': 'previousState', 'type': 'TaskState'},
'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'},
'command_line': {'key': 'commandLine', 'type': 'str'},
'container_settings': {'key': 'containerSettings', 'type': 'TaskContainerSettings'},
'resource_files': {'key': 'resourceFiles', 'type': '[ResourceFile]'},
'output_files': {'key': 'outputFiles', 'type': '[OutputFile]'},
'environment_settings': {'key': 'environmentSettings', 'type': '[EnvironmentSetting]'},
'affinity_info': {'key': 'affinityInfo', 'type': 'AffinityInformation'},
'constraints': {'key': 'constraints', 'type': 'TaskConstraints'},
'user_identity': {'key': 'userIdentity', 'type': 'UserIdentity'},
'execution_info': {'key': 'executionInfo', 'type': 'TaskExecutionInformation'},
'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'},
'multi_instance_settings': {'key': 'multiInstanceSettings', 'type': 'MultiInstanceSettings'},
'stats': {'key': 'stats', 'type': 'TaskStatistics'},
'depends_on': {'key': 'dependsOn', 'type': 'TaskDependencies'},
'application_package_references': {'key': 'applicationPackageReferences', 'type': '[ApplicationPackageReference]'},
'authentication_token_settings': {'key': 'authenticationTokenSettings', 'type': 'AuthenticationTokenSettings'},
}
def __init__(self, id=None, display_name=None, url=None, e_tag=None, last_modified=None, creation_time=None, exit_conditions=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None, command_line=None, container_settings=None, resource_files=None, output_files=None, environment_settings=None, affinity_info=None, constraints=None, user_identity=None, execution_info=None, node_info=None, multi_instance_settings=None, stats=None, depends_on=None, application_package_references=None, authentication_token_settings=None):
super(CloudTask, self).__init__()
self.id = id
self.display_name = display_name
self.url = url
self.e_tag = e_tag
self.last_modified = last_modified
self.creation_time = creation_time
self.exit_conditions = exit_conditions
self.state = state
self.state_transition_time = state_transition_time
self.previous_state = previous_state
self.previous_state_transition_time = previous_state_transition_time
self.command_line = command_line
self.container_settings = container_settings
self.resource_files = resource_files
self.output_files = output_files
self.environment_settings = environment_settings
self.affinity_info = affinity_info
self.constraints = constraints
self.user_identity = user_identity
self.execution_info = execution_info
self.node_info = node_info
self.multi_instance_settings = multi_instance_settings
self.stats = stats
self.depends_on = depends_on
self.application_package_references = application_package_references
self.authentication_token_settings = authentication_token_settings
| 61.490196 | 571 | 0.722417 |
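Since CloudTask is a plain msrest Model, it can be instantiated directly with keyword arguments from the __init__ signature above. The sketch below uses example values only; in practice, tasks submitted to the service are normally created through the Batch client rather than built by hand.

# Construction sketch using only parameters from the __init__ shown above;
# the id and command line are example values.
from azure.batch.models import CloudTask

task = CloudTask(
    id='mytask-001',
    display_name='sample task',
    command_line='/bin/sh -c "echo hello from batch"',
)

print(task.id, task.command_line)
print(task.state)   # None until the Batch service populates server-side fields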
7948d393f32ea4d775624b7d29369ee11054d574
| 220 |
py
|
Python
|
ex_generator/ex2.py
|
petrewoo/Trash
|
deb7b4c5b9af76b7dfa6bd1d65233cd4f5e8993e
|
[
"MIT"
] | null | null | null |
ex_generator/ex2.py
|
petrewoo/Trash
|
deb7b4c5b9af76b7dfa6bd1d65233cd4f5e8993e
|
[
"MIT"
] | null | null | null |
ex_generator/ex2.py
|
petrewoo/Trash
|
deb7b4c5b9af76b7dfa6bd1d65233cd4f5e8993e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
def test():
for y in [1, 2, 3]:
y = (yield y)
        print('in test:{}'.format(y))
y += 1
x = test()
z = None
z = x.send(z)
z = x.send(z)
x.close()
| 12.941176 | 36 | 0.454545 |
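The last record exercises the generator send() protocol. The annotated driver below walks through what each call does; the traced values are derived from the code above, and starting the generator with send(None) is equivalent to the original's send(z) with z = None.

# Annotated re-run of the generator/send experiment from the record above.
def test():
    for y in [1, 2, 3]:
        y = (yield y)                      # pause here, hand y to the caller
        print('in test:{}'.format(y))      # y is whatever send() passed in
        y += 1                             # overwritten by the for loop anyway

x = test()
z = x.send(None)   # start the generator; it yields 1
print(z)           # -> 1
z = x.send(z)      # resume with y = 1, prints "in test:1", then yields 2
print(z)           # -> 2
x.close()          # raises GeneratorExit inside test(), ending it cleanly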
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.